From 1ce5ad9572695eb07f2d565751078af068219514 Mon Sep 17 00:00:00 2001
From: Shaun Struwig <41984034+Blargian@users.noreply.github.com>
Date: Mon, 6 Oct 2025 21:24:43 +0200
Subject: [PATCH 1/4] translation script improvements

---
 .../_04_sql_translation_reference.md | 0
 .../current/_clients/go/README.md | 7 -
 .../current/_clients/go/README.md.hash | 1 -
 .../api/_invitations-api-reference.md | 10 -
 .../api/_invitations-api-reference.md.hash | 1 -
 .../_placeholders/api/_keys-api-reference.md | 10 -
 .../api/_keys-api-reference.md.hash | 1 -
 .../api/_members-api-reference.md | 10 -
 .../api/_members-api-reference.md.hash | 1 -
 .../api/_organizations-api-reference.md | 10 -
 .../api/_organizations-api-reference.md.hash | 1 -
 .../api/_services-api-reference.md | 10 -
 .../api/_services-api-reference.md.hash | 1 -
 .../current/_placeholders/changelog/_index.md | 12 -
 .../_placeholders/changelog/_index.md.hash | 1 -
 .../_GCS_authentication_and_bucket.md | 56 -
 .../_GCS_authentication_and_bucket.md.hash | 1 -
 .../_S3_authentication_and_bucket.md | 156 -
 .../_S3_authentication_and_bucket.md.hash | 1 -
 .../_add_remote_ip_access_list_detail.md | 20 -
 .../_add_remote_ip_access_list_detail.md.hash | 1 -
 .../current/_snippets/_add_superset_detail.md | 52 -
 .../_snippets/_add_superset_detail.md.hash | 1 -
 .../current/_snippets/_aws_regions.md | 17 -
 .../current/_snippets/_aws_regions.md.hash | 1 -
 .../_clickhouse_mysql_cloud_setup.mdx | 91 -
 .../_clickhouse_mysql_cloud_setup.mdx.hash | 1 -
 .../_clickhouse_mysql_on_premise_setup.mdx | 94 -
 ...clickhouse_mysql_on_premise_setup.mdx.hash | 1 -
 .../current/_snippets/_config-files.md | 13 -
 .../current/_snippets/_config-files.md.hash | 1 -
 .../_snippets/_gather_your_details_http.mdx | 45 -
 .../_gather_your_details_http.mdx.hash | 1 -
 .../_snippets/_gather_your_details_native.md | 25 -
 .../_gather_your_details_native.md.hash | 1 -
 .../current/_snippets/_gcp_regions.md | 12 -
 .../current/_snippets/_gcp_regions.md.hash | 1 -
 .../current/_snippets/_keeper-config-files.md | 11 -
 .../_snippets/_keeper-config-files.md.hash | 1 -
 .../current/_snippets/_launch_sql_console.md | 24 -
 .../_snippets/_launch_sql_console.md.hash | 1 -
 .../_replication-sharding-terminology.md | 15 -
 .../_replication-sharding-terminology.md.hash | 1 -
 .../_snippets/_self_managed_only_automated.md | 11 -
 .../_self_managed_only_automated.md.hash | 1 -
 .../_self_managed_only_no_roadmap.md | 12 -
 .../_self_managed_only_no_roadmap.md.hash | 1 -
 .../_self_managed_only_not_applicable.md | 11 -
 .../_self_managed_only_not_applicable.md.hash | 1 -
 .../_snippets/_self_managed_only_roadmap.md | 12 -
 .../_self_managed_only_roadmap.md.hash | 1 -
 .../_snippets/_service_actions_menu.md | 10 -
 .../_snippets/_service_actions_menu.md.hash | 1 -
 .../current/_snippets/_system_table_cloud.md | 9 -
 .../_snippets/_system_table_cloud.md.hash | 1 -
 .../current/_snippets/_tabs.md | 22 -
 .../current/_snippets/_tabs.md.hash | 1 -
 .../_snippets/_users-and-roles-common.md | 453 -
 .../_snippets/_users-and-roles-common.md.hash | 1 -
 .../current/about-us/_category_.yml | 6 -
 .../current/about-us/about-faq-index.md | 21 -
 .../current/about-us/about-faq-index.md.hash | 1 -
 .../current/about-us/adopters.md | 561 -
 .../beta-and-experimental-features.md | 47 -
 .../beta-and-experimental-features.md.hash | 1 -
 .../current/about-us/cloud.md | 34 -
 .../current/about-us/cloud.md.hash | 1 -
 .../current/about-us/distinctive-features.md | 100 -
 .../about-us/distinctive-features.md.hash | 1 -
 .../current/about-us/history.md | 59 -
.../current/about-us/history.md.hash | 1 - .../current/about-us/index.md | 21 - .../current/about-us/index.md.hash | 1 - .../current/about-us/intro.mdx | 10 - .../current/about-us/intro.mdx.hash | 1 - .../current/about-us/support.md | 25 - .../current/about-us/support.md.hash | 1 - .../architecture/cluster-deployment.md | 155 - .../architecture/cluster-deployment.md.hash | 1 - .../_snippets/_async_inserts.md | 67 - .../_snippets/_async_inserts.md.hash | 1 - .../_snippets/_avoid_mutations.md | 19 - .../_snippets/_avoid_mutations.md.hash | 1 - .../_snippets/_avoid_nullable_columns.md | 34 - .../_snippets/_avoid_nullable_columns.md.hash | 1 - .../_snippets/_avoid_optimize_final.md | 47 - .../_snippets/_avoid_optimize_final.md.hash | 1 - .../best-practices/_snippets/_bulk_inserts.md | 29 - .../_snippets/_bulk_inserts.md.hash | 1 - .../current/best-practices/avoid_mutations.md | 11 - .../best-practices/avoid_mutations.md.hash | 1 - .../best-practices/avoid_optimize_final.md | 11 - .../avoid_optimize_final.md.hash | 1 - .../best-practices/choosing_a_primary_key.md | 174 - .../choosing_a_primary_key.md.hash | 1 - .../current/best-practices/index.md | 38 - .../current/best-practices/index.md.hash | 1 - .../current/best-practices/json_type.md | 316 - .../current/best-practices/json_type.md.hash | 1 - .../best-practices/minimize_optimize_joins.md | 66 - .../minimize_optimize_joins.md.hash | 1 - .../best-practices/partitioning_keys.mdx | 66 - .../best-practices/partitioning_keys.mdx.hash | 1 - .../best-practices/select_data_type.md | 138 - .../best-practices/select_data_type.md.hash | 1 - .../selecting_an_insert_strategy.md | 145 - .../selecting_an_insert_strategy.md.hash | 1 - .../sizing-and-hardware-recommendations.md | 237 - ...izing-and-hardware-recommendations.md.hash | 1 - .../best-practices/use_materialized_views.md | 79 - .../use_materialized_views.md.hash | 1 - .../using_data_skipping_indices.md | 250 - .../using_data_skipping_indices.md.hash | 1 - .../current/chdb/getting-started.md | 389 - .../current/chdb/getting-started.md.hash | 1 - .../current/chdb/guides/clickhouse-local.md | 136 - .../chdb/guides/clickhouse-local.md.hash | 1 - .../current/chdb/guides/index.md | 27 - .../current/chdb/guides/index.md.hash | 1 - .../current/chdb/guides/jupysql.md | 412 - .../current/chdb/guides/jupysql.md.hash | 1 - .../chdb/guides/query-remote-clickhouse.md | 192 - .../guides/query-remote-clickhouse.md.hash | 1 - .../chdb/guides/querying-apache-arrow.md | 177 - .../chdb/guides/querying-apache-arrow.md.hash | 1 - .../current/chdb/guides/querying-pandas.md | 403 - .../chdb/guides/querying-pandas.md.hash | 1 - .../current/chdb/guides/querying-parquet.md | 185 - .../chdb/guides/querying-parquet.md.hash | 1 - .../current/chdb/guides/querying-s3-bucket.md | 202 - .../chdb/guides/querying-s3-bucket.md.hash | 1 - .../current/chdb/index.md | 69 - .../current/chdb/index.md.hash | 1 - .../current/chdb/install/bun.md | 60 - .../current/chdb/install/bun.md.hash | 1 - .../current/chdb/install/c.md | 51 - .../current/chdb/install/c.md.hash | 1 - .../current/chdb/install/go.md | 38 - .../current/chdb/install/go.md.hash | 1 - .../current/chdb/install/index.md | 26 - .../current/chdb/install/index.md.hash | 1 - .../current/chdb/install/nodejs.md | 74 - .../current/chdb/install/nodejs.md.hash | 1 - .../current/chdb/install/python.md | 269 - .../current/chdb/install/python.md.hash | 1 - .../current/chdb/install/rust.md | 30 - .../current/chdb/install/rust.md.hash | 1 - .../current/chdb/reference/data-formats.md | 104 - 
.../chdb/reference/data-formats.md.hash | 1 - .../current/chdb/reference/index.md | 15 - .../current/chdb/reference/index.md.hash | 1 - .../current/chdb/reference/sql-reference.md | 26 - .../chdb/reference/sql-reference.md.hash | 1 - .../current/cloud-index.md.hash | 1 - .../current/cloud/_category_.yml | 7 - .../cloud/bestpractices/_category_.yml | 7 - .../cloud/bestpractices/asyncinserts.md.hash | 1 - .../bestpractices/avoidmutations.md.hash | 1 - .../avoidnullablecolumns.md.hash | 1 - .../bestpractices/avoidoptimizefinal.md.hash | 1 - .../cloud/bestpractices/bulkinserts.md.hash | 1 - .../current/cloud/bestpractices/index.md | 44 - .../current/cloud/bestpractices/index.md.hash | 1 - .../cloud/bestpractices/multitenancy.md | 379 - .../cloud/bestpractices/multitenancy.md.hash | 1 - .../bestpractices/partitioningkey.md.hash | 1 - .../cloud/bestpractices/usagelimits.md | 33 - .../cloud/bestpractices/usagelimits.md.hash | 1 - .../cloud/changelogs/changelog-24-10.md | 54 - .../cloud/changelogs/changelog-24-12.md | 226 - .../cloud/changelogs/changelog-24-5.md | 182 - .../cloud/changelogs/changelog-24-6.md | 140 - .../cloud/changelogs/changelog-24-8.md | 66 - .../cloud/changelogs/changelog-25_1-25_4.md | 646 -- .../cloud/changelogs/fast-release-24-2.md | 241 - .../get-started/cloud-quick-start.md.hash | 1 - .../cloud/get-started/cloud-quick-start.mdx | 328 - .../get-started/cloud-quick-start.mdx.hash | 1 - .../current/cloud/get-started/index.md | 26 - .../current/cloud/get-started/index.md.hash | 1 - .../cloud/get-started/query-endpoints.md | 511 - .../cloud/get-started/query-endpoints.md.hash | 1 - .../cloud/get-started/query-insights.md | 57 - .../cloud/get-started/query-insights.md.hash | 1 - .../current/cloud/get-started/sql-console.md | 312 - .../cloud/get-started/sql-console.md.hash | 1 - .../current/cloud/manage/_category_.yml | 6 - .../_snippets/_network_transfer_rates.md | 213 - .../_snippets/_network_transfer_rates.md.hash | 1 - .../current/cloud/manage/account-close.md | 53 - .../cloud/manage/account-close.md.hash | 1 - .../current/cloud/manage/api/api-overview.md | 38 - .../cloud/manage/api/api-overview.md.hash | 1 - .../manage/api/api-reference-index.md.hash | 1 - .../current/cloud/manage/api/index.md | 15 - .../current/cloud/manage/api/index.md.hash | 1 - .../api/invitations-api-reference.md.hash | 1 - .../manage/api/keys-api-reference.md.hash | 1 - .../manage/api/members-api-reference.md.hash | 1 - .../api/organizations-api-reference.md.hash | 1 - ...rivateEndpointConfig-api-reference.md.hash | 1 - .../api/prometheus-api-reference.md.hash | 1 - .../manage/api/services-api-reference.md.hash | 1 - .../api/usageCost-api-reference.md.hash | 1 - .../manage/backups/configurable-backups.md | 44 - .../backups/configurable-backups.md.hash | 1 - .../export-backups-to-own-cloud-account.md | 155 - ...xport-backups-to-own-cloud-account.md.hash | 1 - .../current/cloud/manage/backups/index.md | 17 - .../cloud/manage/backups/index.md.hash | 1 - .../current/cloud/manage/backups/overview.md | 181 - .../cloud/manage/backups/overview.md.hash | 1 - .../current/cloud/manage/billing.md | 412 - .../current/cloud/manage/billing.md.hash | 1 - .../current/cloud/manage/billing/index.md | 21 - .../cloud/manage/billing/index.md.hash | 1 - .../marketplace/aws-marketplace-committed.md | 99 - .../aws-marketplace-committed.md.hash | 1 - .../marketplace/aws-marketplace-payg.md | 140 - .../marketplace/aws-marketplace-payg.md.hash | 1 - .../azure-marketplace-committed.md | 143 - 
.../azure-marketplace-committed.md.hash | 1 - .../marketplace/azure-marketplace-payg.md | 151 - .../azure-marketplace-payg.md.hash | 1 - .../marketplace/gcp-marketplace-committed.md | 146 - .../gcp-marketplace-committed.md.hash | 1 - .../marketplace/gcp-marketplace-payg.md | 120 - .../marketplace/gcp-marketplace-payg.md.hash | 1 - .../cloud/manage/billing/marketplace/index.md | 21 - .../manage/billing/marketplace/index.md.hash | 1 - .../manage/billing/marketplace/overview.md | 98 - .../billing/marketplace/overview.md.hash | 1 - .../manage/billing/payment-thresholds.md | 26 - .../manage/billing/payment-thresholds.md.hash | 1 - .../current/cloud/manage/cloud-tiers.md | 210 - .../current/cloud/manage/cloud-tiers.md.hash | 1 - .../current/cloud/manage/dashboards.md | 106 - .../current/cloud/manage/dashboards.md.hash | 1 - .../current/cloud/manage/index.md | 37 - .../current/cloud/manage/index.md.hash | 1 - .../current/cloud/manage/integrations.md | 34 - .../current/cloud/manage/integrations.md.hash | 1 - .../cloud/manage/jan2025_faq/backup.md | 24 - .../cloud/manage/jan2025_faq/backup.md.hash | 1 - .../cloud/manage/jan2025_faq/billing.md | 51 - .../cloud/manage/jan2025_faq/billing.md.hash | 1 - .../cloud/manage/jan2025_faq/dimensions.md | 107 - .../manage/jan2025_faq/dimensions.md.hash | 1 - .../current/cloud/manage/jan2025_faq/index.md | 25 - .../cloud/manage/jan2025_faq/index.md.hash | 1 - .../cloud/manage/jan2025_faq/new_tiers.md | 70 - .../manage/jan2025_faq/new_tiers.md.hash | 1 - .../manage/jan2025_faq/plan_migrations.md | 113 - .../jan2025_faq/plan_migrations.md.hash | 1 - .../cloud/manage/jan2025_faq/scaling.md | 41 - .../cloud/manage/jan2025_faq/scaling.md.hash | 1 - .../cloud/manage/jan2025_faq/summary.md | 99 - .../cloud/manage/jan2025_faq/summary.md.hash | 1 - .../cloud/manage/network-data-transfer.mdx | 37 - .../manage/network-data-transfer.mdx.hash | 1 - .../current/cloud/manage/notifications.md | 48 - .../cloud/manage/notifications.md.hash | 1 - .../current/cloud/manage/openapi.md | 65 - .../current/cloud/manage/openapi.md.hash | 1 - .../current/cloud/manage/postman.md | 117 - .../current/cloud/manage/postman.md.hash | 1 - .../cloud/manage/replica-aware-routing.md | 46 - .../manage/replica-aware-routing.md.hash | 1 - .../current/cloud/manage/scaling.md | 147 - .../current/cloud/manage/scaling.md.hash | 1 - .../current/cloud/manage/service-uptime.md | 16 - .../cloud/manage/service-uptime.md.hash | 1 - .../current/cloud/manage/settings.md | 20 - .../current/cloud/manage/settings.md.hash | 1 - .../manage/troubleshooting-billing-issues.md | 24 - .../troubleshooting-billing-issues.md.hash | 1 - .../current/cloud/manage/upgrades.md | 101 - .../current/cloud/manage/upgrades.md.hash | 1 - .../current/cloud/reference/_category_.yml | 7 - .../current/cloud/reference/architecture.md | 54 - .../cloud/reference/architecture.md.hash | 1 - .../current/cloud/reference/byoc.md | 429 - .../current/cloud/reference/byoc.md.hash | 1 - .../current/cloud/reference/changelog.md | 1182 -- .../current/cloud/reference/changelog.md.hash | 1 - .../cloud/reference/changelogs-index.md | 12 - .../cloud/reference/changelogs-index.md.hash | 1 - .../cloud/reference/cloud-compatibility.md | 127 - .../reference/cloud-compatibility.md.hash | 1 - .../compute-compute-separation.md.hash | 1 - .../current/cloud/reference/index.md | 33 - .../current/cloud/reference/index.md.hash | 1 - .../cloud/reference/release-notes-index.md | 20 - .../reference/release-notes-index.md.hash | 1 - .../cloud/reference/shared-merge-tree.md 
| 126 - .../cloud/reference/shared-merge-tree.md.hash | 1 - .../cloud/reference/supported-regions.md | 107 - .../cloud/reference/supported-regions.md.hash | 1 - .../current/cloud/reference/warehouses.md | 189 - .../cloud/reference/warehouses.md.hash | 1 - .../current/cloud/security/_category_.yml | 6 - .../security/accessing-s3-data-securely.md | 146 - .../accessing-s3-data-securely.md.hash | 1 - .../current/cloud/security/audit-logging.md | 69 - .../cloud/security/audit-logging.md.hash | 1 - .../current/cloud/security/aws-privatelink.md | 354 - .../cloud/security/aws-privatelink.md.hash | 1 - .../cloud/security/azure-privatelink.md | 548 - .../cloud/security/azure-privatelink.md.hash | 1 - .../cloud-access-management.md | 128 - .../cloud-access-management.md.hash | 1 - .../cloud-authentication.md | 130 - .../cloud-authentication.md.hash | 1 - .../security/cloud-access-management/index.md | 15 - .../cloud-access-management/index.md.hash | 1 - .../cloud/security/cloud-endpoints-api.md | 47 - .../security/cloud-endpoints-api.md.hash | 1 - .../current/cloud/security/cmek.md | 111 - .../current/cloud/security/cmek.md.hash | 1 - .../common-access-management-queries.md | 70 - .../common-access-management-queries.md.hash | 1 - .../cloud/security/compliance-overview.md | 68 - .../security/compliance-overview.md.hash | 1 - .../cloud/security/connectivity-overview.md | 19 - .../security/connectivity-overview.md.hash | 1 - .../security/gcp-private-service-connect.md | 439 - .../gcp-private-service-connect.md.hash | 1 - .../current/cloud/security/index.md | 25 - .../current/cloud/security/index.md.hash | 1 - .../cloud/security/inviting-new-users.md | 30 - .../cloud/security/inviting-new-users.md.hash | 1 - .../cloud/security/personal-data-access.md | 63 - .../security/personal-data-access.md.hash | 1 - .../security/privacy-compliance-overview.md | 18 - .../privacy-compliance-overview.md.hash | 1 - .../cloud/security/private-link-overview.md | 17 - .../security/private-link-overview.md.hash | 1 - .../current/cloud/security/saml-sso-setup.md | 364 - .../cloud/security/saml-sso-setup.md.hash | 1 - .../cloud/security/setting-ip-filters.md | 85 - .../cloud/security/setting-ip-filters.md.hash | 1 - .../security/shared-responsibility-model.md | 110 - .../shared-responsibility-model.md.hash | 1 - .../current/cloud/support.md | 11 - .../current/cloud/support.md.hash | 1 - .../current/concepts/glossary.md | 39 - .../current/concepts/glossary.md.hash | 1 - .../current/concepts/index.md | 19 - .../current/concepts/index.md.hash | 1 - .../current/concepts/olap.md | 41 - .../current/concepts/olap.md.hash | 1 - .../concepts/why-clickhouse-is-so-fast.md | 147 - .../why-clickhouse-is-so-fast.md.hash | 1 - .../compression-in-clickhouse.md | 445 - .../compression-in-clickhouse.md.hash | 1 - .../data-compression/compression-modes.md | 58 - .../compression-modes.md.hash | 1 - .../current/data-modeling/backfilling.md | 621 - .../current/data-modeling/backfilling.md.hash | 1 - .../current/data-modeling/denormalization.md | 377 - .../data-modeling/denormalization.md.hash | 1 - .../current/data-modeling/index.md | 28 - .../current/data-modeling/index.md.hash | 1 - .../current/data-modeling/projections.md | 445 - .../current/data-modeling/projections.md.hash | 1 - .../current/data-modeling/schema-design.md | 346 - .../data-modeling/schema-design.md.hash | 1 - .../deployment-guides/horizontal-scaling.md | 459 - .../horizontal-scaling.md.hash | 1 - .../current/deployment-guides/index.md | 19 - 
.../current/deployment-guides/index.md.hash | 1 - .../deployment-guides/parallel-replicas.mdx | 402 - .../parallel-replicas.mdx.hash | 1 - .../current/deployment-guides/replicated.md | 544 - .../deployment-guides/replicated.md.hash | 1 - .../current/deployment-guides/terminology.md | 41 - .../deployment-guides/terminology.md.hash | 1 - .../current/deployment-modes.md | 74 - .../current/deployment-modes.md.hash | 1 - .../current/development/_category_.yml | 8 - .../development/adding_test_queries.md.hash | 1 - .../current/development/architecture.md | 265 - .../current/development/architecture.md.hash | 1 - .../current/development/build-cross-arm.md | 16 - .../development/build-cross-arm.md.hash | 1 - .../development/build-cross-loongarch.md | 27 - .../development/build-cross-loongarch.md.hash | 1 - .../current/development/build-cross-osx.md | 64 - .../development/build-cross-osx.md.hash | 1 - .../current/development/build-cross-riscv.md | 27 - .../development/build-cross-riscv.md.hash | 1 - .../current/development/build-cross-s390x.md | 213 - .../development/build-cross-s390x.md.hash | 1 - .../current/development/build-osx.md | 106 - .../current/development/build-osx.md.hash | 1 - .../current/development/build.md | 221 - .../current/development/build.md.hash | 1 - .../building_and_benchmarking_deflate_qpl.md | 334 - ...lding_and_benchmarking_deflate_qpl.md.hash | 1 - .../development/continuous-integration.md | 180 - .../continuous-integration.md.hash | 1 - .../current/development/contrib.md | 53 - .../current/development/contrib.md.hash | 1 - .../development/developer-instruction.md | 223 - .../development/developer-instruction.md.hash | 1 - .../development/images/concurrency.png | Bin 34535 -> 0 bytes .../images/find-build-artifact.png | Bin 125175 -> 0 bytes .../current/development/index.md | 27 - .../current/development/index.md.hash | 1 - .../development/integrating_rust_libraries.md | 87 - .../integrating_rust_libraries.md.hash | 1 - .../current/development/style.md | 809 -- .../current/development/style.md.hash | 1 - .../current/development/tests.md | 504 - .../current/development/tests.md.hash | 1 - .../current/dictionary/index.md | 336 - .../current/dictionary/index.md.hash | 1 - .../current/engines/_category_.yml | 7 - .../engines/database-engines/atomic.md | 79 - .../engines/database-engines/atomic.md.hash | 1 - .../engines/database-engines/backup.md | 114 - .../engines/database-engines/backup.md.hash | 1 - .../current/engines/database-engines/index.md | 33 - .../engines/database-engines/index.md.hash | 1 - .../current/engines/database-engines/lazy.md | 24 - .../engines/database-engines/lazy.md.hash | 1 - .../materialized-postgresql.md | 297 - .../materialized-postgresql.md.hash | 1 - .../current/engines/database-engines/mysql.md | 158 - .../engines/database-engines/mysql.md.hash | 1 - .../engines/database-engines/postgresql.md | 148 - .../database-engines/postgresql.md.hash | 1 - .../engines/database-engines/replicated.md | 129 - .../database-engines/replicated.md.hash | 1 - .../engines/database-engines/sqlite.md | 88 - .../engines/database-engines/sqlite.md.hash | 1 - .../current/engines/index.md | 12 - .../current/engines/index.md.hash | 1 - .../current/engines/table-engines/index.md | 107 - .../engines/table-engines/index.md.hash | 1 - .../integrations/ExternalDistributed.md | 58 - .../integrations/ExternalDistributed.md.hash | 1 - .../table-engines/integrations/azure-queue.md | 156 - .../integrations/azure-queue.md.hash | 1 - .../integrations/azureBlobStorage.md | 103 - 
.../integrations/azureBlobStorage.md.hash | 1 - .../table-engines/integrations/deltalake.md | 63 - .../integrations/deltalake.md.hash | 1 - .../integrations/embedded-rocksdb.md | 224 - .../integrations/embedded-rocksdb.md.hash | 1 - .../table-engines/integrations/hdfs.md | 250 - .../table-engines/integrations/hdfs.md.hash | 1 - .../table-engines/integrations/hive.md | 423 - .../table-engines/integrations/hive.md.hash | 1 - .../table-engines/integrations/hudi.md | 58 - .../table-engines/integrations/hudi.md.hash | 1 - .../table-engines/integrations/iceberg.md | 299 - .../integrations/iceberg.md.hash | 1 - .../table-engines/integrations/index.md | 46 - .../table-engines/integrations/index.md.hash | 1 - .../table-engines/integrations/jdbc.md | 106 - .../table-engines/integrations/jdbc.md.hash | 1 - .../table-engines/integrations/kafka.md | 319 - .../table-engines/integrations/kafka.md.hash | 1 - .../integrations/materialized-postgresql.md | 81 - .../materialized-postgresql.md.hash | 1 - .../table-engines/integrations/mongodb.md | 245 - .../integrations/mongodb.md.hash | 1 - .../table-engines/integrations/mysql.md | 202 - .../table-engines/integrations/mysql.md.hash | 1 - .../table-engines/integrations/nats.md | 189 - .../table-engines/integrations/nats.md.hash | 1 - .../table-engines/integrations/odbc.md | 140 - .../table-engines/integrations/odbc.md.hash | 1 - .../table-engines/integrations/postgresql.md | 229 - .../integrations/postgresql.md.hash | 1 - .../table-engines/integrations/rabbitmq.md | 215 - .../integrations/rabbitmq.md.hash | 1 - .../table-engines/integrations/redis.md | 161 - .../table-engines/integrations/redis.md.hash | 1 - .../engines/table-engines/integrations/s3.md | 365 - .../table-engines/integrations/s3.md.hash | 1 - .../table-engines/integrations/s3queue.md | 411 - .../integrations/s3queue.md.hash | 1 - .../table-engines/integrations/sqlite.md | 68 - .../table-engines/integrations/sqlite.md.hash | 1 - .../table-engines/integrations/time-series.md | 307 - .../integrations/time-series.md.hash | 1 - .../engines/table-engines/log-family/index.md | 56 - .../table-engines/log-family/index.md.hash | 1 - .../engines/table-engines/log-family/log.md | 103 - .../table-engines/log-family/log.md.hash | 1 - .../table-engines/log-family/stripelog.md | 97 - .../log-family/stripelog.md.hash | 1 - .../table-engines/log-family/tinylog.md | 86 - .../table-engines/log-family/tinylog.md.hash | 1 - .../mergetree-family/aggregatingmergetree.md | 170 - .../aggregatingmergetree.md.hash | 1 - .../mergetree-family/annindexes.md | 454 - .../mergetree-family/annindexes.md.hash | 1 - .../mergetree-family/collapsingmergetree.md | 327 - .../collapsingmergetree.md.hash | 1 - .../custom-partitioning-key.md | 185 - .../custom-partitioning-key.md.hash | 1 - .../mergetree-family/graphitemergetree.md | 277 - .../graphitemergetree.md.hash | 1 - .../table-engines/mergetree-family/index.md | 42 - .../mergetree-family/index.md.hash | 1 - .../mergetree-family/invertedindexes.md | 217 - .../mergetree-family/invertedindexes.md.hash | 1 - .../mergetree-family/mergetree.md | 1026 -- .../mergetree-family/mergetree.md.hash | 1 - .../mergetree-family/replacingmergetree.md | 226 - .../replacingmergetree.md.hash | 1 - .../mergetree-family/replication.md | 363 - .../mergetree-family/replication.md.hash | 1 - .../mergetree-family/summingmergetree.md | 198 - .../mergetree-family/summingmergetree.md.hash | 1 - .../versionedcollapsingmergetree.md | 239 - .../versionedcollapsingmergetree.md.hash | 1 - 
.../engines/table-engines/special/buffer.md | 114 - .../table-engines/special/buffer.md.hash | 1 - .../table-engines/special/dictionary.md | 106 - .../table-engines/special/dictionary.md.hash | 1 - .../table-engines/special/distributed.md | 282 - .../table-engines/special/distributed.md.hash | 1 - .../table-engines/special/executable.md | 232 - .../table-engines/special/executable.md.hash | 1 - .../table-engines/special/external-data.md | 72 - .../special/external-data.md.hash | 1 - .../engines/table-engines/special/file.md | 116 - .../table-engines/special/file.md.hash | 1 - .../engines/table-engines/special/filelog.md | 111 - .../table-engines/special/filelog.md.hash | 1 - .../engines/table-engines/special/generate.md | 61 - .../table-engines/special/generate.md.hash | 1 - .../engines/table-engines/special/index.md | 45 - .../table-engines/special/index.md.hash | 1 - .../engines/table-engines/special/join.md | 169 - .../table-engines/special/join.md.hash | 1 - .../table-engines/special/keepermap.md | 124 - .../table-engines/special/keepermap.md.hash | 1 - .../engines/table-engines/special/memory.md | 112 - .../table-engines/special/memory.md.hash | 1 - .../engines/table-engines/special/merge.md | 118 - .../table-engines/special/merge.md.hash | 1 - .../engines/table-engines/special/null.md | 18 - .../table-engines/special/null.md.hash | 1 - .../engines/table-engines/special/set.md | 38 - .../engines/table-engines/special/set.md.hash | 1 - .../engines/table-engines/special/url.md | 114 - .../engines/table-engines/special/url.md.hash | 1 - .../engines/table-engines/special/view.md | 15 - .../table-engines/special/view.md.hash | 1 - .../current/faq/_category_.yml | 7 - .../current/faq/general/_category_.yml | 4 - .../current/faq/general/columnar-database.md | 34 - .../faq/general/columnar-database.md.hash | 1 - .../current/faq/general/dbms-naming.md | 23 - .../current/faq/general/dbms-naming.md.hash | 1 - .../current/faq/general/index.md | 32 - .../current/faq/general/index.md.hash | 1 - .../current/faq/general/mapreduce.md | 18 - .../current/faq/general/mapreduce.md.hash | 1 - .../current/faq/general/ne-tormozit.md | 33 - .../current/faq/general/ne-tormozit.md.hash | 1 - .../current/faq/general/olap.md | 44 - .../current/faq/general/olap.md.hash | 1 - .../faq/general/who-is-using-clickhouse.md | 24 - .../general/who-is-using-clickhouse.md.hash | 1 - .../current/faq/index.md | 17 - .../current/faq/index.md.hash | 1 - .../current/faq/integration/_category_.yml | 4 - .../current/faq/integration/index.md | 29 - .../current/faq/integration/index.md.hash | 1 - .../current/faq/integration/json-import.md | 39 - .../faq/integration/json-import.md.hash | 1 - .../current/faq/integration/oracle-odbc.md | 20 - .../faq/integration/oracle-odbc.md.hash | 1 - .../current/faq/operations/_category_.yml | 4 - .../current/faq/operations/delete-old-data.md | 59 - .../faq/operations/delete-old-data.md.hash | 1 - .../current/faq/operations/index.md | 25 - .../current/faq/operations/index.md.hash | 1 - .../operations/multi-region-replication.md | 18 - .../multi-region-replication.md.hash | 1 - .../current/faq/operations/production.md | 70 - .../current/faq/operations/production.md.hash | 1 - .../faq/operations/separate_storage.md | 14 - .../faq/operations/separate_storage.md.hash | 1 - .../current/faq/troubleshooting.md | 27 - .../current/faq/troubleshooting.md.hash | 1 - .../current/faq/use-cases/_category_.yml | 4 - .../current/faq/use-cases/index.md | 19 - .../current/faq/use-cases/index.md.hash | 1 - 
.../current/faq/use-cases/key-value.md | 22 - .../current/faq/use-cases/key-value.md.hash | 1 - .../current/faq/use-cases/time-series.md | 26 - .../current/faq/use-cases/time-series.md.hash | 1 - .../current/fast-release-24-2.md.hash | 1 - .../current/getting-started/_category_.yml | 8 - .../example-datasets/amazon-reviews.md | 241 - .../example-datasets/amazon-reviews.md.hash | 1 - .../example-datasets/amplab-benchmark.md | 128 - .../example-datasets/amplab-benchmark.md.hash | 1 - .../example-datasets/brown-benchmark.md | 448 - .../example-datasets/brown-benchmark.md.hash | 1 - .../example-datasets/cell-towers.md | 368 - .../example-datasets/cell-towers.md.hash | 1 - .../example-datasets/covid19.md | 268 - .../example-datasets/covid19.md.hash | 1 - .../example-datasets/criteo.md | 168 - .../example-datasets/criteo.md.hash | 1 - .../example-datasets/environmental-sensors.md | 176 - .../environmental-sensors.md.hash | 1 - .../example-datasets/foursquare-os-places.md | 255 - .../foursquare-os-places.md.hash | 1 - .../example-datasets/github-events.md | 13 - .../example-datasets/github-events.md.hash | 1 - .../example-datasets/github.md | 2430 ---- .../example-datasets/github.md.hash | 1 - .../getting-started/example-datasets/laion.md | 287 - .../example-datasets/laion.md.hash | 1 - .../getting-started/example-datasets/menus.md | 363 - .../example-datasets/menus.md.hash | 1 - .../example-datasets/metrica.md | 147 - .../example-datasets/metrica.md.hash | 1 - .../getting-started/example-datasets/noaa.md | 341 - .../example-datasets/noaa.md.hash | 1 - .../example-datasets/nyc-taxi.md | 306 - .../example-datasets/nyc-taxi.md.hash | 1 - .../example-datasets/nypd_complaint_data.md | 650 -- .../nypd_complaint_data.md.hash | 1 - .../example-datasets/ontime.md | 399 - .../example-datasets/ontime.md.hash | 1 - .../example-datasets/opensky.md | 423 - .../example-datasets/opensky.md.hash | 1 - .../example-datasets/recipes.md | 333 - .../example-datasets/recipes.md.hash | 1 - .../example-datasets/reddit-comments.md | 728 -- .../example-datasets/reddit-comments.md.hash | 1 - .../example-datasets/stackoverflow.md | 395 - .../example-datasets/stackoverflow.md.hash | 1 - .../example-datasets/star-schema.md | 771 -- .../example-datasets/star-schema.md.hash | 1 - .../getting-started/example-datasets/tpcds.md | 601 - .../example-datasets/tpcds.md.hash | 1 - .../getting-started/example-datasets/tpch.md | 1100 -- .../example-datasets/tpch.md.hash | 1 - .../example-datasets/tw-weather.md | 303 - .../example-datasets/tw-weather.md.hash | 1 - .../example-datasets/uk-price-paid.md | 175 - .../example-datasets/uk-price-paid.md.hash | 1 - .../example-datasets/wikistat.md | 86 - .../example-datasets/wikistat.md.hash | 1 - .../example-datasets/youtube-dislikes.md | 486 - .../example-datasets/youtube-dislikes.md.hash | 1 - .../current/getting-started/index.md | 59 - .../current/getting-started/index.md.hash | 1 - .../current/getting-started/install.md.hash | 1 - .../install/_snippets/_deb_install.md | 146 - .../install/_snippets/_deb_install.md.hash | 1 - .../install/_snippets/_docker.md | 193 - .../install/_snippets/_docker.md.hash | 1 - .../install/_snippets/_linux_tar_install.md | 107 - .../_snippets/_linux_tar_install.md.hash | 1 - .../install/_snippets/_macos.md | 103 - .../install/_snippets/_macos.md.hash | 1 - .../install/_snippets/_quick_install.md | 71 - .../install/_snippets/_quick_install.md.hash | 1 - .../install/_snippets/_rpm_install.md | 90 - .../install/_snippets/_rpm_install.md.hash | 1 - 
.../install/_snippets/_windows_install.md | 88 - .../_snippets/_windows_install.md.hash | 1 - .../getting-started/install/advanced.md | 52 - .../getting-started/install/advanced.md.hash | 1 - .../getting-started/install/debian_ubuntu.md | 17 - .../install/debian_ubuntu.md.hash | 1 - .../current/getting-started/install/docker.md | 15 - .../getting-started/install/docker.md.hash | 1 - .../getting-started/install/install.mdx | 50 - .../getting-started/install/install.mdx.hash | 1 - .../current/getting-started/install/macos.md | 15 - .../getting-started/install/macos.md.hash | 1 - .../getting-started/install/other_linux.md | 16 - .../install/other_linux.md.hash | 1 - .../install/quick-install-curl.md | 16 - .../install/quick-install-curl.md.hash | 1 - .../current/getting-started/install/redhat.md | 17 - .../getting-started/install/redhat.md.hash | 1 - .../getting-started/install/windows.md | 16 - .../getting-started/install/windows.md.hash | 1 - .../current/getting-started/playground.md | 62 - .../getting-started/playground.md.hash | 1 - .../current/guides/_category_.yml | 7 - .../guides/best-practices/_category_.yml | 7 - .../guides/best-practices/asyncinserts.md | 10 - .../best-practices/asyncinserts.md.hash | 1 - .../guides/best-practices/avoidmutations.md | 10 - .../best-practices/avoidmutations.md.hash | 1 - .../best-practices/avoidnullablecolumns.md | 10 - .../avoidnullablecolumns.md.hash | 1 - .../best-practices/avoidoptimizefinal.md | 10 - .../best-practices/avoidoptimizefinal.md.hash | 1 - .../guides/best-practices/bulkinserts.md | 10 - .../guides/best-practices/bulkinserts.md.hash | 1 - .../current/guides/best-practices/index.md | 33 - .../guides/best-practices/index.md.hash | 1 - .../guides/best-practices/partitioningkey.md | 10 - .../best-practices/partitioningkey.md.hash | 1 - .../current/guides/best-practices/prewhere.md | 212 - .../guides/best-practices/prewhere.md.hash | 1 - .../best-practices/query-optimization.md | 771 -- .../best-practices/query-optimization.md.hash | 1 - .../best-practices/query-parallelism.md | 263 - .../best-practices/query-parallelism.md.hash | 1 - .../guides/best-practices/skipping-indexes.md | 183 - .../best-practices/skipping-indexes.md.hash | 1 - .../best-practices/sparse-primary-indexes.md | 1469 --- .../sparse-primary-indexes.md.hash | 1 - .../current/guides/creating-tables.md | 66 - .../current/guides/creating-tables.md.hash | 1 - .../current/guides/developer/_category_.yml | 8 - .../developer/alternative-query-languages.md | 80 - .../alternative-query-languages.md.hash | 1 - .../developer/cascading-materialized-views.md | 370 - .../cascading-materialized-views.md.hash | 1 - .../developer/debugging-memory-issues.md | 83 - .../developer/debugging-memory-issues.md.hash | 1 - .../deduplicating-inserts-on-retries.md | 562 - .../deduplicating-inserts-on-retries.md.hash | 1 - .../current/guides/developer/deduplication.md | 345 - .../guides/developer/deduplication.md.hash | 1 - .../current/guides/developer/index.md | 25 - .../current/guides/developer/index.md.hash | 1 - .../guides/developer/lightweight-delete.md | 11 - .../developer/lightweight-delete.md.hash | 1 - .../guides/developer/lightweight-update.md | 95 - .../developer/lightweight-update.md.hash | 1 - .../current/guides/developer/mutations.md | 108 - .../guides/developer/mutations.md.hash | 1 - .../guides/developer/replacing-merge-tree.md | 350 - .../developer/replacing-merge-tree.md.hash | 1 - .../developer/time-series-filling-gaps.md | 350 - .../time-series-filling-gaps.md.hash | 1 - 
.../current/guides/developer/ttl.md | 268 - .../current/guides/developer/ttl.md.hash | 1 - ...nding-query-execution-with-the-analyzer.md | 442 - ...-query-execution-with-the-analyzer.md.hash | 1 - .../aggregate_function_combinators/anyIf.md | 62 - .../anyIf.md.hash | 1 - .../argMaxIf.md | 60 - .../argMaxIf.md.hash | 1 - .../argMinIf.md | 67 - .../argMinIf.md.hash | 1 - .../aggregate_function_combinators/avgIf.md | 58 - .../avgIf.md.hash | 1 - .../aggregate_function_combinators/avgMap.md | 69 - .../avgMap.md.hash | 1 - .../avgMerge.md | 30 - .../avgMerge.md.hash | 1 - .../avgMergeState.md | 213 - .../avgMergeState.md.hash | 1 - .../avgResample.md | 77 - .../avgResample.md.hash | 1 - .../avgState.md | 140 - .../avgState.md.hash | 1 - .../aggregate_function_combinators/countIf.md | 60 - .../countIf.md.hash | 1 - .../countResample.md | 64 - .../countResample.md.hash | 1 - .../groupArrayDistinct.md | 42 - .../groupArrayDistinct.md.hash | 1 - .../groupArrayResample.md | 67 - .../groupArrayResample.md.hash | 1 - .../aggregate_function_combinators/maxMap.md | 69 - .../maxMap.md.hash | 1 - .../maxSimpleState.md | 31 - .../maxSimpleState.md.hash | 1 - .../aggregate_function_combinators/minMap.md | 69 - .../minMap.md.hash | 1 - .../minSimpleState.md | 169 - .../minSimpleState.md.hash | 1 - .../quantilesTimingArrayIf.md | 70 - .../quantilesTimingArrayIf.md.hash | 1 - .../quantilesTimingIf.md | 86 - .../quantilesTimingIf.md.hash | 1 - .../sumArray.md | 56 - .../sumArray.md.hash | 1 - .../sumForEach.md | 50 - .../sumForEach.md.hash | 1 - .../aggregate_function_combinators/sumIf.md | 126 - .../sumIf.md.hash | 1 - .../aggregate_function_combinators/sumMap.md | 69 - .../sumMap.md.hash | 1 - .../sumSimpleState.md | 109 - .../sumSimpleState.md.hash | 1 - .../uniqArray.md | 63 - .../uniqArray.md.hash | 1 - .../uniqArrayIf.md | 87 - .../uniqArrayIf.md.hash | 1 - .../current/guides/inserting-data.md | 159 - .../current/guides/inserting-data.md.hash | 1 - .../current/guides/joining-tables.md | 189 - .../current/guides/joining-tables.md.hash | 1 - .../current/guides/manage-and-deploy-index.md | 38 - .../guides/manage-and-deploy-index.md.hash | 1 - .../guides/separation-storage-compute.md | 177 - .../guides/separation-storage-compute.md.hash | 1 - ...izing-and-hardware-recommendations.md.hash | 1 - .../current/guides/sre/_category_.yml | 8 - .../current/guides/sre/configuring-ssl.md | 506 - .../guides/sre/configuring-ssl.md.hash | 1 - .../current/guides/sre/index.md | 12 - .../current/guides/sre/index.md.hash | 1 - .../current/guides/sre/keeper/_category_.yml | 8 - .../current/guides/sre/keeper/index.md | 1301 --- .../current/guides/sre/keeper/index.md.hash | 1 - .../current/guides/sre/network-ports.md | 35 - .../current/guides/sre/network-ports.md.hash | 1 - .../current/guides/sre/scaling-clusters.md | 22 - .../guides/sre/scaling-clusters.md.hash | 1 - .../guides/sre/user-management/_category_.yml | 7 - .../sre/user-management/configuring-ldap.md | 171 - .../user-management/configuring-ldap.md.hash | 1 - .../guides/sre/user-management/index.md | 548 - .../guides/sre/user-management/index.md.hash | 1 - .../sre/user-management/ssl-user-auth.md | 144 - .../sre/user-management/ssl-user-auth.md.hash | 1 - .../current/guides/troubleshooting.md | 197 - .../current/guides/troubleshooting.md.hash | 1 - .../current/guides/writing-queries.md | 59 - .../current/guides/writing-queries.md.hash | 1 - .../current/home_links/deployment_links.json | 14 - .../current/home_links/links_101.json | 27 - .../current/integrations/cli.mdx 
| 12 - .../current/integrations/cli.mdx.hash | 1 - .../data-ingestion/_category_.yml | 8 - .../data-ingestion/apache-spark/index.md | 32 - .../data-ingestion/apache-spark/index.md.hash | 1 - .../data-ingestion/apache-spark/spark-jdbc.md | 358 - .../apache-spark/spark-jdbc.md.hash | 1 - .../apache-spark/spark-native-connector.md | 563 - .../spark-native-connector.md.hash | 1 - .../data-ingestion/aws-glue/index.md | 115 - .../data-ingestion/aws-glue/index.md.hash | 1 - .../azure-data-factory/index.md | 18 - .../azure-data-factory/index.md.hash | 1 - .../azure-data-factory/overview.md | 25 - .../azure-data-factory/overview.md.hash | 1 - .../using_azureblobstorage.md | 139 - .../using_azureblobstorage.md.hash | 1 - .../using_http_interface.md | 262 - .../using_http_interface.md.hash | 1 - .../data-ingestion/azure-synapse/index.md | 96 - .../azure-synapse/index.md.hash | 1 - .../clickpipes/assets/static-ips.json | 1 - .../clickpipes/aws-privatelink.md | 181 - .../clickpipes/aws-privatelink.md.hash | 1 - .../data-ingestion/clickpipes/index.md | 97 - .../data-ingestion/clickpipes/index.md.hash | 1 - .../data-ingestion/clickpipes/kafka.md | 372 - .../data-ingestion/clickpipes/kafka.md.hash | 1 - .../data-ingestion/clickpipes/kinesis.md | 157 - .../data-ingestion/clickpipes/kinesis.md.hash | 1 - .../clickpipes/mysql/datatypes.md | 33 - .../clickpipes/mysql/datatypes.md.hash | 1 - .../data-ingestion/clickpipes/mysql/faq.md | 31 - .../clickpipes/mysql/faq.md.hash | 1 - .../data-ingestion/clickpipes/mysql/index.md | 117 - .../clickpipes/mysql/index.md.hash | 1 - .../clickpipes/mysql/source/aurora.md | 134 - .../clickpipes/mysql/source/aurora.md.hash | 1 - .../clickpipes/mysql/source/gcp.md | 85 - .../clickpipes/mysql/source/gcp.md.hash | 1 - .../clickpipes/mysql/source/rds.md | 138 - .../clickpipes/mysql/source/rds.md.hash | 1 - .../clickpipes/mysql/source/rds_maria.md | 116 - .../clickpipes/mysql/source/rds_maria.md.hash | 1 - .../clickpipes/object-storage.md | 183 - .../clickpipes/object-storage.md.hash | 1 - .../clickpipes/postgres/add_table.md | 28 - .../clickpipes/postgres/add_table.md.hash | 1 - .../clickpipes/postgres/deduplication.md | 210 - .../clickpipes/postgres/deduplication.md.hash | 1 - .../data-ingestion/clickpipes/postgres/faq.md | 281 - .../clickpipes/postgres/faq.md.hash | 1 - .../clickpipes/postgres/index.md | 158 - .../clickpipes/postgres/index.md.hash | 1 - .../clickpipes/postgres/maintenance.md | 18 - .../clickpipes/postgres/maintenance.md.hash | 1 - .../clickpipes/postgres/ordering_keys.md | 56 - .../clickpipes/postgres/ordering_keys.md.hash | 1 - .../clickpipes/postgres/pause_and_resume.md | 51 - .../postgres/pause_and_resume.md.hash | 1 - .../postgres/postgres_generated_columns.md | 32 - .../postgres_generated_columns.md.hash | 1 - .../clickpipes/postgres/remove_table.md | 26 - .../clickpipes/postgres/remove_table.md.hash | 1 - .../clickpipes/postgres/schema-changes.md | 16 - .../postgres/schema-changes.md.hash | 1 - .../clickpipes/postgres/source/aurora.md | 135 - .../clickpipes/postgres/source/aurora.md.hash | 1 - .../source/azure-flexible-server-postgres.md | 83 - .../azure-flexible-server-postgres.md.hash | 1 - .../postgres/source/crunchy-postgres.md | 67 - .../postgres/source/crunchy-postgres.md.hash | 1 - .../clickpipes/postgres/source/generic.md | 122 - .../postgres/source/generic.md.hash | 1 - .../postgres/source/google-cloudsql.md | 107 - .../postgres/source/google-cloudsql.md.hash | 1 - .../postgres/source/neon-postgres.md | 77 - 
.../postgres/source/neon-postgres.md.hash | 1 - .../clickpipes/postgres/source/rds.md | 121 - .../clickpipes/postgres/source/rds.md.hash | 1 - .../clickpipes/postgres/source/supabase.md | 88 - .../postgres/source/supabase.md.hash | 1 - .../clickpipes/postgres/source/timescale.md | 105 - .../postgres/source/timescale.md.hash | 1 - .../clickpipes/postgres/table_resync.md | 27 - .../clickpipes/postgres/table_resync.md.hash | 1 - .../clickpipes/postgres/toast.md | 65 - .../clickpipes/postgres/toast.md.hash | 1 - .../clickpipes/secure-kinesis.md | 110 - .../clickpipes/secure-kinesis.md.hash | 1 - .../data-formats/_category_.yml | 7 - .../data-formats/arrow-avro-orc.md | 170 - .../data-formats/arrow-avro-orc.md.hash | 1 - .../data-formats/assets/arrays.json | 3 - .../data-formats/assets/capnp.bin | Bin 55432 -> 0 bytes .../data-formats/assets/columns-array.json | 5 - .../data-formats/assets/columns.json | 5 - .../data-formats/assets/custom.json | 5 - .../data-formats/assets/data.arrow | Bin 60178 -> 0 bytes .../data-formats/assets/data.avro | Bin 24103 -> 0 bytes .../data-formats/assets/data.binary | Bin 26213 -> 0 bytes .../data-formats/assets/data.bson | Bin 165 -> 0 bytes .../data-formats/assets/data.clickhouse | 59 - .../data-formats/assets/data.msgpk | Bin 25544 -> 0 bytes .../data-formats/assets/data.orc | Bin 44383 -> 0 bytes .../data-formats/assets/data.parquet | Bin 23974 -> 0 bytes .../data-formats/assets/data_csv_types.csv | 102 - .../data-formats/assets/data_small.csv | 1000 -- .../data-formats/assets/data_small.tsv | 1000 -- .../data-formats/assets/data_small_custom.txt | 1 - .../assets/data_small_headers.csv | 1001 -- .../data-formats/assets/dump.sql | 1 - .../data-formats/assets/error.log | 1000 -- .../data-formats/assets/export.parquet | Bin 22969 -> 0 bytes .../data-formats/assets/html.results | 14 - .../data-formats/assets/html.row | 4 - .../data-formats/assets/list-nested.json | 29 - .../data-formats/assets/list.json | 17 - .../data-formats/assets/mysql.sql | 51 - .../data-formats/assets/object-per-line.json | 3 - .../data-formats/assets/objects.json | 17 - .../data-formats/assets/out.html | 53 - .../data-formats/assets/output.results | 5 - .../data-formats/assets/output.rows | 1 - .../data-formats/assets/proto.bin | Bin 33411 -> 0 bytes .../data-formats/assets/row.template | 1 - .../data-formats/assets/schema.capnp | 7 - .../data-formats/assets/schema.proto | 7 - .../data-formats/assets/some_data.sql | 8 - .../data-formats/assets/some_data.tsv | 2000 ---- .../data-formats/assets/time.parquet | Bin 663 -> 0 bytes .../data-ingestion/data-formats/binary.md | 241 - .../data-formats/binary.md.hash | 1 - .../data-ingestion/data-formats/csv-tsv.md | 380 - .../data-formats/csv-tsv.md.hash | 1 - .../data-ingestion/data-formats/intro.md | 47 - .../data-ingestion/data-formats/intro.md.hash | 1 - .../data-formats/json/exporting.md | 180 - .../data-formats/json/exporting.md.hash | 1 - .../data-formats/json/formats.md | 462 - .../data-formats/json/formats.md.hash | 1 - .../data-formats/json/inference.md | 399 - .../data-formats/json/inference.md.hash | 1 - .../data-ingestion/data-formats/json/intro.md | 38 - .../data-formats/json/intro.md.hash | 1 - .../data-formats/json/loading.md | 218 - .../data-formats/json/loading.md.hash | 1 - .../data-ingestion/data-formats/json/other.md | 664 -- .../data-formats/json/other.md.hash | 1 - .../data-formats/json/schema.md | 960 -- .../data-formats/json/schema.md.hash | 1 - .../data-ingestion/data-formats/parquet.md | 197 - 
.../data-formats/parquet.md.hash | 1 - .../data-ingestion/data-formats/sql.md | 117 - .../data-ingestion/data-formats/sql.md.hash | 1 - .../data-formats/templates-regex.md | 252 - .../data-formats/templates-regex.md.hash | 1 - .../data-ingestion/data-ingestion-index.md | 39 - .../data-ingestion-index.md.hash | 1 - .../data-ingestion/data-sources-index.md | 63 - .../data-ingestion/data-sources-index.md.hash | 1 - .../data-ingestion/dbms/_category_.yml | 7 - .../data-ingestion/dbms/dynamodb/index.md | 147 - .../dbms/dynamodb/index.md.hash | 1 - .../dbms/jdbc-with-clickhouse.md | 181 - .../dbms/jdbc-with-clickhouse.md.hash | 1 - .../data-ingestion/dbms/mysql/index.md | 158 - .../data-ingestion/dbms/mysql/index.md.hash | 1 - .../dbms/odbc-with-clickhouse.md | 11 - .../dbms/odbc-with-clickhouse.md.hash | 1 - .../postgresql/connecting-to-postgresql.md | 352 - .../connecting-to-postgresql.md.hash | 1 - .../postgresql/data-type-mappings.md.hash | 1 - .../dbms/postgresql/inserting-data.md | 22 - .../dbms/postgresql/inserting-data.md.hash | 1 - .../postgresql/postgres-vs-clickhouse.md.hash | 1 - .../rewriting-postgres-queries.md.hash | 1 - .../integrations/data-ingestion/emqx/index.md | 287 - .../data-ingestion/emqx/index.md.hash | 1 - .../data-ingestion/etl-tools/_category_.yml | 8 - .../etl-tools/airbyte-and-clickhouse.md | 178 - .../etl-tools/airbyte-and-clickhouse.md.hash | 1 - .../data-ingestion/etl-tools/apache-beam.md | 152 - .../etl-tools/apache-beam.md.hash | 1 - .../data-ingestion/etl-tools/dbt/index.md | 1085 -- .../etl-tools/dbt/index.md.hash | 1 - .../etl-tools/dlt-and-clickhouse.md | 233 - .../etl-tools/dlt-and-clickhouse.md.hash | 1 - .../etl-tools/fivetran/index.md | 47 - .../etl-tools/fivetran/index.md.hash | 1 - .../etl-tools/nifi-and-clickhouse.md | 161 - .../etl-tools/nifi-and-clickhouse.md.hash | 1 - .../etl-tools/vector-to-clickhouse.md | 191 - .../etl-tools/vector-to-clickhouse.md.hash | 1 - .../integrations/data-ingestion/gcs/index.md | 639 -- .../data-ingestion/gcs/index.md.hash | 1 - .../google-dataflow/dataflow.md | 36 - .../google-dataflow/dataflow.md.hash | 1 - .../google-dataflow/java-runner.md | 26 - .../google-dataflow/java-runner.md.hash | 1 - .../google-dataflow/templates.md | 32 - .../google-dataflow/templates.md.hash | 1 - .../templates/bigquery-to-clickhouse.md | 148 - .../templates/bigquery-to-clickhouse.md.hash | 1 - .../data-ingestion/insert-local-files.md | 122 - .../data-ingestion/insert-local-files.md.hash | 1 - .../kafka/confluent/_category_.yml | 8 - .../kafka/confluent/custom-connector.md | 101 - .../kafka/confluent/custom-connector.md.hash | 1 - .../data-ingestion/kafka/confluent/index.md | 17 - .../kafka/confluent/index.md.hash | 1 - .../kafka/confluent/kafka-connect-http.md | 206 - .../confluent/kafka-connect-http.md.hash | 1 - .../data-ingestion/kafka/index.md | 57 - .../data-ingestion/kafka/index.md.hash | 1 - .../kafka/kafka-clickhouse-connect-sink.md | 405 - .../kafka-clickhouse-connect-sink.md.hash | 1 - .../kafka/kafka-connect-jdbc.md | 151 - .../kafka/kafka-connect-jdbc.md.hash | 1 - .../kafka-table-engine-named-collections.md | 222 - ...fka-table-engine-named-collections.md.hash | 1 - .../kafka/kafka-table-engine.md | 495 - .../kafka/kafka-table-engine.md.hash | 1 - .../data-ingestion/kafka/kafka-vector.md | 131 - .../data-ingestion/kafka/kafka-vector.md.hash | 1 - .../data-ingestion/kafka/msk/index.md | 96 - .../data-ingestion/kafka/msk/index.md.hash | 1 - .../data-ingestion/redshift/index.md | 262 - .../data-ingestion/redshift/index.md.hash 
| 1 - .../integrations/data-ingestion/s3-minio.md | 47 - .../data-ingestion/s3-minio.md.hash | 1 - .../integrations/data-ingestion/s3/index.md | 1275 --- .../data-ingestion/s3/index.md.hash | 1 - .../data-ingestion/s3/performance.md | 383 - .../data-ingestion/s3/performance.md.hash | 1 - .../integrations/data-sources/cassandra.md | 13 - .../data-sources/cassandra.md.hash | 1 - .../integrations/data-sources/deltalake.md | 15 - .../data-sources/deltalake.md.hash | 1 - .../current/integrations/data-sources/hive.md | 11 - .../integrations/data-sources/hive.md.hash | 1 - .../current/integrations/data-sources/hudi.md | 11 - .../integrations/data-sources/hudi.md.hash | 1 - .../integrations/data-sources/iceberg.md | 16 - .../integrations/data-sources/iceberg.md.hash | 1 - .../integrations/data-sources/mongodb.md | 11 - .../integrations/data-sources/mongodb.md.hash | 1 - .../integrations/data-sources/mysql.md | 11 - .../integrations/data-sources/mysql.md.hash | 1 - .../current/integrations/data-sources/nats.md | 11 - .../integrations/data-sources/nats.md.hash | 1 - .../integrations/data-sources/postgres.md | 13 - .../data-sources/postgres.md.hash | 1 - .../integrations/data-sources/rabbitmq.md | 11 - .../data-sources/rabbitmq.md.hash | 1 - .../integrations/data-sources/redis.md | 15 - .../integrations/data-sources/redis.md.hash | 1 - .../integrations/data-sources/rocksdb.md | 11 - .../integrations/data-sources/rocksdb.md.hash | 1 - .../integrations/data-sources/sqlite.md | 11 - .../integrations/data-sources/sqlite.md.hash | 1 - .../data-visualization/_category_.yml | 8 - .../astrato-and-clickhouse.md | 124 - .../astrato-and-clickhouse.md.hash | 1 - .../chartbrew-and-clickhouse.md | 124 - .../chartbrew-and-clickhouse.md.hash | 1 - .../data-visualization/deepnote.md | 59 - .../data-visualization/deepnote.md.hash | 1 - .../draxlr-and-clickhouse.md | 105 - .../draxlr-and-clickhouse.md.hash | 1 - .../embeddable-and-clickhouse.md | 76 - .../embeddable-and-clickhouse.md.hash | 1 - .../explo-and-clickhouse.md | 141 - .../explo-and-clickhouse.md.hash | 1 - .../data-visualization/grafana/config.md | 316 - .../data-visualization/grafana/config.md.hash | 1 - .../data-visualization/grafana/index.md | 114 - .../data-visualization/grafana/index.md.hash | 1 - .../grafana/query-builder.md | 261 - .../grafana/query-builder.md.hash | 1 - .../hashboard-and-clickhouse.md | 60 - .../hashboard-and-clickhouse.md.hash | 1 - .../integrations/data-visualization/index.md | 89 - .../data-visualization/index.md.hash | 1 - .../looker-and-clickhouse.md | 76 - .../looker-and-clickhouse.md.hash | 1 - .../looker-studio-and-clickhouse.md | 89 - .../looker-studio-and-clickhouse.md.hash | 1 - .../luzmo-and-clickhouse.md | 74 - .../luzmo-and-clickhouse.md.hash | 1 - .../metabase-and-clickhouse.md | 109 - .../metabase-and-clickhouse.md.hash | 1 - .../mitzu-and-clickhouse.md | 176 - .../mitzu-and-clickhouse.md.hash | 1 - .../data-visualization/omni-and-clickhouse.md | 43 - .../omni-and-clickhouse.md.hash | 1 - .../powerbi-and-clickhouse.md | 246 - .../powerbi-and-clickhouse.md.hash | 1 - .../quicksight-and-clickhouse.md | 168 - .../quicksight-and-clickhouse.md.hash | 1 - .../rocketbi-and-clickhouse.md | 168 - .../rocketbi-and-clickhouse.md.hash | 1 - .../splunk-and-clickhouse.md | 198 - .../splunk-and-clickhouse.md.hash | 1 - .../superset-and-clickhouse.md | 132 - .../superset-and-clickhouse.md.hash | 1 - .../tableau/tableau-analysis-tips.md | 70 - .../tableau/tableau-analysis-tips.md.hash | 1 - .../tableau/tableau-and-clickhouse.md | 183 
- .../tableau/tableau-and-clickhouse.md.hash | 1 - .../tableau/tableau-connection-tips.md | 56 - .../tableau/tableau-connection-tips.md.hash | 1 - .../tableau/tableau-online-and-clickhouse.md | 108 - .../tableau-online-and-clickhouse.md.hash | 1 - .../zingdata-and-clickhouse.md | 100 - .../zingdata-and-clickhouse.md.hash | 1 - .../current/integrations/index.mdx | 395 - .../current/integrations/index.mdx.hash | 1 - .../language-clients/_category_.yml | 8 - .../integrations/language-clients/go/index.md | 2556 ----- .../language-clients/go/index.md.hash | 1 - .../integrations/language-clients/index.md | 27 - .../language-clients/index.md.hash | 1 - .../language-clients/java/client-v1.md.hash | 1 - .../language-clients/java/client.md.hash | 1 - .../java/client/_snippets/_v0_7.mdx | 339 - .../java/client/_snippets/_v0_7.mdx.hash | 1 - .../java/client/_snippets/_v0_8.mdx | 589 - .../java/client/_snippets/_v0_8.mdx.hash | 1 - .../language-clients/java/client/client.mdx | 27 - .../java/client/client.mdx.hash | 1 - .../language-clients/java/index.md | 191 - .../language-clients/java/index.md.hash | 1 - .../language-clients/java/jdbc.md.hash | 1 - .../java/jdbc/_snippets/_v0_7.mdx | 391 - .../java/jdbc/_snippets/_v0_7.mdx.hash | 1 - .../java/jdbc/_snippets/_v0_8.mdx | 225 - .../java/jdbc/_snippets/_v0_8.mdx.hash | 1 - .../language-clients/java/jdbc/jdbc.mdx | 28 - .../language-clients/java/jdbc/jdbc.mdx.hash | 1 - .../language-clients/java/r2dbc.md | 85 - .../language-clients/java/r2dbc.md.hash | 1 - .../integrations/language-clients/js.md | 1265 --- .../integrations/language-clients/js.md.hash | 1 - .../language-clients/python/index.md | 904 -- .../language-clients/python/index.md.hash | 1 - .../integrations/language-clients/rust.md | 564 - .../language-clients/rust.md.hash | 1 - .../integrations/migration/_category_.yml | 8 - .../migration/clickhouse-local-etl.md | 148 - .../migration/clickhouse-local-etl.md.hash | 1 - .../migration/clickhouse-to-cloud.md | 211 - .../migration/clickhouse-to-cloud.md.hash | 1 - .../migration/etl-tool-to-clickhouse.md | 33 - .../migration/etl-tool-to-clickhouse.md.hash | 1 - .../current/integrations/migration/index.md | 27 - .../integrations/migration/index.md.hash | 1 - .../migration/object-storage-to-clickhouse.md | 31 - .../object-storage-to-clickhouse.md.hash | 1 - .../integrations/migration/overview.md | 41 - .../integrations/migration/overview.md.hash | 1 - .../current/integrations/migration/rockset.md | 157 - .../integrations/migration/rockset.md.hash | 1 - .../migration/upload-a-csv-file.md | 39 - .../migration/upload-a-csv-file.md.hash | 1 - .../current/integrations/misc/index.md | 20 - .../current/integrations/misc/index.md.hash | 1 - .../current/integrations/prometheus.md | 346 - .../current/integrations/prometheus.md.hash | 1 - .../integrations/sql-clients/_category_.yml | 8 - .../integrations/sql-clients/datagrip.md | 64 - .../integrations/sql-clients/datagrip.md.hash | 1 - .../integrations/sql-clients/dbeaver.md | 84 - .../integrations/sql-clients/dbeaver.md.hash | 1 - .../integrations/sql-clients/dbvisualizer.md | 57 - .../sql-clients/dbvisualizer.md.hash | 1 - .../current/integrations/sql-clients/index.md | 31 - .../integrations/sql-clients/index.md.hash | 1 - .../integrations/sql-clients/jupysql.md | 417 - .../integrations/sql-clients/jupysql.md.hash | 1 - .../integrations/sql-clients/marimo.md | 122 - .../integrations/sql-clients/marimo.md.hash | 1 - .../integrations/sql-clients/qstudio.md | 67 - .../integrations/sql-clients/qstudio.md.hash | 1 - 
.../integrations/sql-clients/sql-console.md | 407 - .../sql-clients/sql-console.md.hash | 1 - .../integrations/sql-clients/tablum.md | 73 - .../integrations/sql-clients/tablum.md.hash | 1 - .../tools/data-integration/easypanel/index.md | 30 - .../data-integration/easypanel/index.md.hash | 1 - .../tools/data-integration/index.md | 20 - .../tools/data-integration/index.md.hash | 1 - .../tools/data-integration/retool/index.md | 65 - .../data-integration/retool/index.md.hash | 1 - .../tools/data-integration/splunk/index.md | 111 - .../data-integration/splunk/index.md.hash | 1 - .../current/integrations/tools/index.md | 20 - .../current/integrations/tools/index.md.hash | 1 - .../current/interfaces/cli.md | 650 -- .../current/interfaces/cli.md.hash | 1 - .../current/interfaces/cpp.md | 20 - .../current/interfaces/cpp.md.hash | 1 - .../current/interfaces/formats.md | 484 - .../current/interfaces/formats.md.hash | 1 - .../current/interfaces/formats/Arrow/Arrow.md | 97 - .../interfaces/formats/Arrow/Arrow.md.hash | 1 - .../interfaces/formats/Arrow/ArrowStream.md | 24 - .../formats/Arrow/ArrowStream.md.hash | 1 - .../current/interfaces/formats/Avro/Avro.md | 98 - .../interfaces/formats/Avro/Avro.md.hash | 1 - .../interfaces/formats/Avro/AvroConfluent.md | 73 - .../formats/Avro/AvroConfluent.md.hash | 1 - .../Avro/_snippets/data-types-matching.md | 45 - .../_snippets/data-types-matching.md.hash | 1 - .../current/interfaces/formats/BSONEachRow.md | 93 - .../interfaces/formats/BSONEachRow.md.hash | 1 - .../current/interfaces/formats/CSV/CSV.md | 73 - .../interfaces/formats/CSV/CSV.md.hash | 1 - .../interfaces/formats/CSV/CSVWithNames.md | 29 - .../formats/CSV/CSVWithNames.md.hash | 1 - .../formats/CSV/CSVWithNamesAndTypes.md | 32 - .../formats/CSV/CSVWithNamesAndTypes.md.hash | 1 - .../current/interfaces/formats/CapnProto.md | 119 - .../interfaces/formats/CapnProto.md.hash | 1 - .../CustomSeparated/CustomSeparated.md | 45 - .../CustomSeparated/CustomSeparated.md.hash | 1 - .../CustomSeparatedIgnoreSpaces.md | 15 - .../CustomSeparatedIgnoreSpaces.md.hash | 1 - .../CustomSeparatedIgnoreSpacesWithNames.md | 15 - ...stomSeparatedIgnoreSpacesWithNames.md.hash | 1 - ...mSeparatedIgnoreSpacesWithNamesAndTypes.md | 16 - ...ratedIgnoreSpacesWithNamesAndTypes.md.hash | 1 - .../CustomSeparatedWithNames.md | 31 - .../CustomSeparatedWithNames.md.hash | 1 - .../CustomSeparatedWithNamesAndTypes.md | 32 - .../CustomSeparatedWithNamesAndTypes.md.hash | 1 - .../current/interfaces/formats/DWARF.md | 85 - .../current/interfaces/formats/DWARF.md.hash | 1 - .../current/interfaces/formats/Form.md | 45 - .../current/interfaces/formats/Form.md.hash | 1 - .../current/interfaces/formats/HiveText.md | 15 - .../interfaces/formats/HiveText.md.hash | 1 - .../current/interfaces/formats/JSON/JSON.md | 111 - .../interfaces/formats/JSON/JSON.md.hash | 1 - .../interfaces/formats/JSON/JSONAsObject.md | 68 - .../formats/JSON/JSONAsObject.md.hash | 1 - .../interfaces/formats/JSON/JSONAsString.md | 67 - .../formats/JSON/JSONAsString.md.hash | 1 - .../interfaces/formats/JSON/JSONColumns.md | 44 - .../formats/JSON/JSONColumns.md.hash | 1 - .../formats/JSON/JSONColumnsWithMetadata.md | 71 - .../JSON/JSONColumnsWithMetadata.md.hash | 1 - .../interfaces/formats/JSON/JSONCompact.md | 62 - .../formats/JSON/JSONCompact.md.hash | 1 - .../formats/JSON/JSONCompactColumns.md | 38 - .../formats/JSON/JSONCompactColumns.md.hash | 1 - .../formats/JSON/JSONCompactEachRow.md | 32 - .../formats/JSON/JSONCompactEachRow.md.hash | 1 - 
.../JSON/JSONCompactEachRowWithNames.md | 32 - .../JSON/JSONCompactEachRowWithNames.md.hash | 1 - .../JSONCompactEachRowWithNamesAndTypes.md | 32 - ...SONCompactEachRowWithNamesAndTypes.md.hash | 1 - .../formats/JSON/JSONCompactStrings.md | 62 - .../formats/JSON/JSONCompactStrings.md.hash | 1 - .../formats/JSON/JSONCompactStringsEachRow.md | 32 - .../JSON/JSONCompactStringsEachRow.md.hash | 1 - .../JSONCompactStringsEachRowWithNames.md | 28 - ...JSONCompactStringsEachRowWithNames.md.hash | 1 - ...NCompactStringsEachRowWithNamesAndTypes.md | 25 - ...actStringsEachRowWithNamesAndTypes.md.hash | 1 - .../interfaces/formats/JSON/JSONEachRow.md | 27 - .../formats/JSON/JSONEachRow.md.hash | 1 - .../formats/JSON/JSONEachRowWithProgress.md | 31 - .../JSON/JSONEachRowWithProgress.md.hash | 1 - .../interfaces/formats/JSON/JSONLines.md | 15 - .../interfaces/formats/JSON/JSONLines.md.hash | 1 - .../formats/JSON/JSONObjectEachRow.md | 240 - .../formats/JSON/JSONObjectEachRow.md.hash | 1 - .../interfaces/formats/JSON/JSONStrings.md | 76 - .../formats/JSON/JSONStrings.md.hash | 1 - .../formats/JSON/JSONStringsEachRow.md | 30 - .../formats/JSON/JSONStringsEachRow.md.hash | 1 - .../JSON/JSONStringsEachRowWithProgress.md | 24 - .../JSONStringsEachRowWithProgress.md.hash | 1 - .../formats/JSON/PrettyJSONEachRow.md | 56 - .../formats/JSON/PrettyJSONEachRow.md.hash | 1 - .../formats/JSON/format-settings.md | 40 - .../formats/JSON/format-settings.md.hash | 1 - .../formats/LineAsString/LineAsString.md | 39 - .../formats/LineAsString/LineAsString.md.hash | 1 - .../LineAsString/LineAsStringWithNames.md | 43 - .../LineAsStringWithNames.md.hash | 1 - .../LineAsStringWithNamesAndTypes.md | 44 - .../LineAsStringWithNamesAndTypes.md.hash | 1 - .../current/interfaces/formats/Markdown.md | 36 - .../interfaces/formats/Markdown.md.hash | 1 - .../current/interfaces/formats/MsgPack.md | 62 - .../interfaces/formats/MsgPack.md.hash | 1 - .../current/interfaces/formats/MySQLDump.md | 89 - .../interfaces/formats/MySQLDump.md.hash | 1 - .../current/interfaces/formats/MySQLWire.md | 15 - .../interfaces/formats/MySQLWire.md.hash | 1 - .../current/interfaces/formats/Native.md | 33 - .../current/interfaces/formats/Native.md.hash | 1 - .../current/interfaces/formats/Npy.md | 76 - .../current/interfaces/formats/Npy.md.hash | 1 - .../current/interfaces/formats/Null.md | 53 - .../current/interfaces/formats/Null.md.hash | 1 - .../current/interfaces/formats/ODBCDriver2.md | 15 - .../interfaces/formats/ODBCDriver2.md.hash | 1 - .../current/interfaces/formats/ORC.md | 79 - .../current/interfaces/formats/ORC.md.hash | 1 - .../current/interfaces/formats/One.md | 45 - .../current/interfaces/formats/One.md.hash | 1 - .../interfaces/formats/Parquet/Parquet.md | 107 - .../formats/Parquet/Parquet.md.hash | 1 - .../formats/Parquet/ParquetMetadata.md | 145 - .../formats/Parquet/ParquetMetadata.md.hash | 1 - .../interfaces/formats/PostgreSQLWire.md | 15 - .../interfaces/formats/PostgreSQLWire.md.hash | 1 - .../interfaces/formats/Pretty/Pretty.md | 98 - .../interfaces/formats/Pretty/Pretty.md.hash | 1 - .../formats/Pretty/PrettyCompact.md | 30 - .../formats/Pretty/PrettyCompact.md.hash | 1 - .../formats/Pretty/PrettyCompactMonoBlock.md | 27 - .../Pretty/PrettyCompactMonoBlock.md.hash | 1 - .../formats/Pretty/PrettyCompactNoEscapes.md | 27 - .../Pretty/PrettyCompactNoEscapes.md.hash | 1 - .../Pretty/PrettyCompactNoEscapesMonoBlock.md | 27 - .../PrettyCompactNoEscapesMonoBlock.md.hash | 1 - .../formats/Pretty/PrettyMonoBlock.md | 27 - 
.../formats/Pretty/PrettyMonoBlock.md.hash | 1 - .../formats/Pretty/PrettyNoEscapes.md | 37 - .../formats/Pretty/PrettyNoEscapes.md.hash | 1 - .../Pretty/PrettyNoEscapesMonoBlock.md | 27 - .../Pretty/PrettyNoEscapesMonoBlock.md.hash | 1 - .../interfaces/formats/Pretty/PrettySpace.md | 26 - .../formats/Pretty/PrettySpace.md.hash | 1 - .../formats/Pretty/PrettySpaceMonoBlock.md | 26 - .../Pretty/PrettySpaceMonoBlock.md.hash | 1 - .../formats/Pretty/PrettySpaceNoEscapes.md | 27 - .../Pretty/PrettySpaceNoEscapes.md.hash | 1 - .../Pretty/PrettySpaceNoEscapesMonoBlock.md | 27 - .../PrettySpaceNoEscapesMonoBlock.md.hash | 1 - .../common-pretty-format-settings.md | 20 - .../common-pretty-format-settings.md.hash | 1 - .../current/interfaces/formats/Prometheus.md | 97 - .../interfaces/formats/Prometheus.md.hash | 1 - .../interfaces/formats/Protobuf/Protobuf.md | 131 - .../formats/Protobuf/Protobuf.md.hash | 1 - .../formats/Protobuf/ProtobufList.md | 51 - .../formats/Protobuf/ProtobufList.md.hash | 1 - .../formats/Protobuf/ProtobufSingle.md | 26 - .../formats/Protobuf/ProtobufSingle.md.hash | 1 - .../current/interfaces/formats/RawBLOB.md | 55 - .../interfaces/formats/RawBLOB.md.hash | 1 - .../current/interfaces/formats/Regexp.md | 80 - .../current/interfaces/formats/Regexp.md.hash | 1 - .../interfaces/formats/RowBinary/RowBinary.md | 56 - .../formats/RowBinary/RowBinary.md.hash | 1 - .../RowBinary/RowBinaryWithDefaults.md | 40 - .../RowBinary/RowBinaryWithDefaults.md.hash | 1 - .../formats/RowBinary/RowBinaryWithNames.md | 33 - .../RowBinary/RowBinaryWithNames.md.hash | 1 - .../RowBinary/RowBinaryWithNamesAndTypes.md | 38 - .../RowBinaryWithNamesAndTypes.md.hash | 1 - .../common-row-binary-format-settings.md | 17 - .../common-row-binary-format-settings.md.hash | 1 - .../current/interfaces/formats/SQLInsert.md | 48 - .../interfaces/formats/SQLInsert.md.hash | 1 - .../interfaces/formats/TabSeparated/TSKV.md | 64 - .../formats/TabSeparated/TSKV.md.hash | 1 - .../formats/TabSeparated/TabSeparated.md | 120 - .../formats/TabSeparated/TabSeparated.md.hash | 1 - .../formats/TabSeparated/TabSeparatedRaw.md | 32 - .../TabSeparated/TabSeparatedRaw.md.hash | 1 - .../TabSeparated/TabSeparatedRawWithNames.md | 32 - .../TabSeparatedRawWithNames.md.hash | 1 - .../TabSeparatedRawWithNamesAndTypes.md | 33 - .../TabSeparatedRawWithNamesAndTypes.md.hash | 1 - .../TabSeparated/TabSeparatedWithNames.md | 33 - .../TabSeparatedWithNames.md.hash | 1 - .../TabSeparatedWithNamesAndTypes.md | 29 - .../TabSeparatedWithNamesAndTypes.md.hash | 1 - .../interfaces/formats/Template/Template.md | 238 - .../formats/Template/Template.md.hash | 1 - .../formats/Template/TemplateIgnoreSpaces.md | 47 - .../Template/TemplateIgnoreSpaces.md.hash | 1 - .../current/interfaces/formats/Values.md | 46 - .../current/interfaces/formats/Values.md.hash | 1 - .../current/interfaces/formats/Vertical.md | 53 - .../interfaces/formats/Vertical.md.hash | 1 - .../current/interfaces/formats/XML.md | 97 - .../current/interfaces/formats/XML.md.hash | 1 - .../current/interfaces/grpc.md | 104 - .../current/interfaces/grpc.md.hash | 1 - .../current/interfaces/http.md | 1007 -- .../current/interfaces/http.md.hash | 1 - .../current/interfaces/images/mysql0.png | Bin 696049 -> 0 bytes .../current/interfaces/images/mysql1.png | Bin 134935 -> 0 bytes .../current/interfaces/images/mysql2.png | Bin 220612 -> 0 bytes .../current/interfaces/images/mysql3.png | Bin 221229 -> 0 bytes .../current/interfaces/jdbc.md | 14 - .../current/interfaces/jdbc.md.hash | 1 - 
.../current/interfaces/mysql.md | 173 - .../current/interfaces/mysql.md.hash | 1 - ...ative-clients-and-interfaces-index.md.hash | 1 - .../native-clients-interfaces-index.md | 26 - .../native-clients-interfaces-index.md.hash | 1 - .../current/interfaces/odbc.md | 14 - .../current/interfaces/odbc.md.hash | 1 - .../current/interfaces/overview.md | 48 - .../current/interfaces/overview.md.hash | 1 - .../current/interfaces/postgresql.md | 90 - .../current/interfaces/postgresql.md.hash | 1 - .../current/interfaces/prometheus.md | 164 - .../current/interfaces/prometheus.md.hash | 1 - .../current/interfaces/schema-inference.md | 2064 ---- .../interfaces/schema-inference.md.hash | 1 - .../current/interfaces/ssh.md | 122 - .../current/interfaces/ssh.md.hash | 1 - .../current/interfaces/tcp.md | 14 - .../current/interfaces/tcp.md.hash | 1 - .../third-party/client-libraries.md | 88 - .../third-party/client-libraries.md.hash | 1 - .../current/interfaces/third-party/gui.md | 401 - .../interfaces/third-party/gui.md.hash | 1 - .../current/interfaces/third-party/index.md | 23 - .../interfaces/third-party/index.md.hash | 1 - .../interfaces/third-party/integrations.md | 121 - .../third-party/integrations.md.hash | 1 - .../current/interfaces/third-party/proxy.md | 48 - .../interfaces/third-party/proxy.md.hash | 1 - .../current/intro.md | 91 - .../current/intro.md.hash | 1 - .../current/introduction-index.md | 18 - .../current/introduction-index.md.hash | 1 - .../core-concepts/academic_overview.md.hash | 1 - .../core-concepts/academic_overview.mdx | 421 - .../core-concepts/academic_overview.mdx.hash | 1 - .../managing-data/core-concepts/index.md | 23 - .../managing-data/core-concepts/index.md.hash | 1 - .../managing-data/core-concepts/merges.md | 181 - .../core-concepts/merges.md.hash | 1 - .../managing-data/core-concepts/partitions.md | 306 - .../core-concepts/partitions.md.hash | 1 - .../managing-data/core-concepts/parts.md | 102 - .../managing-data/core-concepts/parts.md.hash | 1 - .../core-concepts/primary-indexes.md | 179 - .../core-concepts/primary-indexes.md.hash | 1 - .../managing-data/core-concepts/shards.md | 120 - .../core-concepts/shards.md.hash | 1 - .../deleting-data/delete_mutations.md | 17 - .../deleting-data/delete_mutations.md.hash | 1 - .../managing-data/deleting-data/index.md | 22 - .../managing-data/deleting-data/index.md.hash | 1 - .../managing-data/deleting-data/overview.md | 73 - .../deleting-data/overview.md.hash | 1 - .../current/managing-data/drop_partition.md | 76 - .../managing-data/drop_partition.md.hash | 1 - .../current/managing-data/truncate.md | 13 - .../current/managing-data/truncate.md.hash | 1 - .../managing-data/updating-data/index.md | 19 - .../managing-data/updating-data/index.md.hash | 1 - .../managing-data/updating-data/overview.md | 127 - .../updating-data/overview.md.hash | 1 - .../updating-data/update_mutations.md | 17 - .../updating-data/update_mutations.md.hash | 1 - .../incremental-materialized-view.md | 1183 -- .../incremental-materialized-view.md.hash | 1 - .../current/materialized-view/index.md | 21 - .../current/materialized-view/index.md.hash | 1 - .../refreshable-materialized-view.md | 388 - .../refreshable-materialized-view.md.hash | 1 - .../bigquery/equivalent-concepts.md | 469 - .../bigquery/equivalent-concepts.md.hash | 1 - .../current/migrations/bigquery/index.md | 17 - .../current/migrations/bigquery/index.md.hash | 1 - .../migrations/bigquery/loading-data.md | 132 - .../migrations/bigquery/loading-data.md.hash | 1 - 
.../bigquery/migrating-to-clickhouse-cloud.md | 541 - .../migrating-to-clickhouse-cloud.md.hash | 1 - .../current/migrations/index.md | 19 - .../current/migrations/index.md.hash | 1 - .../current/migrations/postgres/appendix.md | 192 - .../migrations/postgres/appendix.md.hash | 1 - .../postgres/data-modeling-techniques.md | 264 - .../postgres/data-modeling-techniques.md.hash | 1 - .../current/migrations/postgres/dataset.md | 190 - .../migrations/postgres/dataset.md.hash | 1 - .../postgres/designing-schemas.md.hash | 1 - .../current/migrations/postgres/index.md | 18 - .../current/migrations/postgres/index.md.hash | 1 - .../current/migrations/postgres/overview.md | 50 - .../migrations/postgres/overview.md.hash | 1 - .../postgres/replacing-merge-tree.md.hash | 1 - .../migrations/postgres/rewriting-queries.md | 278 - .../postgres/rewriting-queries.md.hash | 1 - .../current/migrations/snowflake.md | 117 - .../current/migrations/snowflake.md.hash | 1 - .../current/native-protocol/_category_.yml | 3 - .../current/native-protocol/basics.md | 154 - .../current/native-protocol/basics.md.hash | 1 - .../current/native-protocol/client.md | 132 - .../current/native-protocol/client.md.hash | 1 - .../current/native-protocol/columns.md | 102 - .../current/native-protocol/columns.md.hash | 1 - .../current/native-protocol/hash.md | 44 - .../current/native-protocol/hash.md.hash | 1 - .../current/native-protocol/server.md | 138 - .../current/native-protocol/server.md.hash | 1 - .../current/operations/_category_.yml | 4 - .../current/operations/_troubleshooting.md | 221 - .../operations/_troubleshooting.md.hash | 1 - .../operations/allocation-profiling.md | 210 - .../operations/allocation-profiling.md.hash | 1 - .../current/operations/analyzer.md | 196 - .../current/operations/analyzer.md.hash | 1 - .../current/operations/backup.md | 525 - .../current/operations/backup.md.hash | 1 - .../current/operations/caches.md | 37 - .../current/operations/caches.md.hash | 1 - .../current/operations/cluster-discovery.md | 230 - .../operations/cluster-discovery.md.hash | 1 - .../current/operations/configuration-files.md | 455 - .../operations/configuration-files.md.hash | 1 - .../external-authenticators/http.md | 102 - .../external-authenticators/http.md.hash | 1 - .../external-authenticators/index.md | 21 - .../external-authenticators/index.md.hash | 1 - .../external-authenticators/kerberos.md | 132 - .../external-authenticators/kerberos.md.hash | 1 - .../external-authenticators/ldap.md | 188 - .../external-authenticators/ldap.md.hash | 1 - .../external-authenticators/ssl-x509.md | 45 - .../external-authenticators/ssl-x509.md.hash | 1 - .../current/operations/monitoring.md | 79 - .../current/operations/monitoring.md.hash | 1 - .../current/operations/named-collections.md | 592 - .../operations/named-collections.md.hash | 1 - .../current/operations/opentelemetry.md | 71 - .../current/operations/opentelemetry.md.hash | 1 - .../profile-guided-optimization.md | 30 - .../profile-guided-optimization.md.hash | 1 - .../sampling-query-profiler.md | 82 - .../sampling-query-profiler.md.hash | 1 - .../current/operations/performance-test.md | 32 - .../operations/performance-test.md.hash | 1 - .../current/operations/query-cache.md | 146 - .../current/operations/query-cache.md.hash | 1 - .../operations/query-condition-cache.md | 73 - .../operations/query-condition-cache.md.hash | 1 - .../current/operations/quotas.md | 129 - .../current/operations/quotas.md.hash | 1 - .../_server_settings_outside_source.md | 2381 ---- 
.../_server_settings_outside_source.md.hash | 1 - .../_snippets/_system-log-parameters.md | 25 - .../_snippets/_system-log-parameters.md.hash | 1 - .../settings.md | 3824 ------- .../settings.md.hash | 1 - .../settings/composable-protocols.md | 181 - .../settings/composable-protocols.md.hash | 1 - .../settings/constraints-on-settings.md | 164 - .../settings/constraints-on-settings.md.hash | 1 - .../current/operations/settings/index.md | 30 - .../current/operations/settings/index.md.hash | 1 - .../operations/settings/memory-overcommit.md | 46 - .../settings/memory-overcommit.md.hash | 1 - .../settings/merge-tree-settings.md | 1871 ---- .../settings/merge-tree-settings.md.hash | 1 - .../current/operations/settings/overview.md | 60 - .../operations/settings/overview.md.hash | 1 - .../settings/permissions-for-queries.md | 74 - .../settings/permissions-for-queries.md.hash | 1 - .../operations/settings/query-complexity.md | 108 - .../settings/query-complexity.md.hash | 1 - .../operations/settings/server-overload.md | 25 - .../settings/server-overload.md.hash | 1 - .../operations/settings/settings-formats.md | 1748 --- .../settings/settings-formats.md.hash | 1 - .../operations/settings/settings-profiles.md | 86 - .../settings/settings-profiles.md.hash | 1 - .../settings/settings-query-level.md | 224 - .../settings/settings-query-level.md.hash | 1 - .../operations/settings/settings-users.md | 260 - .../settings/settings-users.md.hash | 1 - .../current/operations/settings/settings.md | 9970 ----------------- .../operations/settings/settings.md.hash | 1 - .../current/operations/ssl-zookeeper.md | 81 - .../current/operations/ssl-zookeeper.md.hash | 1 - .../current/operations/startup-scripts.md | 40 - .../operations/startup-scripts.md.hash | 1 - .../current/operations/storing-data.md | 964 -- .../current/operations/storing-data.md.hash | 1 - .../system-tables/asynchronous_insert_log.md | 77 - .../asynchronous_insert_log.md.hash | 1 - .../system-tables/asynchronous_inserts.md | 54 - .../asynchronous_inserts.md.hash | 1 - .../system-tables/asynchronous_loader.md | 63 - .../system-tables/asynchronous_loader.md.hash | 1 - .../system-tables/asynchronous_metric_log.md | 61 - .../asynchronous_metric_log.md.hash | 1 - .../system-tables/asynchronous_metrics.md | 512 - .../asynchronous_metrics.md.hash | 1 - .../system-tables/azure_queue_settings.md | 26 - .../azure_queue_settings.md.hash | 1 - .../operations/system-tables/backup_log.md | 163 - .../system-tables/backup_log.md.hash | 1 - .../system-tables/blob_storage_log.md | 70 - .../system-tables/blob_storage_log.md.hash | 1 - .../operations/system-tables/build_options.md | 33 - .../system-tables/build_options.md.hash | 1 - .../operations/system-tables/clusters.md | 92 - .../operations/system-tables/clusters.md.hash | 1 - .../operations/system-tables/columns.md | 104 - .../operations/system-tables/columns.md.hash | 1 - .../operations/system-tables/contributors.md | 49 - .../system-tables/contributors.md.hash | 1 - .../operations/system-tables/crash-log.md | 59 - .../system-tables/crash-log.md.hash | 1 - .../operations/system-tables/current-roles.md | 18 - .../system-tables/current-roles.md.hash | 1 - .../operations/system-tables/dashboards.md | 77 - .../system-tables/dashboards.md.hash | 1 - .../system-tables/data_skipping_indices.md | 60 - .../data_skipping_indices.md.hash | 1 - .../system-tables/data_type_families.md | 43 - .../system-tables/data_type_families.md.hash | 1 - .../system-tables/database_engines.md | 32 - 
.../system-tables/database_engines.md.hash | 1 - .../operations/system-tables/databases.md | 50 - .../system-tables/databases.md.hash | 1 - .../system-tables/detached_parts.md | 18 - .../system-tables/detached_parts.md.hash | 1 - .../system-tables/detached_tables.md | 41 - .../system-tables/detached_tables.md.hash | 1 - .../operations/system-tables/dictionaries.md | 99 - .../system-tables/dictionaries.md.hash | 1 - .../current/operations/system-tables/disks.md | 38 - .../operations/system-tables/disks.md.hash | 1 - .../system-tables/distributed_ddl_queue.md | 80 - .../distributed_ddl_queue.md.hash | 1 - .../system-tables/distribution_queue.md | 58 - .../system-tables/distribution_queue.md.hash | 1 - .../operations/system-tables/dns_cache.md | 46 - .../system-tables/dns_cache.md.hash | 1 - .../system-tables/dropped_tables.md | 44 - .../system-tables/dropped_tables.md.hash | 1 - .../system-tables/dropped_tables_parts.md | 28 - .../dropped_tables_parts.md.hash | 1 - .../operations/system-tables/enabled-roles.md | 20 - .../system-tables/enabled-roles.md.hash | 1 - .../operations/system-tables/error_log.md | 48 - .../system-tables/error_log.md.hash | 1 - .../operations/system-tables/errors.md | 51 - .../operations/system-tables/errors.md.hash | 1 - .../operations/system-tables/events.md | 47 - .../operations/system-tables/events.md.hash | 1 - .../operations/system-tables/functions.md | 46 - .../system-tables/functions.md.hash | 1 - .../operations/system-tables/grants.md | 31 - .../operations/system-tables/grants.md.hash | 1 - .../system-tables/graphite_retentions.md | 24 - .../system-tables/graphite_retentions.md.hash | 1 - .../system-tables/histogram_metrics.md | 51 - .../system-tables/histogram_metrics.md.hash | 1 - .../system-tables/iceberg_history.md | 28 - .../system-tables/iceberg_history.md.hash | 1 - .../current/operations/system-tables/index.md | 126 - .../operations/system-tables/index.md.hash | 1 - .../system-tables/information_schema.md | 408 - .../system-tables/information_schema.md.hash | 1 - .../operations/system-tables/jemalloc_bins.md | 54 - .../system-tables/jemalloc_bins.md.hash | 1 - .../system-tables/kafka_consumers.md | 67 - .../system-tables/kafka_consumers.md.hash | 1 - .../system-tables/latency_buckets.md | 42 - .../system-tables/latency_buckets.md.hash | 1 - .../operations/system-tables/latency_log.md | 58 - .../system-tables/latency_log.md.hash | 1 - .../operations/system-tables/licenses.md | 39 - .../operations/system-tables/licenses.md.hash | 1 - .../system-tables/merge_tree_settings.md | 108 - .../system-tables/merge_tree_settings.md.hash | 1 - .../operations/system-tables/merges.md | 37 - .../operations/system-tables/merges.md.hash | 1 - .../operations/system-tables/metric_log.md | 73 - .../system-tables/metric_log.md.hash | 1 - .../operations/system-tables/metrics.md | 766 -- .../operations/system-tables/metrics.md.hash | 1 - .../current/operations/system-tables/moves.md | 51 - .../operations/system-tables/moves.md.hash | 1 - .../operations/system-tables/mutations.md | 78 - .../system-tables/mutations.md.hash | 1 - .../operations/system-tables/numbers.md | 66 - .../operations/system-tables/numbers.md.hash | 1 - .../operations/system-tables/numbers_mt.md | 38 - .../system-tables/numbers_mt.md.hash | 1 - .../current/operations/system-tables/one.md | 34 - .../operations/system-tables/one.md.hash | 1 - .../system-tables/opentelemetry_span_log.md | 64 - .../opentelemetry_span_log.md.hash | 1 - .../operations/system-tables/overview.md | 253 - 
.../operations/system-tables/overview.md.hash | 1 - .../operations/system-tables/part_log.md | 97 - .../operations/system-tables/part_log.md.hash | 1 - .../current/operations/system-tables/parts.md | 193 - .../operations/system-tables/parts.md.hash | 1 - .../operations/system-tables/parts_columns.md | 159 - .../system-tables/parts_columns.md.hash | 1 - .../operations/system-tables/processes.md | 76 - .../system-tables/processes.md.hash | 1 - .../system-tables/processors_profile_log.md | 96 - .../processors_profile_log.md.hash | 1 - .../operations/system-tables/projections.md | 51 - .../system-tables/projections.md.hash | 1 - .../operations/system-tables/query_cache.md | 51 - .../system-tables/query_cache.md.hash | 1 - .../system-tables/query_condition_cache.md | 45 - .../query_condition_cache.md.hash | 1 - .../operations/system-tables/query_log.md | 219 - .../system-tables/query_log.md.hash | 1 - .../system-tables/query_metric_log.md | 60 - .../system-tables/query_metric_log.md.hash | 1 - .../system-tables/query_thread_log.md | 133 - .../system-tables/query_thread_log.md.hash | 1 - .../system-tables/query_views_log.md | 101 - .../system-tables/query_views_log.md.hash | 1 - .../operations/system-tables/quota_limits.md | 32 - .../system-tables/quota_limits.md.hash | 1 - .../operations/system-tables/quota_usage.md | 44 - .../system-tables/quota_usage.md.hash | 1 - .../operations/system-tables/quotas.md | 38 - .../operations/system-tables/quotas.md.hash | 1 - .../operations/system-tables/quotas_usage.md | 50 - .../system-tables/quotas_usage.md.hash | 1 - .../operations/system-tables/replicas.md | 144 - .../operations/system-tables/replicas.md.hash | 1 - .../system-tables/replicated_fetches.md | 83 - .../system-tables/replicated_fetches.md.hash | 1 - .../system-tables/replication_queue.md | 100 - .../system-tables/replication_queue.md.hash | 1 - .../operations/system-tables/resources.md | 47 - .../system-tables/resources.md.hash | 1 - .../operations/system-tables/role-grants.md | 31 - .../system-tables/role-grants.md.hash | 1 - .../current/operations/system-tables/roles.md | 25 - .../operations/system-tables/roles.md.hash | 1 - .../operations/system-tables/row_policies.md | 45 - .../system-tables/row_policies.md.hash | 1 - .../system-tables/s3_queue_settings.md | 29 - .../system-tables/s3_queue_settings.md.hash | 1 - .../operations/system-tables/scheduler.md | 87 - .../system-tables/scheduler.md.hash | 1 - .../system-tables/schema_inference_cache.md | 78 - .../schema_inference_cache.md.hash | 1 - .../system-tables/server_settings.md | 76 - .../system-tables/server_settings.md.hash | 1 - .../operations/system-tables/session_log.md | 94 - .../system-tables/session_log.md.hash | 1 - .../operations/system-tables/settings.md | 154 - .../operations/system-tables/settings.md.hash | 1 - .../system-tables/settings_changes.md | 44 - .../system-tables/settings_changes.md.hash | 1 - .../settings_profile_elements.md | 41 - .../settings_profile_elements.md.hash | 1 - .../system-tables/settings_profiles.md | 34 - .../system-tables/settings_profiles.md.hash | 1 - .../operations/system-tables/stack_trace.md | 108 - .../system-tables/stack_trace.md.hash | 1 - .../system-tables/storage_policies.md | 35 - .../system-tables/storage_policies.md.hash | 1 - .../operations/system-tables/symbols.md | 42 - .../operations/system-tables/symbols.md.hash | 1 - .../system-tables/system_warnings.md | 54 - .../system-tables/system_warnings.md.hash | 1 - .../operations/system-tables/table_engines.md | 49 - 
.../system-tables/table_engines.md.hash | 1 - .../operations/system-tables/tables.md | 163 - .../operations/system-tables/tables.md.hash | 1 - .../operations/system-tables/text_log.md | 89 - .../operations/system-tables/text_log.md.hash | 1 - .../operations/system-tables/time_zones.md | 40 - .../system-tables/time_zones.md.hash | 1 - .../operations/system-tables/trace_log.md | 89 - .../system-tables/trace_log.md.hash | 1 - .../system-tables/user_processes.md | 39 - .../system-tables/user_processes.md.hash | 1 - .../current/operations/system-tables/users.md | 45 - .../operations/system-tables/users.md.hash | 1 - .../system-tables/view_refreshes.md | 54 - .../system-tables/view_refreshes.md.hash | 1 - .../operations/system-tables/workloads.md | 50 - .../system-tables/workloads.md.hash | 1 - .../operations/system-tables/zookeeper.md | 99 - .../system-tables/zookeeper.md.hash | 1 - .../system-tables/zookeeper_connection.md | 45 - .../zookeeper_connection.md.hash | 1 - .../operations/system-tables/zookeeper_log.md | 145 - .../system-tables/zookeeper_log.md.hash | 1 - .../current/operations/tips.md | 333 - .../current/operations/tips.md.hash | 1 - .../current/operations/update.md | 104 - .../current/operations/update.md.hash | 1 - .../operations/userspace-page-cache.md | 72 - .../operations/userspace-page-cache.md.hash | 1 - .../operations/utilities/backupview.md | 62 - .../operations/utilities/backupview.md.hash | 1 - .../utilities/clickhouse-benchmark.md | 150 - .../utilities/clickhouse-benchmark.md.hash | 1 - .../utilities/clickhouse-compressor.md | 33 - .../utilities/clickhouse-compressor.md.hash | 1 - .../operations/utilities/clickhouse-disks.md | 71 - .../utilities/clickhouse-disks.md.hash | 1 - .../operations/utilities/clickhouse-format.md | 119 - .../utilities/clickhouse-format.md.hash | 1 - .../utilities/clickhouse-keeper-client.md | 73 - .../clickhouse-keeper-client.md.hash | 1 - .../operations/utilities/clickhouse-local.md | 315 - .../utilities/clickhouse-local.md.hash | 1 - .../utilities/clickhouse-obfuscator.md | 77 - .../utilities/clickhouse-obfuscator.md.hash | 1 - .../current/operations/utilities/index.md | 23 - .../operations/utilities/index.md.hash | 1 - .../operations/utilities/odbc-bridge.md | 41 - .../operations/utilities/odbc-bridge.md.hash | 1 - .../current/operations/workload-scheduling.md | 298 - .../operations/workload-scheduling.md.hash | 1 - .../current/quick-start.mdx.hash | 1 - .../beta-and-experimental-features.md.hash | 1 - .../current/sql-reference/_category_.yml | 7 - .../aggregate-functions/_category_.yml | 9 - .../aggregate-functions/combinators.md | 337 - .../aggregate-functions/combinators.md.hash | 1 - .../aggregate-functions/grouping_function.md | 353 - .../grouping_function.md.hash | 1 - .../aggregate-functions/index.md | 141 - .../aggregate-functions/index.md.hash | 1 - .../parametric-functions.md | 925 -- .../parametric-functions.md.hash | 1 - .../aggregate-functions/reference/aggthrow.md | 43 - .../reference/aggthrow.md.hash | 1 - .../reference/analysis_of_variance.md | 50 - .../reference/analysis_of_variance.md.hash | 1 - .../aggregate-functions/reference/any.md | 76 - .../aggregate-functions/reference/any.md.hash | 1 - .../aggregate-functions/reference/anyheavy.md | 38 - .../reference/anyheavy.md.hash | 1 - .../aggregate-functions/reference/anylast.md | 59 - .../reference/anylast.md.hash | 1 - .../reference/approxtopk.md | 60 - .../reference/approxtopk.md.hash | 1 - .../reference/approxtopsum.md | 56 - .../reference/approxtopsum.md.hash | 1 - 
.../aggregate-functions/reference/argmax.md | 113 - .../reference/argmax.md.hash | 1 - .../aggregate-functions/reference/argmin.md | 121 - .../reference/argmin.md.hash | 1 - .../reference/arrayconcatagg.md | 34 - .../reference/arrayconcatagg.md.hash | 1 - .../aggregate-functions/reference/avg.md | 70 - .../aggregate-functions/reference/avg.md.hash | 1 - .../reference/avgweighted.md | 102 - .../reference/avgweighted.md.hash | 1 - .../aggregate-functions/reference/boundrat.md | 47 - .../reference/boundrat.md.hash | 1 - .../reference/categoricalinformationvalue.md | 18 - .../categoricalinformationvalue.md.hash | 1 - .../reference/contingency.md | 58 - .../reference/contingency.md.hash | 1 - .../aggregate-functions/reference/corr.md | 66 - .../reference/corr.md.hash | 1 - .../reference/corrmatrix.md | 60 - .../reference/corrmatrix.md.hash | 1 - .../reference/corrstable.md | 64 - .../reference/corrstable.md.hash | 1 - .../aggregate-functions/reference/count.md | 82 - .../reference/count.md.hash | 1 - .../aggregate-functions/reference/covarpop.md | 59 - .../reference/covarpop.md.hash | 1 - .../reference/covarpopmatrix.md | 60 - .../reference/covarpopmatrix.md.hash | 1 - .../reference/covarpopstable.md | 63 - .../reference/covarpopstable.md.hash | 1 - .../reference/covarsamp.md | 83 - .../reference/covarsamp.md.hash | 1 - .../reference/covarsampmatrix.md | 60 - .../reference/covarsampmatrix.md.hash | 1 - .../reference/covarsampstable.md | 79 - .../reference/covarsampstable.md.hash | 1 - .../aggregate-functions/reference/cramersv.md | 87 - .../reference/cramersv.md.hash | 1 - .../reference/cramersvbiascorrected.md | 58 - .../reference/cramersvbiascorrected.md.hash | 1 - .../aggregate-functions/reference/deltasum.md | 79 - .../reference/deltasum.md.hash | 1 - .../reference/deltasumtimestamp.md | 49 - .../reference/deltasumtimestamp.md.hash | 1 - .../reference/distinctdynamictypes.md | 49 - .../reference/distinctdynamictypes.md.hash | 1 - .../reference/distinctjsonpaths.md | 131 - .../reference/distinctjsonpaths.md.hash | 1 - .../aggregate-functions/reference/entropy.md | 49 - .../reference/entropy.md.hash | 1 - .../reference/estimateCompressionRatio.md | 86 - .../estimateCompressionRatio.md.hash | 1 - .../reference/exponentialmovingaverage.md | 206 - .../exponentialmovingaverage.md.hash | 1 - .../reference/exponentialtimedecayedavg.md | 108 - .../exponentialtimedecayedavg.md.hash | 1 - .../reference/exponentialtimedecayedcount.md | 108 - .../exponentialtimedecayedcount.md.hash | 1 - .../reference/exponentialtimedecayedmax.md | 109 - .../exponentialtimedecayedmax.md.hash | 1 - .../reference/exponentialtimedecayedsum.md | 109 - .../exponentialtimedecayedsum.md.hash | 1 - .../reference/first_value.md | 88 - .../reference/first_value.md.hash | 1 - .../reference/flame_graph.md | 100 - .../reference/flame_graph.md.hash | 1 - .../reference/grouparray.md | 53 - .../reference/grouparray.md.hash | 1 - .../reference/grouparrayarray.md | 54 - .../reference/grouparrayarray.md.hash | 1 - .../reference/grouparrayinsertat.md | 97 - .../reference/grouparrayinsertat.md.hash | 1 - .../reference/grouparrayintersect.md | 56 - .../reference/grouparrayintersect.md.hash | 1 - .../reference/grouparraylast.md | 45 - .../reference/grouparraylast.md.hash | 1 - .../reference/grouparraymovingavg.md | 84 - .../reference/grouparraymovingavg.md.hash | 1 - .../reference/grouparraymovingsum.md | 82 - .../reference/grouparraymovingsum.md.hash | 1 - .../reference/grouparraysample.md | 89 - .../reference/grouparraysample.md.hash | 1 - 
.../reference/grouparraysorted.md | 49 - .../reference/grouparraysorted.md.hash | 1 - .../reference/groupbitand.md | 52 - .../reference/groupbitand.md.hash | 1 - .../reference/groupbitmap.md | 50 - .../reference/groupbitmap.md.hash | 1 - .../reference/groupbitmapand.md | 55 - .../reference/groupbitmapand.md.hash | 1 - .../reference/groupbitmapor.md | 54 - .../reference/groupbitmapor.md.hash | 1 - .../reference/groupbitmapxor.md | 52 - .../reference/groupbitmapxor.md.hash | 1 - .../reference/groupbitor.md | 52 - .../reference/groupbitor.md.hash | 1 - .../reference/groupbitxor.md | 52 - .../reference/groupbitxor.md.hash | 1 - .../reference/groupconcat.md | 106 - .../reference/groupconcat.md.hash | 1 - .../reference/groupuniqarray.md | 18 - .../reference/groupuniqarray.md.hash | 1 - .../aggregate-functions/reference/index.md | 144 - .../reference/index.md.hash | 1 - .../reference/intervalLengthSum.md | 112 - .../reference/intervalLengthSum.md.hash | 1 - .../reference/kolmogorovsmirnovtest.md | 120 - .../reference/kolmogorovsmirnovtest.md.hash | 1 - .../aggregate-functions/reference/kurtpop.md | 31 - .../reference/kurtpop.md.hash | 1 - .../aggregate-functions/reference/kurtsamp.md | 33 - .../reference/kurtsamp.md.hash | 1 - .../reference/largestTriangleThreeBuckets.md | 75 - .../largestTriangleThreeBuckets.md.hash | 1 - .../reference/last_value.md | 85 - .../reference/last_value.md.hash | 1 - .../reference/mannwhitneyutest.md | 77 - .../reference/mannwhitneyutest.md.hash | 1 - .../aggregate-functions/reference/max.md | 26 - .../aggregate-functions/reference/max.md.hash | 1 - .../reference/maxintersections.md | 70 - .../reference/maxintersections.md.hash | 1 - .../reference/maxintersectionsposition.md | 68 - .../maxintersectionsposition.md.hash | 1 - .../aggregate-functions/reference/maxmap.md | 56 - .../reference/maxmap.md.hash | 1 - .../reference/meanztest.md | 72 - .../reference/meanztest.md.hash | 1 - .../aggregate-functions/reference/median.md | 54 - .../reference/median.md.hash | 1 - .../aggregate-functions/reference/min.md | 26 - .../aggregate-functions/reference/min.md.hash | 1 - .../aggregate-functions/reference/minmap.md | 57 - .../reference/minmap.md.hash | 1 - .../aggregate-functions/reference/quantile.md | 74 - .../reference/quantile.md.hash | 1 - .../reference/quantileGK.md | 82 - .../reference/quantileGK.md.hash | 1 - .../reference/quantilebfloat16.md | 71 - .../reference/quantilebfloat16.md.hash | 1 - .../reference/quantileddsketch.md | 64 - .../reference/quantileddsketch.md.hash | 1 - .../reference/quantiledeterministic.md | 73 - .../reference/quantiledeterministic.md.hash | 1 - .../reference/quantileexact.md | 298 - .../reference/quantileexact.md.hash | 1 - .../reference/quantileexactweighted.md | 74 - .../reference/quantileexactweighted.md.hash | 1 - .../quantileexactweightedinterpolated.md | 82 - .../quantileexactweightedinterpolated.md.hash | 1 - .../reference/quantileinterpolatedweighted.md | 74 - .../quantileinterpolatedweighted.md.hash | 1 - .../reference/quantiles.md | 176 - .../reference/quantiles.md.hash | 1 - .../reference/quantiletdigest.md | 64 - .../reference/quantiletdigest.md.hash | 1 - .../reference/quantiletdigestweighted.md | 69 - .../reference/quantiletdigestweighted.md.hash | 1 - .../reference/quantiletiming.md | 95 - .../reference/quantiletiming.md.hash | 1 - .../reference/quantiletimingweighted.md | 127 - .../reference/quantiletimingweighted.md.hash | 1 - .../aggregate-functions/reference/rankCorr.md | 64 - .../reference/rankCorr.md.hash | 1 - 
.../reference/simplelinearregression.md | 48 - .../reference/simplelinearregression.md.hash | 1 - .../reference/singlevalueornull.md | 64 - .../reference/singlevalueornull.md.hash | 1 - .../aggregate-functions/reference/skewpop.md | 31 - .../reference/skewpop.md.hash | 1 - .../aggregate-functions/reference/skewsamp.md | 33 - .../reference/skewsamp.md.hash | 1 - .../aggregate-functions/reference/sparkbar.md | 66 - .../reference/sparkbar.md.hash | 1 - .../reference/stddevpop.md | 60 - .../reference/stddevpop.md.hash | 1 - .../reference/stddevpopstable.md | 55 - .../reference/stddevpopstable.md.hash | 1 - .../reference/stddevsamp.md | 60 - .../reference/stddevsamp.md.hash | 1 - .../reference/stddevsampstable.md | 55 - .../reference/stddevsampstable.md.hash | 1 - .../reference/stochasticlinearregression.md | 79 - .../stochasticlinearregression.md.hash | 1 - .../reference/stochasticlogisticregression.md | 63 - .../stochasticlogisticregression.md.hash | 1 - .../reference/studentttest.md | 77 - .../reference/studentttest.md.hash | 1 - .../aggregate-functions/reference/sum.md | 66 - .../aggregate-functions/reference/sum.md.hash | 1 - .../aggregate-functions/reference/sumcount.md | 53 - .../reference/sumcount.md.hash | 1 - .../aggregate-functions/reference/sumkahan.md | 43 - .../reference/sumkahan.md.hash | 1 - .../aggregate-functions/reference/summap.md | 89 - .../reference/summap.md.hash | 1 - .../reference/summapwithoverflow.md | 99 - .../reference/summapwithoverflow.md.hash | 1 - .../reference/sumwithoverflow.md | 77 - .../reference/sumwithoverflow.md.hash | 1 - .../aggregate-functions/reference/theilsu.md | 55 - .../reference/theilsu.md.hash | 1 - .../aggregate-functions/reference/topk.md | 58 - .../reference/topk.md.hash | 1 - .../reference/topkweighted.md | 74 - .../reference/topkweighted.md.hash | 1 - .../aggregate-functions/reference/uniq.md | 45 - .../reference/uniq.md.hash | 1 - .../reference/uniqcombined.md | 75 - .../reference/uniqcombined.md.hash | 1 - .../reference/uniqcombined64.md | 89 - .../reference/uniqcombined64.md.hash | 1 - .../reference/uniqexact.md | 44 - .../reference/uniqexact.md.hash | 1 - .../reference/uniqhll12.md | 47 - .../reference/uniqhll12.md.hash | 1 - .../reference/uniqthetasketch.md | 43 - .../reference/uniqthetasketch.md.hash | 1 - .../aggregate-functions/reference/varpop.md | 59 - .../reference/varpop.md.hash | 1 - .../reference/varpopstable.md | 57 - .../reference/varpopstable.md.hash | 1 - .../aggregate-functions/reference/varsamp.md | 69 - .../reference/varsamp.md.hash | 1 - .../reference/varsampstable.md | 68 - .../reference/varsampstable.md.hash | 1 - .../reference/welchttest.md | 76 - .../reference/welchttest.md.hash | 1 - .../data-types/aggregatefunction.md | 89 - .../data-types/aggregatefunction.md.hash | 1 - .../current/sql-reference/data-types/array.md | 120 - .../sql-reference/data-types/array.md.hash | 1 - .../sql-reference/data-types/boolean.md | 43 - .../sql-reference/data-types/boolean.md.hash | 1 - .../data-types/data-types-binary-encoding.md | 117 - .../data-types-binary-encoding.md.hash | 1 - .../current/sql-reference/data-types/date.md | 55 - .../sql-reference/data-types/date.md.hash | 1 - .../sql-reference/data-types/date32.md | 52 - .../sql-reference/data-types/date32.md.hash | 1 - .../sql-reference/data-types/datetime.md | 200 - .../sql-reference/data-types/datetime.md.hash | 1 - .../sql-reference/data-types/datetime64.md | 130 - .../data-types/datetime64.md.hash | 1 - .../sql-reference/data-types/decimal.md | 127 - 
.../sql-reference/data-types/decimal.md.hash | 1 - .../sql-reference/data-types/domains/index.md | 34 - .../data-types/domains/index.md.hash | 1 - .../sql-reference/data-types/dynamic.md | 713 -- .../sql-reference/data-types/dynamic.md.hash | 1 - .../current/sql-reference/data-types/enum.md | 168 - .../sql-reference/data-types/enum.md.hash | 1 - .../sql-reference/data-types/fixedstring.md | 65 - .../data-types/fixedstring.md.hash | 1 - .../current/sql-reference/data-types/float.md | 126 - .../sql-reference/data-types/float.md.hash | 1 - .../current/sql-reference/data-types/geo.md | 146 - .../sql-reference/data-types/geo.md.hash | 1 - .../current/sql-reference/data-types/index.md | 17 - .../sql-reference/data-types/index.md.hash | 1 - .../sql-reference/data-types/int-uint.md | 58 - .../sql-reference/data-types/int-uint.md.hash | 1 - .../current/sql-reference/data-types/ipv4.md | 78 - .../sql-reference/data-types/ipv4.md.hash | 1 - .../current/sql-reference/data-types/ipv6.md | 79 - .../sql-reference/data-types/ipv6.md.hash | 1 - .../current/sql-reference/data-types/json.md | 92 - .../sql-reference/data-types/json.md.hash | 1 - .../data-types/lowcardinality.md | 66 - .../data-types/lowcardinality.md.hash | 1 - .../current/sql-reference/data-types/map.md | 126 - .../sql-reference/data-types/map.md.hash | 1 - .../nested-data-structures/index.md | 110 - .../nested-data-structures/index.md.hash | 1 - .../sql-reference/data-types/newjson.md | 837 -- .../sql-reference/data-types/newjson.md.hash | 1 - .../sql-reference/data-types/nullable.md | 76 - .../sql-reference/data-types/nullable.md.hash | 1 - .../data-types/simpleaggregatefunction.md | 73 - .../simpleaggregatefunction.md.hash | 1 - .../special-data-types/expression.md | 14 - .../special-data-types/expression.md.hash | 1 - .../data-types/special-data-types/index.md | 14 - .../special-data-types/index.md.hash | 1 - .../data-types/special-data-types/interval.md | 88 - .../special-data-types/interval.md.hash | 1 - .../data-types/special-data-types/nothing.md | 28 - .../special-data-types/nothing.md.hash | 1 - .../data-types/special-data-types/set.md | 14 - .../data-types/special-data-types/set.md.hash | 1 - .../sql-reference/data-types/string.md | 28 - .../sql-reference/data-types/string.md.hash | 1 - .../current/sql-reference/data-types/time.md | 115 - .../sql-reference/data-types/time.md.hash | 1 - .../sql-reference/data-types/time64.md | 111 - .../sql-reference/data-types/time64.md.hash | 1 - .../current/sql-reference/data-types/tuple.md | 188 - .../sql-reference/data-types/tuple.md.hash | 1 - .../current/sql-reference/data-types/uuid.md | 135 - .../sql-reference/data-types/uuid.md.hash | 1 - .../sql-reference/data-types/variant.md | 483 - .../sql-reference/data-types/variant.md.hash | 1 - .../_snippet_dictionary_in_cloud.md | 9 - .../_snippet_dictionary_in_cloud.md.hash | 1 - .../sql-reference/dictionaries/index.md | 2461 ---- .../sql-reference/dictionaries/index.md.hash | 1 - .../current/sql-reference/distributed-ddl.md | 25 - .../sql-reference/distributed-ddl.md.hash | 1 - .../current/sql-reference/formats.mdx | 12 - .../current/sql-reference/formats.mdx.hash | 1 - .../sql-reference/functions/_category_.yml | 7 - .../functions/arithmetic-functions.md | 586 - .../functions/arithmetic-functions.md.hash | 1 - .../functions/array-functions.md | 3421 ------ .../functions/array-functions.md.hash | 1 - .../sql-reference/functions/array-join.md | 154 - .../functions/array-join.md.hash | 1 - .../sql-reference/functions/bit-functions.md | 441 - 
.../functions/bit-functions.md.hash | 1 - .../functions/bitmap-functions.md | 592 - .../functions/bitmap-functions.md.hash | 1 - .../functions/comparison-functions.md | 95 - .../functions/comparison-functions.md.hash | 1 - .../functions/conditional-functions.md | 445 - .../functions/conditional-functions.md.hash | 1 - .../functions/date-time-functions.md | 4838 -------- .../functions/date-time-functions.md.hash | 1 - .../functions/distance-functions.md | 559 - .../functions/distance-functions.md.hash | 1 - .../functions/encoding-functions.md | 967 -- .../functions/encoding-functions.md.hash | 1 - .../functions/encryption-functions.md | 411 - .../functions/encryption-functions.md.hash | 1 - .../functions/ext-dict-functions.md | 520 - .../functions/ext-dict-functions.md.hash | 1 - .../current/sql-reference/functions/files.md | 34 - .../sql-reference/functions/files.md.hash | 1 - .../functions/functions-for-nulls.md | 459 - .../functions/functions-for-nulls.md.hash | 1 - .../functions/geo/coordinates.md | 177 - .../functions/geo/coordinates.md.hash | 1 - .../sql-reference/functions/geo/geohash.md | 132 - .../functions/geo/geohash.md.hash | 1 - .../current/sql-reference/functions/geo/h3.md | 1387 --- .../sql-reference/functions/geo/h3.md.hash | 1 - .../sql-reference/functions/geo/index.md | 11 - .../sql-reference/functions/geo/index.md.hash | 1 - .../sql-reference/functions/geo/polygon.md | 484 - .../functions/geo/polygon.md.hash | 1 - .../current/sql-reference/functions/geo/s2.md | 376 - .../sql-reference/functions/geo/s2.md.hash | 1 - .../sql-reference/functions/geo/svg.md | 76 - .../sql-reference/functions/geo/svg.md.hash | 1 - .../sql-reference/functions/hash-functions.md | 1840 --- .../functions/hash-functions.md.hash | 1 - .../sql-reference/functions/in-functions.md | 16 - .../functions/in-functions.md.hash | 1 - .../current/sql-reference/functions/index.md | 15 - .../sql-reference/functions/index.md.hash | 1 - .../sql-reference/functions/introspection.md | 470 - .../functions/introspection.md.hash | 1 - .../functions/ip-address-functions.md | 739 -- .../functions/ip-address-functions.md.hash | 1 - .../sql-reference/functions/json-functions.md | 1365 --- .../functions/json-functions.md.hash | 1 - .../functions/logical-functions.md | 198 - .../functions/logical-functions.md.hash | 1 - .../functions/machine-learning-functions.md | 24 - .../machine-learning-functions.md.hash | 1 - .../sql-reference/functions/math-functions.md | 1014 -- .../functions/math-functions.md.hash | 1 - .../sql-reference/functions/nlp-functions.md | 402 - .../functions/nlp-functions.md.hash | 1 - .../functions/other-functions.md | 4620 -------- .../functions/other-functions.md.hash | 1 - .../sql-reference/functions/overview.md | 72 - .../sql-reference/functions/overview.md.hash | 1 - .../functions/random-functions.md | 735 -- .../functions/random-functions.md.hash | 1 - .../functions/regular-functions-index.md | 52 - .../functions/regular-functions-index.md.hash | 1 - .../functions/rounding-functions.md | 459 - .../functions/rounding-functions.md.hash | 1 - .../functions/splitting-merging-functions.md | 429 - .../splitting-merging-functions.md.hash | 1 - .../functions/string-functions.md | 2702 ----- .../functions/string-functions.md.hash | 1 - .../functions/string-replace-functions.md | 354 - .../string-replace-functions.md.hash | 1 - .../functions/string-search-functions.md | 1896 ---- .../functions/string-search-functions.md.hash | 1 - .../functions/time-series-functions.md | 168 - 
.../functions/time-series-functions.md.hash | 1 - .../functions/time-window-functions.md | 249 - .../functions/time-window-functions.md.hash | 1 - .../functions/tuple-functions.md | 901 -- .../functions/tuple-functions.md.hash | 1 - .../functions/tuple-map-functions.md | 1065 -- .../functions/tuple-map-functions.md.hash | 1 - .../functions/type-conversion-functions.md | 7683 ------------- .../type-conversion-functions.md.hash | 1 - .../current/sql-reference/functions/udf.md | 325 - .../sql-reference/functions/udf.md.hash | 1 - .../sql-reference/functions/ulid-functions.md | 89 - .../functions/ulid-functions.md.hash | 1 - .../functions/uniqtheta-functions.md | 101 - .../functions/uniqtheta-functions.md.hash | 1 - .../sql-reference/functions/url-functions.md | 1064 -- .../functions/url-functions.md.hash | 1 - .../sql-reference/functions/uuid-functions.md | 938 -- .../functions/uuid-functions.md.hash | 1 - .../functions/ym-dict-functions.md | 513 - .../functions/ym-dict-functions.md.hash | 1 - .../current/sql-reference/index.md | 30 - .../current/sql-reference/index.md.hash | 1 - .../operators/distributed-ddl.md | 10 - .../operators/distributed-ddl.md.hash | 1 - .../current/sql-reference/operators/exists.md | 68 - .../sql-reference/operators/exists.md.hash | 1 - .../current/sql-reference/operators/in.md | 270 - .../sql-reference/operators/in.md.hash | 1 - .../current/sql-reference/operators/index.md | 399 - .../sql-reference/operators/index.md.hash | 1 - .../sql-reference/sql-reference-links.json | 12 - .../sql-reference/statements/_category_.yml | 7 - .../statements/alter/apply-deleted-mask.md | 27 - .../alter/apply-deleted-mask.md.hash | 1 - .../sql-reference/statements/alter/column.md | 344 - .../statements/alter/column.md.hash | 1 - .../sql-reference/statements/alter/comment.md | 89 - .../statements/alter/comment.md.hash | 1 - .../statements/alter/constraint.md | 29 - .../statements/alter/constraint.md.hash | 1 - .../statements/alter/database-comment.md | 79 - .../statements/alter/database-comment.md.hash | 1 - .../sql-reference/statements/alter/delete.md | 38 - .../statements/alter/delete.md.hash | 1 - .../sql-reference/statements/alter/index.md | 84 - .../statements/alter/index.md.hash | 1 - .../statements/alter/named-collection.md | 37 - .../statements/alter/named-collection.md.hash | 1 - .../statements/alter/order-by.md | 24 - .../statements/alter/order-by.md.hash | 1 - .../statements/alter/partition.md | 372 - .../statements/alter/partition.md.hash | 1 - .../statements/alter/projection.md | 211 - .../statements/alter/projection.md.hash | 1 - .../sql-reference/statements/alter/quota.md | 42 - .../statements/alter/quota.md.hash | 1 - .../sql-reference/statements/alter/role.md | 24 - .../statements/alter/role.md.hash | 1 - .../statements/alter/row-policy.md | 25 - .../statements/alter/row-policy.md.hash | 1 - .../statements/alter/sample-by.md | 36 - .../statements/alter/sample-by.md.hash | 1 - .../sql-reference/statements/alter/setting.md | 66 - .../statements/alter/setting.md.hash | 1 - .../statements/alter/settings-profile.md | 25 - .../statements/alter/settings-profile.md.hash | 1 - .../statements/alter/skipping-index.md | 37 - .../statements/alter/skipping-index.md.hash | 1 - .../statements/alter/statistics.md | 44 - .../statements/alter/statistics.md.hash | 1 - .../sql-reference/statements/alter/ttl.md | 95 - .../statements/alter/ttl.md.hash | 1 - .../sql-reference/statements/alter/update.md | 39 - .../statements/alter/update.md.hash | 1 - .../sql-reference/statements/alter/user.md 
| 113 - .../statements/alter/user.md.hash | 1 - .../sql-reference/statements/alter/view.md | 204 - .../statements/alter/view.md.hash | 1 - .../sql-reference/statements/attach.md | 136 - .../sql-reference/statements/attach.md.hash | 1 - .../sql-reference/statements/check-grant.md | 49 - .../statements/check-grant.md.hash | 1 - .../sql-reference/statements/check-table.md | 174 - .../statements/check-table.md.hash | 1 - .../statements/create/database.md | 64 - .../statements/create/database.md.hash | 1 - .../statements/create/dictionary.md | 165 - .../statements/create/dictionary.md.hash | 1 - .../statements/create/function.md | 68 - .../statements/create/function.md.hash | 1 - .../sql-reference/statements/create/index.md | 14 - .../statements/create/index.md.hash | 1 - .../statements/create/named-collection.md | 40 - .../create/named-collection.md.hash | 1 - .../sql-reference/statements/create/quota.md | 49 - .../statements/create/quota.md.hash | 1 - .../sql-reference/statements/create/role.md | 51 - .../statements/create/role.md.hash | 1 - .../statements/create/row-policy.md | 106 - .../statements/create/row-policy.md.hash | 1 - .../statements/create/settings-profile.md | 38 - .../create/settings-profile.md.hash | 1 - .../sql-reference/statements/create/table.md | 726 -- .../statements/create/table.md.hash | 1 - .../sql-reference/statements/create/user.md | 239 - .../statements/create/user.md.hash | 1 - .../sql-reference/statements/create/view.md | 437 - .../statements/create/view.md.hash | 1 - .../sql-reference/statements/delete.md | 99 - .../sql-reference/statements/delete.md.hash | 1 - .../statements/describe-table.md | 74 - .../statements/describe-table.md.hash | 1 - .../sql-reference/statements/detach.md | 82 - .../sql-reference/statements/detach.md.hash | 1 - .../current/sql-reference/statements/drop.md | 146 - .../sql-reference/statements/drop.md.hash | 1 - .../sql-reference/statements/exchange.md | 49 - .../sql-reference/statements/exchange.md.hash | 1 - .../sql-reference/statements/exists.md | 18 - .../sql-reference/statements/exists.md.hash | 1 - .../sql-reference/statements/explain.md | 481 - .../sql-reference/statements/explain.md.hash | 1 - .../current/sql-reference/statements/grant.md | 679 -- .../sql-reference/statements/grant.md.hash | 1 - .../current/sql-reference/statements/index.md | 14 - .../sql-reference/statements/index.md.hash | 1 - .../sql-reference/statements/insert-into.md | 272 - .../statements/insert-into.md.hash | 1 - .../current/sql-reference/statements/kill.md | 148 - .../sql-reference/statements/kill.md.hash | 1 - .../current/sql-reference/statements/move.md | 37 - .../sql-reference/statements/move.md.hash | 1 - .../sql-reference/statements/optimize.md | 233 - .../sql-reference/statements/optimize.md.hash | 1 - .../sql-reference/statements/parallel_with.md | 53 - .../statements/parallel_with.md.hash | 1 - .../sql-reference/statements/rename.md | 67 - .../sql-reference/statements/rename.md.hash | 1 - .../sql-reference/statements/revoke.md | 54 - .../sql-reference/statements/revoke.md.hash | 1 - .../sql-reference/statements/select/all.md | 27 - .../statements/select/all.md.hash | 1 - .../statements/select/array-join.md | 391 - .../statements/select/array-join.md.hash | 1 - .../statements/select/distinct.md | 115 - .../statements/select/distinct.md.hash | 1 - .../sql-reference/statements/select/except.md | 225 - .../statements/select/except.md.hash | 1 - .../sql-reference/statements/select/format.md | 23 - .../statements/select/format.md.hash | 1 - 
 .../sql-reference/statements/select/from.md | 85 -
 .../statements/select/from.md.hash | 1 -
 .../statements/select/group-by.md | 390 -
 .../statements/select/group-by.md.hash | 1 -
 .../sql-reference/statements/select/having.md | 19 -
 .../statements/select/having.md.hash | 1 -
 .../sql-reference/statements/select/index.md | 294 -
 .../statements/select/index.md.hash | 1 -
 .../statements/select/intersect.md | 159 -
 .../statements/select/intersect.md.hash | 1 -
 .../statements/select/into-outfile.md | 49 -
 .../statements/select/into-outfile.md.hash | 1 -
 .../sql-reference/statements/select/join.md | 537 -
 .../statements/select/join.md.hash | 1 -
 .../statements/select/limit-by.md | 80 -
 .../statements/select/limit-by.md.hash | 1 -
 .../sql-reference/statements/select/limit.md | 72 -
 .../statements/select/limit.md.hash | 1 -
 .../sql-reference/statements/select/offset.md | 91 -
 .../statements/select/offset.md.hash | 1 -
 .../statements/select/order-by.md | 669 --
 .../statements/select/order-by.md.hash | 1 -
 .../statements/select/prewhere.md | 74 -
 .../statements/select/prewhere.md.hash | 1 -
 .../statements/select/qualify.md | 39 -
 .../statements/select/qualify.md.hash | 1 -
 .../sql-reference/statements/select/sample.md | 120 -
 .../statements/select/sample.md.hash | 1 -
 .../sql-reference/statements/select/union.md | 90 -
 .../statements/select/union.md.hash | 1 -
 .../sql-reference/statements/select/where.md | 62 -
 .../statements/select/where.md.hash | 1 -
 .../sql-reference/statements/select/with.md | 322 -
 .../statements/select/with.md.hash | 1 -
 .../sql-reference/statements/set-role.md | 51 -
 .../sql-reference/statements/set-role.md.hash | 1 -
 .../current/sql-reference/statements/set.md | 26 -
 .../sql-reference/statements/set.md.hash | 1 -
 .../current/sql-reference/statements/show.md | 720 --
 .../sql-reference/statements/show.md.hash | 1 -
 .../sql-reference/statements/system.md | 626 --
 .../sql-reference/statements/system.md.hash | 1 -
 .../sql-reference/statements/truncate.md | 47 -
 .../sql-reference/statements/truncate.md.hash | 1 -
 .../sql-reference/statements/undrop.md | 88 -
 .../sql-reference/statements/undrop.md.hash | 1 -
 .../current/sql-reference/statements/use.md | 22 -
 .../sql-reference/statements/use.md.hash | 1 -
 .../current/sql-reference/statements/watch.md | 18 -
 .../sql-reference/statements/watch.md.hash | 1 -
 .../current/sql-reference/syntax.md | 448 -
 .../current/sql-reference/syntax.md.hash | 1 -
 .../table-functions/azureBlobStorage.md | 134 -
 .../table-functions/azureBlobStorage.md.hash | 1 -
 .../azureBlobStorageCluster.md | 61 -
 .../azureBlobStorageCluster.md.hash | 1 -
 .../sql-reference/table-functions/cluster.md | 67 -
 .../table-functions/cluster.md.hash | 1 -
 .../table-functions/deltalake.md | 61 -
 .../table-functions/deltalake.md.hash | 1 -
 .../table-functions/deltalakeCluster.md | 37 -
 .../table-functions/deltalakeCluster.md.hash | 1 -
 .../table-functions/dictionary.md | 66 -
 .../table-functions/dictionary.md.hash | 1 -
 .../table-functions/executable.md | 119 -
 .../table-functions/executable.md.hash | 1 -
 .../sql-reference/table-functions/file.md | 248 -
 .../table-functions/file.md.hash | 1 -
 .../table-functions/fileCluster.md | 89 -
 .../table-functions/fileCluster.md.hash | 1 -
 .../sql-reference/table-functions/format.md | 103 -
 .../table-functions/format.md.hash | 1 -
 .../sql-reference/table-functions/fuzzJSON.md | 106 -
 .../table-functions/fuzzJSON.md.hash | 1 -
 .../table-functions/fuzzQuery.md | 45 -
 .../table-functions/fuzzQuery.md.hash | 1 -
 .../sql-reference/table-functions/gcs.md | 223 -
 .../sql-reference/table-functions/gcs.md.hash | 1 -
 .../sql-reference/table-functions/generate.md | 113 -
 .../table-functions/generate.md.hash | 1 -
 .../table-functions/generateSeries.md | 12 -
 .../table-functions/generateSeries.md.hash | 1 -
 .../table-functions/generate_series.md | 44 -
 .../table-functions/generate_series.md.hash | 1 -
 .../sql-reference/table-functions/hdfs.md | 134 -
 .../table-functions/hdfs.md.hash | 1 -
 .../table-functions/hdfsCluster.md | 67 -
 .../table-functions/hdfsCluster.md.hash | 1 -
 .../sql-reference/table-functions/hudi.md | 39 -
 .../table-functions/hudi.md.hash | 1 -
 .../table-functions/hudiCluster.md | 42 -
 .../table-functions/hudiCluster.md.hash | 1 -
 .../sql-reference/table-functions/iceberg.md | 290 -
 .../table-functions/iceberg.md.hash | 1 -
 .../table-functions/icebergCluster.md | 50 -
 .../table-functions/icebergCluster.md.hash | 1 -
 .../sql-reference/table-functions/index.md | 138 -
 .../table-functions/index.md.hash | 1 -
 .../sql-reference/table-functions/input.md | 41 -
 .../table-functions/input.md.hash | 1 -
 .../sql-reference/table-functions/jdbc.md | 47 -
 .../table-functions/jdbc.md.hash | 1 -
 .../sql-reference/table-functions/loop.md | 64 -
 .../table-functions/loop.md.hash | 1 -
 .../sql-reference/table-functions/merge.md | 30 -
 .../table-functions/merge.md.hash | 1 -
 .../table-functions/mergeTreeIndex.md | 93 -
 .../table-functions/mergeTreeIndex.md.hash | 1 -
 .../table-functions/mergeTreeProjection.md | 76 -
 .../mergeTreeProjection.md.hash | 1 -
 .../sql-reference/table-functions/mongodb.md | 106 -
 .../table-functions/mongodb.md.hash | 1 -
 .../sql-reference/table-functions/mysql.md | 155 -
 .../table-functions/mysql.md.hash | 1 -
 .../sql-reference/table-functions/null.md | 48 -
 .../table-functions/null.md.hash | 1 -
 .../sql-reference/table-functions/numbers.md | 44 -
 .../table-functions/numbers.md.hash | 1 -
 .../sql-reference/table-functions/odbc.md | 114 -
 .../table-functions/odbc.md.hash | 1 -
 .../table-functions/postgresql.md | 160 -
 .../table-functions/postgresql.md.hash | 1 -
 .../sql-reference/table-functions/redis.md | 67 -
 .../table-functions/redis.md.hash | 1 -
 .../sql-reference/table-functions/remote.md | 180 -
 .../table-functions/remote.md.hash | 1 -
 .../sql-reference/table-functions/s3.md | 359 -
 .../sql-reference/table-functions/s3.md.hash | 1 -
 .../table-functions/s3Cluster.md | 97 -
 .../table-functions/s3Cluster.md.hash | 1 -
 .../sql-reference/table-functions/sqlite.md | 51 -
 .../table-functions/sqlite.md.hash | 1 -
 .../table-functions/timeSeriesData.md | 33 -
 .../table-functions/timeSeriesData.md.hash | 1 -
 .../table-functions/timeSeriesMetrics.md | 33 -
 .../table-functions/timeSeriesMetrics.md.hash | 1 -
 .../table-functions/timeSeriesTags.md | 33 -
 .../table-functions/timeSeriesTags.md.hash | 1 -
 .../sql-reference/table-functions/url.md | 90 -
 .../sql-reference/table-functions/url.md.hash | 1 -
 .../table-functions/urlCluster.md | 70 -
 .../table-functions/urlCluster.md.hash | 1 -
 .../sql-reference/table-functions/values.md | 195 -
 .../table-functions/values.md.hash | 1 -
 .../sql-reference/table-functions/view.md | 72 -
 .../table-functions/view.md.hash | 1 -
 .../sql-reference/table-functions/zeros.md | 42 -
 .../table-functions/zeros.md.hash | 1 -
 .../current/sql-reference/transactions.md | 307 -
 .../sql-reference/transactions.md.hash | 1 -
 .../window-functions/dense_rank.md | 80 -
 .../window-functions/dense_rank.md.hash | 1 -
 .../window-functions/first_value.md | 86 -
 .../window-functions/first_value.md.hash | 1 -
 .../sql-reference/window-functions/index.md | 782 --
 .../window-functions/index.md.hash | 1 -
 .../window-functions/lagInFrame.md | 92 -
 .../window-functions/lagInFrame.md.hash | 1 -
 .../window-functions/last_value.md | 86 -
 .../window-functions/last_value.md.hash | 1 -
 .../window-functions/leadInFrame.md | 83 -
 .../window-functions/leadInFrame.md.hash | 1 -
 .../window-functions/nth_value.md | 80 -
 .../window-functions/nth_value.md.hash | 1 -
 .../window-functions/percent_rank.md | 77 -
 .../window-functions/percent_rank.md.hash | 1 -
 .../sql-reference/window-functions/rank.md | 78 -
 .../window-functions/rank.md.hash | 1 -
 .../window-functions/row_number.md | 72 -
 .../window-functions/row_number.md.hash | 1 -
 .../current/starter-guides/index.md | 18 -
 .../current/starter-guides/index.md.hash | 1 -
 .../static-files-disk-uploader.md | 90 -
 .../static-files-disk-uploader.md.hash | 1 -
 .../current/tutorial.md | 506 -
 .../current/tutorial.md.hash | 1 -
 .../use-cases/data_lake/glue_catalog.md | 314 -
 .../use-cases/data_lake/glue_catalog.md.hash | 1 -
 .../current/use-cases/data_lake/index.md | 16 -
 .../current/use-cases/data_lake/index.md.hash | 1 -
 .../use-cases/data_lake/unity_catalog.md | 176 -
 .../use-cases/data_lake/unity_catalog.md.hash | 1 -
 .../current/use-cases/index.md | 17 -
 .../current/use-cases/index.md.hash | 1 -
 .../observability/demo-application.md | 17 -
 .../observability/demo-application.md.hash | 1 -
 .../use-cases/observability/grafana.md | 215 -
 .../use-cases/observability/grafana.md.hash | 1 -
 .../current/use-cases/observability/index.md | 22 -
 .../use-cases/observability/index.md.hash | 1 -
 .../integrating-opentelemetry.md | 758 --
 .../integrating-opentelemetry.md.hash | 1 -
 .../use-cases/observability/introduction.md | 104 -
 .../observability/introduction.md.hash | 1 -
 .../use-cases/observability/managing-data.md | 451 -
 .../observability/managing-data.md.hash | 1 -
 .../use-cases/observability/schema-design.md | 1636 ---
 .../observability/schema-design.md.hash | 1 -
 .../time-series/analysis-functions.md | 192 -
 .../time-series/analysis-functions.md.hash | 1 -
 .../use-cases/time-series/basic-operations.md | 240 -
 .../time-series/basic-operations.md.hash | 1 -
 .../time-series/date-time-data-types.md | 207 -
 .../time-series/date-time-data-types.md.hash | 1 -
 .../current/use-cases/time-series/index.md | 25 -
 .../use-cases/time-series/index.md.hash | 1 -
 .../time-series/query-performance.md | 260 -
 .../time-series/query-performance.md.hash | 1 -
 .../time-series/storage-efficiency.md | 125 -
 .../time-series/storage-efficiency.md.hash | 1 -
 .../current/whats-new/_category_.yml | 7 -
 .../current/whats-new/changelog/2017.md | 269 -
 .../current/whats-new/changelog/2018.md | 1064 --
 .../current/whats-new/changelog/2019.md | 2075 ----
 .../current/whats-new/changelog/2020.md | 3535 ------
 .../current/whats-new/changelog/2021.md | 2056 ----
 .../current/whats-new/changelog/2022.md | 1838 ---
 .../current/whats-new/changelog/2023.md | 2170 ----
 .../current/whats-new/changelog/2024.md | 1905 ----
 .../whats-new/changelog/_category_.yml | 7 -
 .../current/whats-new/changelog/cloud.md | 13 -
 .../current/whats-new/changelog/index.md | 888 --
 .../current/whats-new/roadmap.md | 22 -
 .../current/whats-new/security-changelog.md | 218 -
 scripts/autogenerate-table-of-contents.sh | 1 +
 scripts/translate/translate.py | 94 +-
 2658 files changed, 82 insertions(+), 262987 deletions(-)
 delete mode 100644
docs/cloud/onboard/02_migrate/01_migration_guides/03_bigquery/_04_sql_translation_reference.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/_clients/go/README.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/_clients/go/README.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/_placeholders/api/_invitations-api-reference.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/_placeholders/api/_invitations-api-reference.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/_placeholders/api/_keys-api-reference.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/_placeholders/api/_keys-api-reference.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/_placeholders/api/_members-api-reference.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/_placeholders/api/_members-api-reference.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/_placeholders/api/_organizations-api-reference.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/_placeholders/api/_organizations-api-reference.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/_placeholders/api/_services-api-reference.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/_placeholders/api/_services-api-reference.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/_placeholders/changelog/_index.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/_placeholders/changelog/_index.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_GCS_authentication_and_bucket.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_GCS_authentication_and_bucket.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_S3_authentication_and_bucket.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_S3_authentication_and_bucket.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_add_remote_ip_access_list_detail.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_add_remote_ip_access_list_detail.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_add_superset_detail.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_add_superset_detail.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_aws_regions.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_aws_regions.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_clickhouse_mysql_cloud_setup.mdx delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_clickhouse_mysql_cloud_setup.mdx.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_clickhouse_mysql_on_premise_setup.mdx delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_clickhouse_mysql_on_premise_setup.mdx.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_config-files.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_config-files.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_gather_your_details_http.mdx delete mode 100644 
i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_gather_your_details_http.mdx.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_gather_your_details_native.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_gather_your_details_native.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_gcp_regions.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_gcp_regions.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_keeper-config-files.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_keeper-config-files.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_launch_sql_console.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_launch_sql_console.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_replication-sharding-terminology.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_replication-sharding-terminology.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_self_managed_only_automated.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_self_managed_only_automated.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_self_managed_only_no_roadmap.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_self_managed_only_no_roadmap.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_self_managed_only_not_applicable.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_self_managed_only_not_applicable.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_self_managed_only_roadmap.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_self_managed_only_roadmap.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_service_actions_menu.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_service_actions_menu.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_system_table_cloud.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_system_table_cloud.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_tabs.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_tabs.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_users-and-roles-common.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_users-and-roles-common.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/about-us/_category_.yml delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/about-us/about-faq-index.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/about-us/about-faq-index.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/about-us/adopters.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/about-us/beta-and-experimental-features.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/about-us/beta-and-experimental-features.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/about-us/cloud.md delete mode 100644 
i18n/jp/docusaurus-plugin-content-docs/current/about-us/cloud.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/about-us/distinctive-features.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/about-us/distinctive-features.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/about-us/history.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/about-us/history.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/about-us/index.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/about-us/index.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/about-us/intro.mdx delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/about-us/intro.mdx.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/about-us/support.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/about-us/support.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/architecture/cluster-deployment.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/architecture/cluster-deployment.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/best-practices/_snippets/_async_inserts.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/best-practices/_snippets/_async_inserts.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/best-practices/_snippets/_avoid_mutations.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/best-practices/_snippets/_avoid_mutations.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/best-practices/_snippets/_avoid_nullable_columns.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/best-practices/_snippets/_avoid_nullable_columns.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/best-practices/_snippets/_avoid_optimize_final.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/best-practices/_snippets/_avoid_optimize_final.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/best-practices/_snippets/_bulk_inserts.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/best-practices/_snippets/_bulk_inserts.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/best-practices/avoid_mutations.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/best-practices/avoid_mutations.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/best-practices/avoid_optimize_final.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/best-practices/avoid_optimize_final.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/best-practices/choosing_a_primary_key.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/best-practices/choosing_a_primary_key.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/best-practices/index.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/best-practices/index.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/best-practices/json_type.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/best-practices/json_type.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/best-practices/minimize_optimize_joins.md delete mode 100644 
i18n/jp/docusaurus-plugin-content-docs/current/best-practices/minimize_optimize_joins.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/best-practices/partitioning_keys.mdx delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/best-practices/partitioning_keys.mdx.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/best-practices/select_data_type.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/best-practices/select_data_type.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/best-practices/selecting_an_insert_strategy.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/best-practices/selecting_an_insert_strategy.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/best-practices/sizing-and-hardware-recommendations.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/best-practices/sizing-and-hardware-recommendations.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/best-practices/use_materialized_views.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/best-practices/use_materialized_views.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/best-practices/using_data_skipping_indices.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/best-practices/using_data_skipping_indices.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/chdb/getting-started.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/chdb/getting-started.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/chdb/guides/clickhouse-local.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/chdb/guides/clickhouse-local.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/chdb/guides/index.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/chdb/guides/index.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/chdb/guides/jupysql.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/chdb/guides/jupysql.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/chdb/guides/query-remote-clickhouse.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/chdb/guides/query-remote-clickhouse.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/chdb/guides/querying-apache-arrow.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/chdb/guides/querying-apache-arrow.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/chdb/guides/querying-pandas.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/chdb/guides/querying-pandas.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/chdb/guides/querying-parquet.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/chdb/guides/querying-parquet.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/chdb/guides/querying-s3-bucket.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/chdb/guides/querying-s3-bucket.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/chdb/index.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/chdb/index.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/chdb/install/bun.md delete mode 100644 
i18n/jp/docusaurus-plugin-content-docs/current/chdb/install/bun.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/chdb/install/c.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/chdb/install/c.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/chdb/install/go.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/chdb/install/go.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/chdb/install/index.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/chdb/install/index.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/chdb/install/nodejs.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/chdb/install/nodejs.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/chdb/install/python.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/chdb/install/python.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/chdb/install/rust.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/chdb/install/rust.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/chdb/reference/data-formats.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/chdb/reference/data-formats.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/chdb/reference/index.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/chdb/reference/index.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/chdb/reference/sql-reference.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/chdb/reference/sql-reference.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud-index.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/_category_.yml delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/bestpractices/_category_.yml delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/bestpractices/asyncinserts.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/bestpractices/avoidmutations.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/bestpractices/avoidnullablecolumns.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/bestpractices/avoidoptimizefinal.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/bestpractices/bulkinserts.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/bestpractices/index.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/bestpractices/index.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/bestpractices/multitenancy.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/bestpractices/multitenancy.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/bestpractices/partitioningkey.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/bestpractices/usagelimits.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/bestpractices/usagelimits.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/changelogs/changelog-24-10.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/changelogs/changelog-24-12.md delete mode 100644 
i18n/jp/docusaurus-plugin-content-docs/current/cloud/changelogs/changelog-24-5.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/changelogs/changelog-24-6.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/changelogs/changelog-24-8.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/changelogs/changelog-25_1-25_4.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/changelogs/fast-release-24-2.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/get-started/cloud-quick-start.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/get-started/cloud-quick-start.mdx delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/get-started/cloud-quick-start.mdx.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/get-started/index.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/get-started/index.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/get-started/query-endpoints.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/get-started/query-endpoints.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/get-started/query-insights.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/get-started/query-insights.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/get-started/sql-console.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/get-started/sql-console.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/_category_.yml delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/_snippets/_network_transfer_rates.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/_snippets/_network_transfer_rates.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/account-close.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/account-close.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/api/api-overview.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/api/api-overview.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/api/api-reference-index.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/api/index.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/api/index.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/api/invitations-api-reference.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/api/keys-api-reference.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/api/members-api-reference.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/api/organizations-api-reference.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/api/privateEndpointConfig-api-reference.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/api/prometheus-api-reference.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/api/services-api-reference.md.hash delete mode 100644 
i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/api/usageCost-api-reference.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/backups/configurable-backups.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/backups/configurable-backups.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/backups/export-backups-to-own-cloud-account.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/backups/export-backups-to-own-cloud-account.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/backups/index.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/backups/index.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/backups/overview.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/backups/overview.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/billing.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/billing.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/billing/index.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/billing/index.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/billing/marketplace/aws-marketplace-committed.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/billing/marketplace/aws-marketplace-committed.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/billing/marketplace/aws-marketplace-payg.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/billing/marketplace/aws-marketplace-payg.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/billing/marketplace/azure-marketplace-committed.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/billing/marketplace/azure-marketplace-committed.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/billing/marketplace/azure-marketplace-payg.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/billing/marketplace/azure-marketplace-payg.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/billing/marketplace/gcp-marketplace-committed.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/billing/marketplace/gcp-marketplace-committed.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/billing/marketplace/gcp-marketplace-payg.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/billing/marketplace/gcp-marketplace-payg.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/billing/marketplace/index.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/billing/marketplace/index.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/billing/marketplace/overview.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/billing/marketplace/overview.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/billing/payment-thresholds.md delete mode 100644 
i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/billing/payment-thresholds.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/cloud-tiers.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/cloud-tiers.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/dashboards.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/dashboards.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/index.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/index.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/integrations.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/integrations.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/jan2025_faq/backup.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/jan2025_faq/backup.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/jan2025_faq/billing.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/jan2025_faq/billing.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/jan2025_faq/dimensions.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/jan2025_faq/dimensions.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/jan2025_faq/index.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/jan2025_faq/index.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/jan2025_faq/new_tiers.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/jan2025_faq/new_tiers.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/jan2025_faq/plan_migrations.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/jan2025_faq/plan_migrations.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/jan2025_faq/scaling.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/jan2025_faq/scaling.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/jan2025_faq/summary.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/jan2025_faq/summary.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/network-data-transfer.mdx delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/network-data-transfer.mdx.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/notifications.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/notifications.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/openapi.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/openapi.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/postman.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/postman.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/replica-aware-routing.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/replica-aware-routing.md.hash delete mode 
100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/scaling.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/scaling.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/service-uptime.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/service-uptime.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/settings.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/settings.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/troubleshooting-billing-issues.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/troubleshooting-billing-issues.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/upgrades.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/upgrades.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/reference/_category_.yml delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/reference/architecture.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/reference/architecture.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/reference/byoc.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/reference/byoc.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/reference/changelog.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/reference/changelog.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/reference/changelogs-index.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/reference/changelogs-index.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/reference/cloud-compatibility.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/reference/cloud-compatibility.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/reference/compute-compute-separation.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/reference/index.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/reference/index.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/reference/release-notes-index.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/reference/release-notes-index.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/reference/shared-merge-tree.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/reference/shared-merge-tree.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/reference/supported-regions.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/reference/supported-regions.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/reference/warehouses.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/reference/warehouses.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/security/_category_.yml delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/security/accessing-s3-data-securely.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/security/accessing-s3-data-securely.md.hash delete mode 
100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/security/audit-logging.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/security/audit-logging.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/security/aws-privatelink.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/security/aws-privatelink.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/security/azure-privatelink.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/security/azure-privatelink.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/security/cloud-access-management/cloud-access-management.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/security/cloud-access-management/cloud-access-management.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/security/cloud-access-management/cloud-authentication.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/security/cloud-access-management/cloud-authentication.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/security/cloud-access-management/index.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/security/cloud-access-management/index.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/security/cloud-endpoints-api.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/security/cloud-endpoints-api.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/security/cmek.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/security/cmek.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/security/common-access-management-queries.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/security/common-access-management-queries.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/security/compliance-overview.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/security/compliance-overview.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/security/connectivity-overview.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/security/connectivity-overview.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/security/gcp-private-service-connect.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/security/gcp-private-service-connect.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/security/index.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/security/index.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/security/inviting-new-users.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/security/inviting-new-users.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/security/personal-data-access.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/security/personal-data-access.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/security/privacy-compliance-overview.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/security/privacy-compliance-overview.md.hash delete mode 100644 
i18n/jp/docusaurus-plugin-content-docs/current/cloud/security/private-link-overview.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/security/private-link-overview.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/security/saml-sso-setup.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/security/saml-sso-setup.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/security/setting-ip-filters.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/security/setting-ip-filters.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/security/shared-responsibility-model.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/security/shared-responsibility-model.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/support.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/cloud/support.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/concepts/glossary.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/concepts/glossary.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/concepts/index.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/concepts/index.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/concepts/olap.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/concepts/olap.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/concepts/why-clickhouse-is-so-fast.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/concepts/why-clickhouse-is-so-fast.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/data-compression/compression-in-clickhouse.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/data-compression/compression-in-clickhouse.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/data-compression/compression-modes.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/data-compression/compression-modes.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/data-modeling/backfilling.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/data-modeling/backfilling.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/data-modeling/denormalization.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/data-modeling/denormalization.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/data-modeling/index.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/data-modeling/index.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/data-modeling/projections.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/data-modeling/projections.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/data-modeling/schema-design.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/data-modeling/schema-design.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/deployment-guides/horizontal-scaling.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/deployment-guides/horizontal-scaling.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/deployment-guides/index.md delete mode 100644 
i18n/jp/docusaurus-plugin-content-docs/current/deployment-guides/index.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/deployment-guides/parallel-replicas.mdx delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/deployment-guides/parallel-replicas.mdx.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/deployment-guides/replicated.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/deployment-guides/replicated.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/deployment-guides/terminology.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/deployment-guides/terminology.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/deployment-modes.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/deployment-modes.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/development/_category_.yml delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/development/adding_test_queries.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/development/architecture.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/development/architecture.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/development/build-cross-arm.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/development/build-cross-arm.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/development/build-cross-loongarch.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/development/build-cross-loongarch.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/development/build-cross-osx.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/development/build-cross-osx.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/development/build-cross-riscv.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/development/build-cross-riscv.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/development/build-cross-s390x.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/development/build-cross-s390x.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/development/build-osx.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/development/build-osx.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/development/build.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/development/build.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/development/building_and_benchmarking_deflate_qpl.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/development/building_and_benchmarking_deflate_qpl.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/development/continuous-integration.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/development/continuous-integration.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/development/contrib.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/development/contrib.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/development/developer-instruction.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/development/developer-instruction.md.hash delete mode 100644 
i18n/jp/docusaurus-plugin-content-docs/current/development/images/concurrency.png delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/development/images/find-build-artifact.png delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/development/index.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/development/index.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/development/integrating_rust_libraries.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/development/integrating_rust_libraries.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/development/style.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/development/style.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/development/tests.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/development/tests.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/dictionary/index.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/dictionary/index.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/engines/_category_.yml delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/engines/database-engines/atomic.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/engines/database-engines/atomic.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/engines/database-engines/backup.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/engines/database-engines/backup.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/engines/database-engines/index.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/engines/database-engines/index.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/engines/database-engines/lazy.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/engines/database-engines/lazy.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/engines/database-engines/materialized-postgresql.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/engines/database-engines/materialized-postgresql.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/engines/database-engines/mysql.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/engines/database-engines/mysql.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/engines/database-engines/postgresql.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/engines/database-engines/postgresql.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/engines/database-engines/replicated.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/engines/database-engines/replicated.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/engines/database-engines/sqlite.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/engines/database-engines/sqlite.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/engines/index.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/engines/index.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/index.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/index.md.hash delete mode 100644 
i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/ExternalDistributed.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/ExternalDistributed.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/azure-queue.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/azure-queue.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/azureBlobStorage.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/azureBlobStorage.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/deltalake.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/deltalake.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/embedded-rocksdb.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/embedded-rocksdb.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/hdfs.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/hdfs.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/hive.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/hive.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/hudi.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/hudi.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/iceberg.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/iceberg.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/index.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/index.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/jdbc.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/jdbc.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/kafka.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/kafka.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/materialized-postgresql.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/materialized-postgresql.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/mongodb.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/mongodb.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/mysql.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/mysql.md.hash delete mode 100644 
i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/nats.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/nats.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/odbc.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/odbc.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/postgresql.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/postgresql.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/rabbitmq.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/rabbitmq.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/redis.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/redis.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/s3.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/s3.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/s3queue.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/s3queue.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/sqlite.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/sqlite.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/time-series.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/time-series.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/log-family/index.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/log-family/index.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/log-family/log.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/log-family/log.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/log-family/stripelog.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/log-family/stripelog.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/log-family/tinylog.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/log-family/tinylog.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/mergetree-family/aggregatingmergetree.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/mergetree-family/aggregatingmergetree.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/mergetree-family/annindexes.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/mergetree-family/annindexes.md.hash delete mode 100644 
i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/mergetree-family/collapsingmergetree.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/mergetree-family/collapsingmergetree.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/mergetree-family/custom-partitioning-key.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/mergetree-family/custom-partitioning-key.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/mergetree-family/graphitemergetree.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/mergetree-family/graphitemergetree.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/mergetree-family/index.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/mergetree-family/index.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/mergetree-family/invertedindexes.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/mergetree-family/invertedindexes.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/mergetree-family/mergetree.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/mergetree-family/mergetree.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/mergetree-family/replacingmergetree.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/mergetree-family/replacingmergetree.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/mergetree-family/replication.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/mergetree-family/replication.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/mergetree-family/summingmergetree.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/mergetree-family/summingmergetree.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/mergetree-family/versionedcollapsingmergetree.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/mergetree-family/versionedcollapsingmergetree.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/special/buffer.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/special/buffer.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/special/dictionary.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/special/dictionary.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/special/distributed.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/special/distributed.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/special/executable.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/special/executable.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/special/external-data.md delete mode 
100644 i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/special/external-data.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/special/file.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/special/file.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/special/filelog.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/special/filelog.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/special/generate.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/special/generate.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/special/index.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/special/index.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/special/join.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/special/join.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/special/keepermap.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/special/keepermap.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/special/memory.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/special/memory.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/special/merge.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/special/merge.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/special/null.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/special/null.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/special/set.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/special/set.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/special/url.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/special/url.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/special/view.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/special/view.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/faq/_category_.yml delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/faq/general/_category_.yml delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/faq/general/columnar-database.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/faq/general/columnar-database.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/faq/general/dbms-naming.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/faq/general/dbms-naming.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/faq/general/index.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/faq/general/index.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/faq/general/mapreduce.md delete mode 100644 
i18n/jp/docusaurus-plugin-content-docs/current/faq/general/mapreduce.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/faq/general/ne-tormozit.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/faq/general/ne-tormozit.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/faq/general/olap.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/faq/general/olap.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/faq/general/who-is-using-clickhouse.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/faq/general/who-is-using-clickhouse.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/faq/index.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/faq/index.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/faq/integration/_category_.yml delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/faq/integration/index.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/faq/integration/index.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/faq/integration/json-import.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/faq/integration/json-import.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/faq/integration/oracle-odbc.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/faq/integration/oracle-odbc.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/faq/operations/_category_.yml delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/faq/operations/delete-old-data.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/faq/operations/delete-old-data.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/faq/operations/index.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/faq/operations/index.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/faq/operations/multi-region-replication.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/faq/operations/multi-region-replication.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/faq/operations/production.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/faq/operations/production.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/faq/operations/separate_storage.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/faq/operations/separate_storage.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/faq/troubleshooting.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/faq/troubleshooting.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/faq/use-cases/_category_.yml delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/faq/use-cases/index.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/faq/use-cases/index.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/faq/use-cases/key-value.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/faq/use-cases/key-value.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/faq/use-cases/time-series.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/faq/use-cases/time-series.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/fast-release-24-2.md.hash 
delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/getting-started/_category_.yml delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/amazon-reviews.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/amazon-reviews.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/amplab-benchmark.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/amplab-benchmark.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/brown-benchmark.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/brown-benchmark.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/cell-towers.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/cell-towers.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/covid19.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/covid19.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/criteo.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/criteo.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/environmental-sensors.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/environmental-sensors.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/foursquare-os-places.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/foursquare-os-places.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/github-events.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/github-events.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/github.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/github.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/laion.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/laion.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/menus.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/menus.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/metrica.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/metrica.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/noaa.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/noaa.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/nyc-taxi.md delete mode 100644 
i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/nyc-taxi.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/nypd_complaint_data.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/nypd_complaint_data.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/ontime.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/ontime.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/opensky.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/opensky.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/recipes.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/recipes.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/reddit-comments.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/reddit-comments.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/stackoverflow.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/stackoverflow.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/star-schema.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/star-schema.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/tpcds.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/tpcds.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/tpch.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/tpch.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/tw-weather.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/tw-weather.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/uk-price-paid.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/uk-price-paid.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/wikistat.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/wikistat.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/youtube-dislikes.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/youtube-dislikes.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/getting-started/index.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/getting-started/index.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/getting-started/install.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/getting-started/install/_snippets/_deb_install.md delete mode 100644 
i18n/jp/docusaurus-plugin-content-docs/current/getting-started/install/_snippets/_deb_install.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/getting-started/install/_snippets/_docker.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/getting-started/install/_snippets/_docker.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/getting-started/install/_snippets/_linux_tar_install.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/getting-started/install/_snippets/_linux_tar_install.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/getting-started/install/_snippets/_macos.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/getting-started/install/_snippets/_macos.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/getting-started/install/_snippets/_quick_install.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/getting-started/install/_snippets/_quick_install.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/getting-started/install/_snippets/_rpm_install.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/getting-started/install/_snippets/_rpm_install.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/getting-started/install/_snippets/_windows_install.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/getting-started/install/_snippets/_windows_install.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/getting-started/install/advanced.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/getting-started/install/advanced.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/getting-started/install/debian_ubuntu.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/getting-started/install/debian_ubuntu.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/getting-started/install/docker.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/getting-started/install/docker.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/getting-started/install/install.mdx delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/getting-started/install/install.mdx.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/getting-started/install/macos.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/getting-started/install/macos.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/getting-started/install/other_linux.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/getting-started/install/other_linux.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/getting-started/install/quick-install-curl.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/getting-started/install/quick-install-curl.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/getting-started/install/redhat.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/getting-started/install/redhat.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/getting-started/install/windows.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/getting-started/install/windows.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/getting-started/playground.md delete mode 100644 
i18n/jp/docusaurus-plugin-content-docs/current/getting-started/playground.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/guides/_category_.yml delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/guides/best-practices/_category_.yml delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/guides/best-practices/asyncinserts.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/guides/best-practices/asyncinserts.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/guides/best-practices/avoidmutations.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/guides/best-practices/avoidmutations.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/guides/best-practices/avoidnullablecolumns.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/guides/best-practices/avoidnullablecolumns.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/guides/best-practices/avoidoptimizefinal.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/guides/best-practices/avoidoptimizefinal.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/guides/best-practices/bulkinserts.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/guides/best-practices/bulkinserts.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/guides/best-practices/index.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/guides/best-practices/index.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/guides/best-practices/partitioningkey.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/guides/best-practices/partitioningkey.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/guides/best-practices/prewhere.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/guides/best-practices/prewhere.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/guides/best-practices/query-optimization.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/guides/best-practices/query-optimization.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/guides/best-practices/query-parallelism.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/guides/best-practices/query-parallelism.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/guides/best-practices/skipping-indexes.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/guides/best-practices/skipping-indexes.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/guides/best-practices/sparse-primary-indexes.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/guides/best-practices/sparse-primary-indexes.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/guides/creating-tables.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/guides/creating-tables.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/guides/developer/_category_.yml delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/guides/developer/alternative-query-languages.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/guides/developer/alternative-query-languages.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/guides/developer/cascading-materialized-views.md delete mode 100644 
i18n/jp/docusaurus-plugin-content-docs/current/guides/developer/cascading-materialized-views.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/guides/developer/debugging-memory-issues.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/guides/developer/debugging-memory-issues.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/guides/developer/deduplicating-inserts-on-retries.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/guides/developer/deduplicating-inserts-on-retries.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/guides/developer/deduplication.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/guides/developer/deduplication.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/guides/developer/index.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/guides/developer/index.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/guides/developer/lightweight-delete.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/guides/developer/lightweight-delete.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/guides/developer/lightweight-update.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/guides/developer/lightweight-update.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/guides/developer/mutations.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/guides/developer/mutations.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/guides/developer/replacing-merge-tree.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/guides/developer/replacing-merge-tree.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/guides/developer/time-series-filling-gaps.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/guides/developer/time-series-filling-gaps.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/guides/developer/ttl.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/guides/developer/ttl.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/guides/developer/understanding-query-execution-with-the-analyzer.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/guides/developer/understanding-query-execution-with-the-analyzer.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/anyIf.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/anyIf.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/argMaxIf.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/argMaxIf.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/argMinIf.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/argMinIf.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/avgIf.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/avgIf.md.hash delete mode 100644 
i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/avgMap.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/avgMap.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/avgMerge.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/avgMerge.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/avgMergeState.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/avgMergeState.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/avgResample.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/avgResample.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/avgState.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/avgState.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/countIf.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/countIf.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/countResample.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/countResample.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/groupArrayDistinct.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/groupArrayDistinct.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/groupArrayResample.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/groupArrayResample.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/maxMap.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/maxMap.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/maxSimpleState.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/maxSimpleState.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/minMap.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/minMap.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/minSimpleState.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/minSimpleState.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/quantilesTimingArrayIf.md delete mode 100644 
i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/quantilesTimingArrayIf.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/quantilesTimingIf.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/quantilesTimingIf.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/sumArray.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/sumArray.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/sumForEach.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/sumForEach.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/sumIf.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/sumIf.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/sumMap.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/sumMap.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/sumSimpleState.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/sumSimpleState.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/uniqArray.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/uniqArray.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/uniqArrayIf.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/uniqArrayIf.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/guides/inserting-data.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/guides/inserting-data.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/guides/joining-tables.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/guides/joining-tables.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/guides/manage-and-deploy-index.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/guides/manage-and-deploy-index.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/guides/separation-storage-compute.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/guides/separation-storage-compute.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/guides/sizing-and-hardware-recommendations.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/guides/sre/_category_.yml delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/guides/sre/configuring-ssl.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/guides/sre/configuring-ssl.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/guides/sre/index.md delete mode 100644 
i18n/jp/docusaurus-plugin-content-docs/current/guides/sre/index.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/guides/sre/keeper/_category_.yml delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/guides/sre/keeper/index.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/guides/sre/keeper/index.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/guides/sre/network-ports.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/guides/sre/network-ports.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/guides/sre/scaling-clusters.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/guides/sre/scaling-clusters.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/guides/sre/user-management/_category_.yml delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/guides/sre/user-management/configuring-ldap.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/guides/sre/user-management/configuring-ldap.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/guides/sre/user-management/index.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/guides/sre/user-management/index.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/guides/sre/user-management/ssl-user-auth.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/guides/sre/user-management/ssl-user-auth.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/guides/troubleshooting.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/guides/troubleshooting.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/guides/writing-queries.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/guides/writing-queries.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/home_links/deployment_links.json delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/home_links/links_101.json delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/cli.mdx delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/cli.mdx.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/_category_.yml delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/apache-spark/index.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/apache-spark/index.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/apache-spark/spark-jdbc.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/apache-spark/spark-jdbc.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/apache-spark/spark-native-connector.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/apache-spark/spark-native-connector.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/aws-glue/index.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/aws-glue/index.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/azure-data-factory/index.md delete mode 100644 
i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/azure-data-factory/index.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/azure-data-factory/overview.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/azure-data-factory/overview.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/azure-data-factory/using_azureblobstorage.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/azure-data-factory/using_azureblobstorage.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/azure-data-factory/using_http_interface.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/azure-data-factory/using_http_interface.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/azure-synapse/index.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/azure-synapse/index.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/assets/static-ips.json delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/aws-privatelink.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/aws-privatelink.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/index.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/index.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/kafka.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/kafka.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/kinesis.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/kinesis.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/mysql/datatypes.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/mysql/datatypes.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/mysql/faq.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/mysql/faq.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/mysql/index.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/mysql/index.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/mysql/source/aurora.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/mysql/source/aurora.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/mysql/source/gcp.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/mysql/source/gcp.md.hash delete mode 100644 
i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/mysql/source/rds.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/mysql/source/rds.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/mysql/source/rds_maria.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/mysql/source/rds_maria.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/object-storage.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/object-storage.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/postgres/add_table.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/postgres/add_table.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/postgres/deduplication.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/postgres/deduplication.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/postgres/faq.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/postgres/faq.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/postgres/index.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/postgres/index.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/postgres/maintenance.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/postgres/maintenance.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/postgres/ordering_keys.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/postgres/ordering_keys.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/postgres/pause_and_resume.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/postgres/pause_and_resume.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/postgres/postgres_generated_columns.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/postgres/postgres_generated_columns.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/postgres/remove_table.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/postgres/remove_table.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/postgres/schema-changes.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/postgres/schema-changes.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/postgres/source/aurora.md delete mode 
100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/postgres/source/aurora.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/postgres/source/azure-flexible-server-postgres.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/postgres/source/azure-flexible-server-postgres.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/postgres/source/crunchy-postgres.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/postgres/source/crunchy-postgres.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/postgres/source/generic.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/postgres/source/generic.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/postgres/source/google-cloudsql.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/postgres/source/google-cloudsql.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/postgres/source/neon-postgres.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/postgres/source/neon-postgres.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/postgres/source/rds.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/postgres/source/rds.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/postgres/source/supabase.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/postgres/source/supabase.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/postgres/source/timescale.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/postgres/source/timescale.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/postgres/table_resync.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/postgres/table_resync.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/postgres/toast.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/postgres/toast.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/secure-kinesis.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/secure-kinesis.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/_category_.yml delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/arrow-avro-orc.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/arrow-avro-orc.md.hash delete mode 100644 
i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/assets/arrays.json delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/assets/capnp.bin delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/assets/columns-array.json delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/assets/columns.json delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/assets/custom.json delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/assets/data.arrow delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/assets/data.avro delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/assets/data.binary delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/assets/data.bson delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/assets/data.clickhouse delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/assets/data.msgpk delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/assets/data.orc delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/assets/data.parquet delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/assets/data_csv_types.csv delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/assets/data_small.csv delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/assets/data_small.tsv delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/assets/data_small_custom.txt delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/assets/data_small_headers.csv delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/assets/dump.sql delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/assets/error.log delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/assets/export.parquet delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/assets/html.results delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/assets/html.row delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/assets/list-nested.json delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/assets/list.json delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/assets/mysql.sql delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/assets/object-per-line.json delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/assets/objects.json 
delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/assets/out.html delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/assets/output.results delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/assets/output.rows delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/assets/proto.bin delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/assets/row.template delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/assets/schema.capnp delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/assets/schema.proto delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/assets/some_data.sql delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/assets/some_data.tsv delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/assets/time.parquet delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/binary.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/binary.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/csv-tsv.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/csv-tsv.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/intro.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/intro.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/json/exporting.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/json/exporting.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/json/formats.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/json/formats.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/json/inference.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/json/inference.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/json/intro.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/json/intro.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/json/loading.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/json/loading.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/json/other.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/json/other.md.hash delete mode 100644 
i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/json/schema.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/json/schema.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/parquet.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/parquet.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/sql.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/sql.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/templates-regex.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/templates-regex.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-ingestion-index.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-ingestion-index.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-sources-index.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-sources-index.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/dbms/_category_.yml delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/dbms/dynamodb/index.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/dbms/dynamodb/index.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/dbms/jdbc-with-clickhouse.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/dbms/jdbc-with-clickhouse.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/dbms/mysql/index.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/dbms/mysql/index.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/dbms/odbc-with-clickhouse.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/dbms/odbc-with-clickhouse.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/dbms/postgresql/connecting-to-postgresql.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/dbms/postgresql/connecting-to-postgresql.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/dbms/postgresql/data-type-mappings.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/dbms/postgresql/inserting-data.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/dbms/postgresql/inserting-data.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/dbms/postgresql/postgres-vs-clickhouse.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/dbms/postgresql/rewriting-postgres-queries.md.hash delete mode 100644 
i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/emqx/index.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/emqx/index.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/etl-tools/_category_.yml delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/etl-tools/airbyte-and-clickhouse.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/etl-tools/airbyte-and-clickhouse.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/etl-tools/apache-beam.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/etl-tools/apache-beam.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/etl-tools/dbt/index.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/etl-tools/dbt/index.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/etl-tools/dlt-and-clickhouse.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/etl-tools/dlt-and-clickhouse.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/etl-tools/fivetran/index.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/etl-tools/fivetran/index.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/etl-tools/nifi-and-clickhouse.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/etl-tools/nifi-and-clickhouse.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/etl-tools/vector-to-clickhouse.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/etl-tools/vector-to-clickhouse.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/gcs/index.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/gcs/index.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/google-dataflow/dataflow.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/google-dataflow/dataflow.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/google-dataflow/java-runner.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/google-dataflow/java-runner.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/google-dataflow/templates.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/google-dataflow/templates.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/google-dataflow/templates/bigquery-to-clickhouse.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/google-dataflow/templates/bigquery-to-clickhouse.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/insert-local-files.md delete mode 100644 
i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/insert-local-files.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/kafka/confluent/_category_.yml delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/kafka/confluent/custom-connector.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/kafka/confluent/custom-connector.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/kafka/confluent/index.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/kafka/confluent/index.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/kafka/confluent/kafka-connect-http.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/kafka/confluent/kafka-connect-http.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/kafka/index.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/kafka/index.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/kafka/kafka-clickhouse-connect-sink.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/kafka/kafka-clickhouse-connect-sink.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/kafka/kafka-connect-jdbc.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/kafka/kafka-connect-jdbc.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/kafka/kafka-table-engine-named-collections.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/kafka/kafka-table-engine-named-collections.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/kafka/kafka-table-engine.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/kafka/kafka-table-engine.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/kafka/kafka-vector.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/kafka/kafka-vector.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/kafka/msk/index.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/kafka/msk/index.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/redshift/index.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/redshift/index.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/s3-minio.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/s3-minio.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/s3/index.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/s3/index.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/s3/performance.md delete mode 100644 
i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/s3/performance.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-sources/cassandra.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-sources/cassandra.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-sources/deltalake.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-sources/deltalake.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-sources/hive.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-sources/hive.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-sources/hudi.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-sources/hudi.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-sources/iceberg.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-sources/iceberg.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-sources/mongodb.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-sources/mongodb.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-sources/mysql.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-sources/mysql.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-sources/nats.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-sources/nats.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-sources/postgres.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-sources/postgres.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-sources/rabbitmq.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-sources/rabbitmq.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-sources/redis.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-sources/redis.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-sources/rocksdb.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-sources/rocksdb.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-sources/sqlite.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-sources/sqlite.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/_category_.yml delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/astrato-and-clickhouse.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/astrato-and-clickhouse.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/chartbrew-and-clickhouse.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/chartbrew-and-clickhouse.md.hash delete mode 100644 
i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/deepnote.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/deepnote.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/draxlr-and-clickhouse.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/draxlr-and-clickhouse.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/embeddable-and-clickhouse.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/embeddable-and-clickhouse.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/explo-and-clickhouse.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/explo-and-clickhouse.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/grafana/config.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/grafana/config.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/grafana/index.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/grafana/index.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/grafana/query-builder.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/grafana/query-builder.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/hashboard-and-clickhouse.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/hashboard-and-clickhouse.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/index.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/index.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/looker-and-clickhouse.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/looker-and-clickhouse.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/looker-studio-and-clickhouse.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/looker-studio-and-clickhouse.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/luzmo-and-clickhouse.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/luzmo-and-clickhouse.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/metabase-and-clickhouse.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/metabase-and-clickhouse.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/mitzu-and-clickhouse.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/mitzu-and-clickhouse.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/omni-and-clickhouse.md 
delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/omni-and-clickhouse.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/powerbi-and-clickhouse.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/powerbi-and-clickhouse.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/quicksight-and-clickhouse.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/quicksight-and-clickhouse.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/rocketbi-and-clickhouse.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/rocketbi-and-clickhouse.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/splunk-and-clickhouse.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/splunk-and-clickhouse.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/superset-and-clickhouse.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/superset-and-clickhouse.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/tableau/tableau-analysis-tips.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/tableau/tableau-analysis-tips.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/tableau/tableau-and-clickhouse.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/tableau/tableau-and-clickhouse.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/tableau/tableau-connection-tips.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/tableau/tableau-connection-tips.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/tableau/tableau-online-and-clickhouse.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/tableau/tableau-online-and-clickhouse.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/zingdata-and-clickhouse.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/zingdata-and-clickhouse.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/index.mdx delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/index.mdx.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/_category_.yml delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/go/index.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/go/index.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/index.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/index.md.hash delete mode 100644 
i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/java/client-v1.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/java/client.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/java/client/_snippets/_v0_7.mdx delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/java/client/_snippets/_v0_7.mdx.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/java/client/_snippets/_v0_8.mdx delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/java/client/_snippets/_v0_8.mdx.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/java/client/client.mdx delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/java/client/client.mdx.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/java/index.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/java/index.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/java/jdbc.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/java/jdbc/_snippets/_v0_7.mdx delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/java/jdbc/_snippets/_v0_7.mdx.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/java/jdbc/_snippets/_v0_8.mdx delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/java/jdbc/_snippets/_v0_8.mdx.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/java/jdbc/jdbc.mdx delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/java/jdbc/jdbc.mdx.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/java/r2dbc.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/java/r2dbc.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/js.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/js.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/python/index.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/python/index.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/rust.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/rust.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/migration/_category_.yml delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/migration/clickhouse-local-etl.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/migration/clickhouse-local-etl.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/migration/clickhouse-to-cloud.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/migration/clickhouse-to-cloud.md.hash delete mode 100644 
i18n/jp/docusaurus-plugin-content-docs/current/integrations/migration/etl-tool-to-clickhouse.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/migration/etl-tool-to-clickhouse.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/migration/index.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/migration/index.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/migration/object-storage-to-clickhouse.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/migration/object-storage-to-clickhouse.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/migration/overview.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/migration/overview.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/migration/rockset.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/migration/rockset.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/migration/upload-a-csv-file.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/migration/upload-a-csv-file.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/misc/index.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/misc/index.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/prometheus.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/prometheus.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/sql-clients/_category_.yml delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/sql-clients/datagrip.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/sql-clients/datagrip.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/sql-clients/dbeaver.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/sql-clients/dbeaver.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/sql-clients/dbvisualizer.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/sql-clients/dbvisualizer.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/sql-clients/index.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/sql-clients/index.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/sql-clients/jupysql.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/sql-clients/jupysql.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/sql-clients/marimo.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/sql-clients/marimo.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/sql-clients/qstudio.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/sql-clients/qstudio.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/sql-clients/sql-console.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/sql-clients/sql-console.md.hash delete mode 100644 
i18n/jp/docusaurus-plugin-content-docs/current/integrations/sql-clients/tablum.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/sql-clients/tablum.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/tools/data-integration/easypanel/index.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/tools/data-integration/easypanel/index.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/tools/data-integration/index.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/tools/data-integration/index.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/tools/data-integration/retool/index.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/tools/data-integration/retool/index.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/tools/data-integration/splunk/index.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/tools/data-integration/splunk/index.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/tools/index.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/integrations/tools/index.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/cli.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/cli.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/cpp.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/cpp.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Arrow/Arrow.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Arrow/Arrow.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Arrow/ArrowStream.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Arrow/ArrowStream.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Avro/Avro.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Avro/Avro.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Avro/AvroConfluent.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Avro/AvroConfluent.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Avro/_snippets/data-types-matching.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Avro/_snippets/data-types-matching.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/BSONEachRow.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/BSONEachRow.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/CSV/CSV.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/CSV/CSV.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/CSV/CSVWithNames.md delete mode 100644 
i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/CSV/CSVWithNames.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/CSV/CSVWithNamesAndTypes.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/CSV/CSVWithNamesAndTypes.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/CapnProto.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/CapnProto.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/CustomSeparated/CustomSeparated.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/CustomSeparated/CustomSeparated.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/CustomSeparated/CustomSeparatedIgnoreSpaces.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/CustomSeparated/CustomSeparatedIgnoreSpaces.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/CustomSeparated/CustomSeparatedIgnoreSpacesWithNames.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/CustomSeparated/CustomSeparatedIgnoreSpacesWithNames.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/CustomSeparated/CustomSeparatedIgnoreSpacesWithNamesAndTypes.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/CustomSeparated/CustomSeparatedIgnoreSpacesWithNamesAndTypes.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/CustomSeparated/CustomSeparatedWithNames.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/CustomSeparated/CustomSeparatedWithNames.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/CustomSeparated/CustomSeparatedWithNamesAndTypes.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/CustomSeparated/CustomSeparatedWithNamesAndTypes.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/DWARF.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/DWARF.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Form.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Form.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/HiveText.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/HiveText.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSON.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSON.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONAsObject.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONAsObject.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONAsString.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONAsString.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONColumns.md delete mode 100644 
i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONColumns.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONColumnsWithMetadata.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONColumnsWithMetadata.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompact.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompact.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactColumns.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactColumns.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactEachRow.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactEachRow.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactEachRowWithNames.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactEachRowWithNames.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactEachRowWithNamesAndTypes.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactEachRowWithNamesAndTypes.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactStrings.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactStrings.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactStringsEachRow.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactStringsEachRow.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactStringsEachRowWithNames.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactStringsEachRowWithNames.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactStringsEachRowWithNamesAndTypes.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactStringsEachRowWithNamesAndTypes.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONEachRow.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONEachRow.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONEachRowWithProgress.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONEachRowWithProgress.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONLines.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONLines.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONObjectEachRow.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONObjectEachRow.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONStrings.md delete mode 100644 
i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONStrings.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONStringsEachRow.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONStringsEachRow.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONStringsEachRowWithProgress.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONStringsEachRowWithProgress.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/PrettyJSONEachRow.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/PrettyJSONEachRow.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/format-settings.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/format-settings.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/LineAsString/LineAsString.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/LineAsString/LineAsString.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/LineAsString/LineAsStringWithNames.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/LineAsString/LineAsStringWithNames.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/LineAsString/LineAsStringWithNamesAndTypes.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/LineAsString/LineAsStringWithNamesAndTypes.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Markdown.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Markdown.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/MsgPack.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/MsgPack.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/MySQLDump.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/MySQLDump.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/MySQLWire.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/MySQLWire.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Native.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Native.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Npy.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Npy.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Null.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Null.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/ODBCDriver2.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/ODBCDriver2.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/ORC.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/ORC.md.hash delete mode 
100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/One.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/One.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Parquet/Parquet.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Parquet/Parquet.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Parquet/ParquetMetadata.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Parquet/ParquetMetadata.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/PostgreSQLWire.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/PostgreSQLWire.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Pretty/Pretty.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Pretty/Pretty.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Pretty/PrettyCompact.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Pretty/PrettyCompact.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Pretty/PrettyCompactMonoBlock.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Pretty/PrettyCompactMonoBlock.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Pretty/PrettyCompactNoEscapes.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Pretty/PrettyCompactNoEscapes.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Pretty/PrettyCompactNoEscapesMonoBlock.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Pretty/PrettyCompactNoEscapesMonoBlock.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Pretty/PrettyMonoBlock.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Pretty/PrettyMonoBlock.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Pretty/PrettyNoEscapes.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Pretty/PrettyNoEscapes.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Pretty/PrettyNoEscapesMonoBlock.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Pretty/PrettyNoEscapesMonoBlock.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Pretty/PrettySpace.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Pretty/PrettySpace.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Pretty/PrettySpaceMonoBlock.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Pretty/PrettySpaceMonoBlock.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Pretty/PrettySpaceNoEscapes.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Pretty/PrettySpaceNoEscapes.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Pretty/PrettySpaceNoEscapesMonoBlock.md delete mode 100644 
i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Pretty/PrettySpaceNoEscapesMonoBlock.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Pretty/_snippets/common-pretty-format-settings.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Pretty/_snippets/common-pretty-format-settings.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Prometheus.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Prometheus.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Protobuf/Protobuf.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Protobuf/Protobuf.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Protobuf/ProtobufList.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Protobuf/ProtobufList.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Protobuf/ProtobufSingle.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Protobuf/ProtobufSingle.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/RawBLOB.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/RawBLOB.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Regexp.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Regexp.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/RowBinary/RowBinary.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/RowBinary/RowBinary.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/RowBinary/RowBinaryWithDefaults.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/RowBinary/RowBinaryWithDefaults.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/RowBinary/RowBinaryWithNames.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/RowBinary/RowBinaryWithNames.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/RowBinary/RowBinaryWithNamesAndTypes.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/RowBinary/RowBinaryWithNamesAndTypes.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/RowBinary/_snippets/common-row-binary-format-settings.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/RowBinary/_snippets/common-row-binary-format-settings.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/SQLInsert.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/SQLInsert.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/TabSeparated/TSKV.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/TabSeparated/TSKV.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/TabSeparated/TabSeparated.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/TabSeparated/TabSeparated.md.hash 
delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/TabSeparated/TabSeparatedRaw.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/TabSeparated/TabSeparatedRaw.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/TabSeparated/TabSeparatedRawWithNames.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/TabSeparated/TabSeparatedRawWithNames.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/TabSeparated/TabSeparatedRawWithNamesAndTypes.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/TabSeparated/TabSeparatedRawWithNamesAndTypes.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/TabSeparated/TabSeparatedWithNames.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/TabSeparated/TabSeparatedWithNames.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/TabSeparated/TabSeparatedWithNamesAndTypes.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/TabSeparated/TabSeparatedWithNamesAndTypes.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Template/Template.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Template/Template.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Template/TemplateIgnoreSpaces.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Template/TemplateIgnoreSpaces.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Values.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Values.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Vertical.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Vertical.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/XML.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/XML.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/grpc.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/grpc.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/http.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/http.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/images/mysql0.png delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/images/mysql1.png delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/images/mysql2.png delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/images/mysql3.png delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/jdbc.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/jdbc.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/mysql.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/mysql.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/native-clients-and-interfaces-index.md.hash delete mode 100644 
i18n/jp/docusaurus-plugin-content-docs/current/interfaces/native-clients-interfaces-index.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/native-clients-interfaces-index.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/odbc.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/odbc.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/overview.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/overview.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/postgresql.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/postgresql.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/prometheus.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/prometheus.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/schema-inference.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/schema-inference.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/ssh.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/ssh.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/tcp.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/tcp.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/third-party/client-libraries.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/third-party/client-libraries.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/third-party/gui.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/third-party/gui.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/third-party/index.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/third-party/index.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/third-party/integrations.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/third-party/integrations.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/third-party/proxy.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/interfaces/third-party/proxy.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/intro.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/intro.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/introduction-index.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/introduction-index.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/managing-data/core-concepts/academic_overview.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/managing-data/core-concepts/academic_overview.mdx delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/managing-data/core-concepts/academic_overview.mdx.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/managing-data/core-concepts/index.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/managing-data/core-concepts/index.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/managing-data/core-concepts/merges.md delete mode 100644 
i18n/jp/docusaurus-plugin-content-docs/current/managing-data/core-concepts/merges.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/managing-data/core-concepts/partitions.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/managing-data/core-concepts/partitions.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/managing-data/core-concepts/parts.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/managing-data/core-concepts/parts.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/managing-data/core-concepts/primary-indexes.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/managing-data/core-concepts/primary-indexes.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/managing-data/core-concepts/shards.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/managing-data/core-concepts/shards.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/managing-data/deleting-data/delete_mutations.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/managing-data/deleting-data/delete_mutations.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/managing-data/deleting-data/index.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/managing-data/deleting-data/index.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/managing-data/deleting-data/overview.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/managing-data/deleting-data/overview.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/managing-data/drop_partition.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/managing-data/drop_partition.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/managing-data/truncate.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/managing-data/truncate.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/managing-data/updating-data/index.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/managing-data/updating-data/index.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/managing-data/updating-data/overview.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/managing-data/updating-data/overview.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/managing-data/updating-data/update_mutations.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/managing-data/updating-data/update_mutations.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/materialized-view/incremental-materialized-view.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/materialized-view/incremental-materialized-view.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/materialized-view/index.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/materialized-view/index.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/materialized-view/refreshable-materialized-view.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/materialized-view/refreshable-materialized-view.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/migrations/bigquery/equivalent-concepts.md delete mode 100644 
i18n/jp/docusaurus-plugin-content-docs/current/migrations/bigquery/equivalent-concepts.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/migrations/bigquery/index.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/migrations/bigquery/index.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/migrations/bigquery/loading-data.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/migrations/bigquery/loading-data.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/migrations/bigquery/migrating-to-clickhouse-cloud.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/migrations/bigquery/migrating-to-clickhouse-cloud.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/migrations/index.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/migrations/index.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/migrations/postgres/appendix.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/migrations/postgres/appendix.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/migrations/postgres/data-modeling-techniques.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/migrations/postgres/data-modeling-techniques.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/migrations/postgres/dataset.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/migrations/postgres/dataset.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/migrations/postgres/designing-schemas.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/migrations/postgres/index.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/migrations/postgres/index.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/migrations/postgres/overview.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/migrations/postgres/overview.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/migrations/postgres/replacing-merge-tree.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/migrations/postgres/rewriting-queries.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/migrations/postgres/rewriting-queries.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/migrations/snowflake.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/migrations/snowflake.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/native-protocol/_category_.yml delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/native-protocol/basics.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/native-protocol/basics.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/native-protocol/client.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/native-protocol/client.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/native-protocol/columns.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/native-protocol/columns.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/native-protocol/hash.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/native-protocol/hash.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/native-protocol/server.md delete mode 100644 
i18n/jp/docusaurus-plugin-content-docs/current/native-protocol/server.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/_category_.yml delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/_troubleshooting.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/_troubleshooting.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/allocation-profiling.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/allocation-profiling.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/analyzer.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/analyzer.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/backup.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/backup.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/caches.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/caches.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/cluster-discovery.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/cluster-discovery.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/configuration-files.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/configuration-files.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/external-authenticators/http.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/external-authenticators/http.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/external-authenticators/index.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/external-authenticators/index.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/external-authenticators/kerberos.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/external-authenticators/kerberos.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/external-authenticators/ldap.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/external-authenticators/ldap.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/external-authenticators/ssl-x509.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/external-authenticators/ssl-x509.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/monitoring.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/monitoring.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/named-collections.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/named-collections.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/opentelemetry.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/opentelemetry.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/optimizing-performance/profile-guided-optimization.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/optimizing-performance/profile-guided-optimization.md.hash delete mode 100644 
i18n/jp/docusaurus-plugin-content-docs/current/operations/optimizing-performance/sampling-query-profiler.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/optimizing-performance/sampling-query-profiler.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/performance-test.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/performance-test.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/query-cache.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/query-cache.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/query-condition-cache.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/query-condition-cache.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/quotas.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/quotas.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/server-configuration-parameters/_server_settings_outside_source.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/server-configuration-parameters/_server_settings_outside_source.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/server-configuration-parameters/_snippets/_system-log-parameters.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/server-configuration-parameters/_snippets/_system-log-parameters.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/server-configuration-parameters/settings.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/server-configuration-parameters/settings.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/settings/composable-protocols.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/settings/composable-protocols.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/settings/constraints-on-settings.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/settings/constraints-on-settings.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/settings/index.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/settings/index.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/settings/memory-overcommit.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/settings/memory-overcommit.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/settings/merge-tree-settings.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/settings/merge-tree-settings.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/settings/overview.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/settings/overview.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/settings/permissions-for-queries.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/settings/permissions-for-queries.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/settings/query-complexity.md delete mode 100644 
i18n/jp/docusaurus-plugin-content-docs/current/operations/settings/query-complexity.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/settings/server-overload.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/settings/server-overload.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/settings/settings-formats.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/settings/settings-formats.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/settings/settings-profiles.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/settings/settings-profiles.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/settings/settings-query-level.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/settings/settings-query-level.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/settings/settings-users.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/settings/settings-users.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/settings/settings.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/settings/settings.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/ssl-zookeeper.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/ssl-zookeeper.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/startup-scripts.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/startup-scripts.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/storing-data.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/storing-data.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/asynchronous_insert_log.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/asynchronous_insert_log.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/asynchronous_inserts.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/asynchronous_inserts.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/asynchronous_loader.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/asynchronous_loader.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/asynchronous_metric_log.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/asynchronous_metric_log.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/asynchronous_metrics.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/asynchronous_metrics.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/azure_queue_settings.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/azure_queue_settings.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/backup_log.md delete mode 100644 
i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/backup_log.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/blob_storage_log.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/blob_storage_log.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/build_options.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/build_options.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/clusters.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/clusters.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/columns.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/columns.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/contributors.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/contributors.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/crash-log.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/crash-log.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/current-roles.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/current-roles.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/dashboards.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/dashboards.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/data_skipping_indices.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/data_skipping_indices.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/data_type_families.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/data_type_families.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/database_engines.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/database_engines.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/databases.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/databases.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/detached_parts.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/detached_parts.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/detached_tables.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/detached_tables.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/dictionaries.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/dictionaries.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/disks.md delete mode 100644 
i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/disks.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/distributed_ddl_queue.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/distributed_ddl_queue.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/distribution_queue.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/distribution_queue.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/dns_cache.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/dns_cache.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/dropped_tables.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/dropped_tables.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/dropped_tables_parts.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/dropped_tables_parts.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/enabled-roles.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/enabled-roles.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/error_log.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/error_log.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/errors.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/errors.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/events.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/events.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/functions.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/functions.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/grants.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/grants.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/graphite_retentions.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/graphite_retentions.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/histogram_metrics.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/histogram_metrics.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/iceberg_history.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/iceberg_history.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/index.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/index.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/information_schema.md delete mode 100644 
i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/information_schema.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/jemalloc_bins.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/jemalloc_bins.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/kafka_consumers.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/kafka_consumers.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/latency_buckets.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/latency_buckets.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/latency_log.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/latency_log.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/licenses.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/licenses.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/merge_tree_settings.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/merge_tree_settings.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/merges.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/merges.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/metric_log.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/metric_log.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/metrics.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/metrics.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/moves.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/moves.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/mutations.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/mutations.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/numbers.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/numbers.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/numbers_mt.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/numbers_mt.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/one.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/one.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/opentelemetry_span_log.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/opentelemetry_span_log.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/overview.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/overview.md.hash 
delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/part_log.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/part_log.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/parts.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/parts.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/parts_columns.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/parts_columns.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/processes.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/processes.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/processors_profile_log.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/processors_profile_log.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/projections.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/projections.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/query_cache.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/query_cache.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/query_condition_cache.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/query_condition_cache.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/query_log.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/query_log.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/query_metric_log.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/query_metric_log.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/query_thread_log.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/query_thread_log.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/query_views_log.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/query_views_log.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/quota_limits.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/quota_limits.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/quota_usage.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/quota_usage.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/quotas.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/quotas.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/quotas_usage.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/quotas_usage.md.hash delete mode 100644 
i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/replicas.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/replicas.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/replicated_fetches.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/replicated_fetches.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/replication_queue.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/replication_queue.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/resources.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/resources.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/role-grants.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/role-grants.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/roles.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/roles.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/row_policies.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/row_policies.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/s3_queue_settings.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/s3_queue_settings.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/scheduler.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/scheduler.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/schema_inference_cache.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/schema_inference_cache.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/server_settings.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/server_settings.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/session_log.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/session_log.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/settings.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/settings.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/settings_changes.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/settings_changes.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/settings_profile_elements.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/settings_profile_elements.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/settings_profiles.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/settings_profiles.md.hash delete mode 
100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/stack_trace.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/stack_trace.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/storage_policies.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/storage_policies.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/symbols.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/symbols.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/system_warnings.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/system_warnings.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/table_engines.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/table_engines.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/tables.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/tables.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/text_log.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/text_log.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/time_zones.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/time_zones.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/trace_log.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/trace_log.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/user_processes.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/user_processes.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/users.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/users.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/view_refreshes.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/view_refreshes.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/workloads.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/workloads.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/zookeeper.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/zookeeper.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/zookeeper_connection.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/zookeeper_connection.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/zookeeper_log.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/system-tables/zookeeper_log.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/tips.md 
delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/tips.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/update.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/update.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/userspace-page-cache.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/userspace-page-cache.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/utilities/backupview.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/utilities/backupview.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/utilities/clickhouse-benchmark.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/utilities/clickhouse-benchmark.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/utilities/clickhouse-compressor.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/utilities/clickhouse-compressor.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/utilities/clickhouse-disks.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/utilities/clickhouse-disks.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/utilities/clickhouse-format.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/utilities/clickhouse-format.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/utilities/clickhouse-keeper-client.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/utilities/clickhouse-keeper-client.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/utilities/clickhouse-local.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/utilities/clickhouse-local.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/utilities/clickhouse-obfuscator.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/utilities/clickhouse-obfuscator.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/utilities/index.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/utilities/index.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/utilities/odbc-bridge.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/utilities/odbc-bridge.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/workload-scheduling.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/operations/workload-scheduling.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/quick-start.mdx.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/settings/beta-and-experimental-features.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/_category_.yml delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/_category_.yml delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/combinators.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/combinators.md.hash delete mode 100644 
i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/grouping_function.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/grouping_function.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/index.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/index.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/parametric-functions.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/parametric-functions.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/aggthrow.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/aggthrow.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/analysis_of_variance.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/analysis_of_variance.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/any.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/any.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/anyheavy.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/anyheavy.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/anylast.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/anylast.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/approxtopk.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/approxtopk.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/approxtopsum.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/approxtopsum.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/argmax.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/argmax.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/argmin.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/argmin.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/arrayconcatagg.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/arrayconcatagg.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/avg.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/avg.md.hash delete mode 100644 
i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/avgweighted.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/avgweighted.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/boundrat.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/boundrat.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/categoricalinformationvalue.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/categoricalinformationvalue.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/contingency.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/contingency.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/corr.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/corr.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/corrmatrix.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/corrmatrix.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/corrstable.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/corrstable.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/count.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/count.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/covarpop.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/covarpop.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/covarpopmatrix.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/covarpopmatrix.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/covarpopstable.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/covarpopstable.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/covarsamp.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/covarsamp.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/covarsampmatrix.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/covarsampmatrix.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/covarsampstable.md delete mode 100644 
i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/covarsampstable.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/cramersv.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/cramersv.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/cramersvbiascorrected.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/cramersvbiascorrected.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/deltasum.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/deltasum.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/deltasumtimestamp.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/deltasumtimestamp.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/distinctdynamictypes.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/distinctdynamictypes.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/distinctjsonpaths.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/distinctjsonpaths.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/entropy.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/entropy.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/estimateCompressionRatio.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/estimateCompressionRatio.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/exponentialmovingaverage.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/exponentialmovingaverage.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/exponentialtimedecayedavg.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/exponentialtimedecayedavg.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/exponentialtimedecayedcount.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/exponentialtimedecayedcount.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/exponentialtimedecayedmax.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/exponentialtimedecayedmax.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/exponentialtimedecayedsum.md delete mode 100644 
i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/exponentialtimedecayedsum.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/first_value.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/first_value.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/flame_graph.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/flame_graph.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/grouparray.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/grouparray.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/grouparrayarray.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/grouparrayarray.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/grouparrayinsertat.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/grouparrayinsertat.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/grouparrayintersect.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/grouparrayintersect.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/grouparraylast.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/grouparraylast.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/grouparraymovingavg.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/grouparraymovingavg.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/grouparraymovingsum.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/grouparraymovingsum.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/grouparraysample.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/grouparraysample.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/grouparraysorted.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/grouparraysorted.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/groupbitand.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/groupbitand.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/groupbitmap.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/groupbitmap.md.hash delete mode 100644 
i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/groupbitmapand.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/groupbitmapand.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/groupbitmapor.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/groupbitmapor.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/groupbitmapxor.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/groupbitmapxor.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/groupbitor.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/groupbitor.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/groupbitxor.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/groupbitxor.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/groupconcat.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/groupconcat.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/groupuniqarray.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/groupuniqarray.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/index.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/index.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/intervalLengthSum.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/intervalLengthSum.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/kolmogorovsmirnovtest.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/kolmogorovsmirnovtest.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/kurtpop.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/kurtpop.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/kurtsamp.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/kurtsamp.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/largestTriangleThreeBuckets.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/largestTriangleThreeBuckets.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/last_value.md delete mode 100644 
i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/last_value.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/mannwhitneyutest.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/mannwhitneyutest.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/max.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/max.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/maxintersections.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/maxintersections.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/maxintersectionsposition.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/maxintersectionsposition.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/maxmap.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/maxmap.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/meanztest.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/meanztest.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/median.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/median.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/min.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/min.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/minmap.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/minmap.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/quantile.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/quantile.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/quantileGK.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/quantileGK.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/quantilebfloat16.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/quantilebfloat16.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/quantileddsketch.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/quantileddsketch.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/quantiledeterministic.md 
delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/quantiledeterministic.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/quantileexact.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/quantileexact.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/quantileexactweighted.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/quantileexactweighted.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/quantileexactweightedinterpolated.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/quantileexactweightedinterpolated.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/quantileinterpolatedweighted.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/quantileinterpolatedweighted.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/quantiles.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/quantiles.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/quantiletdigest.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/quantiletdigest.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/quantiletdigestweighted.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/quantiletdigestweighted.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/quantiletiming.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/quantiletiming.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/quantiletimingweighted.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/quantiletimingweighted.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/rankCorr.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/rankCorr.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/simplelinearregression.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/simplelinearregression.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/singlevalueornull.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/singlevalueornull.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/skewpop.md delete mode 100644 
i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/skewpop.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/skewsamp.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/skewsamp.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/sparkbar.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/sparkbar.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/stddevpop.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/stddevpop.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/stddevpopstable.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/stddevpopstable.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/stddevsamp.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/stddevsamp.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/stddevsampstable.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/stddevsampstable.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/stochasticlinearregression.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/stochasticlinearregression.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/stochasticlogisticregression.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/stochasticlogisticregression.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/studentttest.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/studentttest.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/sum.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/sum.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/sumcount.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/sumcount.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/sumkahan.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/sumkahan.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/summap.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/summap.md.hash delete mode 100644 
i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/summapwithoverflow.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/summapwithoverflow.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/sumwithoverflow.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/sumwithoverflow.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/theilsu.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/theilsu.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/topk.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/topk.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/topkweighted.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/topkweighted.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/uniq.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/uniq.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/uniqcombined.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/uniqcombined.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/uniqcombined64.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/uniqcombined64.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/uniqexact.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/uniqexact.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/uniqhll12.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/uniqhll12.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/uniqthetasketch.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/uniqthetasketch.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/varpop.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/varpop.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/varpopstable.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/varpopstable.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/varsamp.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/varsamp.md.hash delete mode 100644 
i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/varsampstable.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/varsampstable.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/welchttest.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/aggregate-functions/reference/welchttest.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/data-types/aggregatefunction.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/data-types/aggregatefunction.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/data-types/array.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/data-types/array.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/data-types/boolean.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/data-types/boolean.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/data-types/data-types-binary-encoding.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/data-types/data-types-binary-encoding.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/data-types/date.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/data-types/date.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/data-types/date32.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/data-types/date32.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/data-types/datetime.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/data-types/datetime.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/data-types/datetime64.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/data-types/datetime64.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/data-types/decimal.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/data-types/decimal.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/data-types/domains/index.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/data-types/domains/index.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/data-types/dynamic.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/data-types/dynamic.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/data-types/enum.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/data-types/enum.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/data-types/fixedstring.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/data-types/fixedstring.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/data-types/float.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/data-types/float.md.hash delete mode 100644 
i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/data-types/geo.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/data-types/geo.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/data-types/index.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/data-types/index.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/data-types/int-uint.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/data-types/int-uint.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/data-types/ipv4.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/data-types/ipv4.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/data-types/ipv6.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/data-types/ipv6.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/data-types/json.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/data-types/json.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/data-types/lowcardinality.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/data-types/lowcardinality.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/data-types/map.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/data-types/map.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/data-types/nested-data-structures/index.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/data-types/nested-data-structures/index.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/data-types/newjson.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/data-types/newjson.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/data-types/nullable.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/data-types/nullable.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/data-types/simpleaggregatefunction.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/data-types/simpleaggregatefunction.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/data-types/special-data-types/expression.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/data-types/special-data-types/expression.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/data-types/special-data-types/index.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/data-types/special-data-types/index.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/data-types/special-data-types/interval.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/data-types/special-data-types/interval.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/data-types/special-data-types/nothing.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/data-types/special-data-types/nothing.md.hash delete mode 100644 
i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/data-types/special-data-types/set.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/data-types/special-data-types/set.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/data-types/string.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/data-types/string.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/data-types/time.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/data-types/time.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/data-types/time64.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/data-types/time64.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/data-types/tuple.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/data-types/tuple.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/data-types/uuid.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/data-types/uuid.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/data-types/variant.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/data-types/variant.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/dictionaries/_snippet_dictionary_in_cloud.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/dictionaries/_snippet_dictionary_in_cloud.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/dictionaries/index.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/dictionaries/index.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/distributed-ddl.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/distributed-ddl.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/formats.mdx delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/formats.mdx.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/functions/_category_.yml delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/functions/arithmetic-functions.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/functions/arithmetic-functions.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/functions/array-functions.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/functions/array-functions.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/functions/array-join.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/functions/array-join.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/functions/bit-functions.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/functions/bit-functions.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/functions/bitmap-functions.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/functions/bitmap-functions.md.hash delete mode 100644 
i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/functions/comparison-functions.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/functions/comparison-functions.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/functions/conditional-functions.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/functions/conditional-functions.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/functions/date-time-functions.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/functions/date-time-functions.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/functions/distance-functions.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/functions/distance-functions.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/functions/encoding-functions.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/functions/encoding-functions.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/functions/encryption-functions.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/functions/encryption-functions.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/functions/ext-dict-functions.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/functions/ext-dict-functions.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/functions/files.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/functions/files.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/functions/functions-for-nulls.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/functions/functions-for-nulls.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/functions/geo/coordinates.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/functions/geo/coordinates.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/functions/geo/geohash.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/functions/geo/geohash.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/functions/geo/h3.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/functions/geo/h3.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/functions/geo/index.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/functions/geo/index.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/functions/geo/polygon.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/functions/geo/polygon.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/functions/geo/s2.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/functions/geo/s2.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/functions/geo/svg.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/functions/geo/svg.md.hash delete mode 100644 
i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/functions/hash-functions.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/functions/hash-functions.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/functions/in-functions.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/functions/in-functions.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/functions/index.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/functions/index.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/functions/introspection.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/functions/introspection.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/functions/ip-address-functions.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/functions/ip-address-functions.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/functions/json-functions.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/functions/json-functions.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/functions/logical-functions.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/functions/logical-functions.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/functions/machine-learning-functions.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/functions/machine-learning-functions.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/functions/math-functions.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/functions/math-functions.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/functions/nlp-functions.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/functions/nlp-functions.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/functions/other-functions.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/functions/other-functions.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/functions/overview.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/functions/overview.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/functions/random-functions.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/functions/random-functions.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/functions/regular-functions-index.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/functions/regular-functions-index.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/functions/rounding-functions.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/functions/rounding-functions.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/functions/splitting-merging-functions.md delete mode 100644 
i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/functions/splitting-merging-functions.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/functions/string-functions.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/functions/string-functions.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/functions/string-replace-functions.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/functions/string-replace-functions.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/functions/string-search-functions.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/functions/string-search-functions.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/functions/time-series-functions.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/functions/time-series-functions.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/functions/time-window-functions.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/functions/time-window-functions.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/functions/tuple-functions.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/functions/tuple-functions.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/functions/tuple-map-functions.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/functions/tuple-map-functions.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/functions/type-conversion-functions.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/functions/type-conversion-functions.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/functions/udf.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/functions/udf.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/functions/ulid-functions.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/functions/ulid-functions.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/functions/uniqtheta-functions.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/functions/uniqtheta-functions.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/functions/url-functions.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/functions/url-functions.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/functions/uuid-functions.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/functions/uuid-functions.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/functions/ym-dict-functions.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/functions/ym-dict-functions.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/index.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/index.md.hash delete mode 100644 
i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/operators/distributed-ddl.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/operators/distributed-ddl.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/operators/exists.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/operators/exists.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/operators/in.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/operators/in.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/operators/index.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/operators/index.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/sql-reference-links.json delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/_category_.yml delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/alter/apply-deleted-mask.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/alter/apply-deleted-mask.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/alter/column.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/alter/column.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/alter/comment.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/alter/comment.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/alter/constraint.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/alter/constraint.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/alter/database-comment.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/alter/database-comment.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/alter/delete.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/alter/delete.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/alter/index.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/alter/index.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/alter/named-collection.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/alter/named-collection.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/alter/order-by.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/alter/order-by.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/alter/partition.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/alter/partition.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/alter/projection.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/alter/projection.md.hash delete mode 100644 
i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/alter/quota.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/alter/quota.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/alter/role.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/alter/role.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/alter/row-policy.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/alter/row-policy.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/alter/sample-by.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/alter/sample-by.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/alter/setting.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/alter/setting.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/alter/settings-profile.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/alter/settings-profile.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/alter/skipping-index.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/alter/skipping-index.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/alter/statistics.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/alter/statistics.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/alter/ttl.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/alter/ttl.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/alter/update.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/alter/update.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/alter/user.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/alter/user.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/alter/view.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/alter/view.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/attach.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/attach.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/check-grant.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/check-grant.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/check-table.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/check-table.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/create/database.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/create/database.md.hash delete mode 100644 
i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/create/dictionary.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/create/dictionary.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/create/function.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/create/function.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/create/index.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/create/index.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/create/named-collection.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/create/named-collection.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/create/quota.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/create/quota.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/create/role.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/create/role.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/create/row-policy.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/create/row-policy.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/create/settings-profile.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/create/settings-profile.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/create/table.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/create/table.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/create/user.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/create/user.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/create/view.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/create/view.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/delete.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/delete.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/describe-table.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/describe-table.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/detach.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/detach.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/drop.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/drop.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/exchange.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/exchange.md.hash delete mode 100644 
i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/exists.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/exists.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/explain.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/explain.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/grant.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/grant.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/index.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/index.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/insert-into.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/insert-into.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/kill.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/kill.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/move.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/move.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/optimize.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/optimize.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/parallel_with.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/parallel_with.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/rename.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/rename.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/revoke.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/revoke.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/select/all.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/select/all.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/select/array-join.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/select/array-join.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/select/distinct.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/select/distinct.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/select/except.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/select/except.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/select/format.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/select/format.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/select/from.md delete mode 100644 
i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/select/from.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/select/group-by.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/select/group-by.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/select/having.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/select/having.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/select/index.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/select/index.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/select/intersect.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/select/intersect.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/select/into-outfile.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/select/into-outfile.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/select/join.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/select/join.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/select/limit-by.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/select/limit-by.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/select/limit.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/select/limit.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/select/offset.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/select/offset.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/select/order-by.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/select/order-by.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/select/prewhere.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/select/prewhere.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/select/qualify.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/select/qualify.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/select/sample.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/select/sample.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/select/union.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/select/union.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/select/where.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/select/where.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/select/with.md delete mode 
100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/select/with.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/set-role.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/set-role.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/set.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/set.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/show.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/show.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/system.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/system.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/truncate.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/truncate.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/undrop.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/undrop.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/use.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/use.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/watch.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/watch.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/syntax.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/syntax.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/azureBlobStorage.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/azureBlobStorage.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/azureBlobStorageCluster.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/azureBlobStorageCluster.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/cluster.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/cluster.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/deltalake.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/deltalake.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/deltalakeCluster.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/deltalakeCluster.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/dictionary.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/dictionary.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/executable.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/executable.md.hash delete 
mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/file.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/file.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/fileCluster.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/fileCluster.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/format.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/format.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/fuzzJSON.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/fuzzJSON.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/fuzzQuery.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/fuzzQuery.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/gcs.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/gcs.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/generate.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/generate.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/generateSeries.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/generateSeries.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/generate_series.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/generate_series.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/hdfs.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/hdfs.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/hdfsCluster.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/hdfsCluster.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/hudi.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/hudi.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/hudiCluster.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/hudiCluster.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/iceberg.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/iceberg.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/icebergCluster.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/icebergCluster.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/index.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/index.md.hash delete mode 
100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/input.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/input.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/jdbc.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/jdbc.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/loop.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/loop.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/merge.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/merge.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/mergeTreeIndex.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/mergeTreeIndex.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/mergeTreeProjection.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/mergeTreeProjection.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/mongodb.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/mongodb.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/mysql.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/mysql.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/null.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/null.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/numbers.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/numbers.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/odbc.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/odbc.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/postgresql.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/postgresql.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/redis.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/redis.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/remote.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/remote.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/s3.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/s3.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/s3Cluster.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/s3Cluster.md.hash delete mode 100644 
i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/sqlite.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/sqlite.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/timeSeriesData.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/timeSeriesData.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/timeSeriesMetrics.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/timeSeriesMetrics.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/timeSeriesTags.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/timeSeriesTags.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/url.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/url.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/urlCluster.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/urlCluster.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/values.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/values.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/view.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/view.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/zeros.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/zeros.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/transactions.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/transactions.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/window-functions/dense_rank.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/window-functions/dense_rank.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/window-functions/first_value.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/window-functions/first_value.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/window-functions/index.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/window-functions/index.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/window-functions/lagInFrame.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/window-functions/lagInFrame.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/window-functions/last_value.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/window-functions/last_value.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/window-functions/leadInFrame.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/window-functions/leadInFrame.md.hash delete mode 
100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/window-functions/nth_value.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/window-functions/nth_value.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/window-functions/percent_rank.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/window-functions/percent_rank.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/window-functions/rank.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/window-functions/rank.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/window-functions/row_number.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/window-functions/row_number.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/starter-guides/index.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/starter-guides/index.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/tools-and-utilities/static-files-disk-uploader.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/tools-and-utilities/static-files-disk-uploader.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/tutorial.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/tutorial.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/use-cases/data_lake/glue_catalog.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/use-cases/data_lake/glue_catalog.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/use-cases/data_lake/index.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/use-cases/data_lake/index.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/use-cases/data_lake/unity_catalog.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/use-cases/data_lake/unity_catalog.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/use-cases/index.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/use-cases/index.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/use-cases/observability/demo-application.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/use-cases/observability/demo-application.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/use-cases/observability/grafana.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/use-cases/observability/grafana.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/use-cases/observability/index.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/use-cases/observability/index.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/use-cases/observability/integrating-opentelemetry.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/use-cases/observability/integrating-opentelemetry.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/use-cases/observability/introduction.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/use-cases/observability/introduction.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/use-cases/observability/managing-data.md delete mode 100644 
i18n/jp/docusaurus-plugin-content-docs/current/use-cases/observability/managing-data.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/use-cases/observability/schema-design.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/use-cases/observability/schema-design.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/use-cases/time-series/analysis-functions.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/use-cases/time-series/analysis-functions.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/use-cases/time-series/basic-operations.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/use-cases/time-series/basic-operations.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/use-cases/time-series/date-time-data-types.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/use-cases/time-series/date-time-data-types.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/use-cases/time-series/index.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/use-cases/time-series/index.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/use-cases/time-series/query-performance.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/use-cases/time-series/query-performance.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/use-cases/time-series/storage-efficiency.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/use-cases/time-series/storage-efficiency.md.hash delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/whats-new/_category_.yml delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/whats-new/changelog/2017.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/whats-new/changelog/2018.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/whats-new/changelog/2019.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/whats-new/changelog/2020.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/whats-new/changelog/2021.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/whats-new/changelog/2022.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/whats-new/changelog/2023.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/whats-new/changelog/2024.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/whats-new/changelog/_category_.yml delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/whats-new/changelog/cloud.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/whats-new/changelog/index.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/whats-new/roadmap.md delete mode 100644 i18n/jp/docusaurus-plugin-content-docs/current/whats-new/security-changelog.md diff --git a/docs/cloud/onboard/02_migrate/01_migration_guides/03_bigquery/_04_sql_translation_reference.md b/docs/cloud/onboard/02_migrate/01_migration_guides/03_bigquery/_04_sql_translation_reference.md deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/_clients/go/README.md b/i18n/jp/docusaurus-plugin-content-docs/current/_clients/go/README.md deleted file mode 100644 index b24607d9a2b..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/_clients/go/README.md +++ /dev/null @@ -1,7 +0,0 @@ ---- -{} ---- - - - - diff 
--git a/i18n/jp/docusaurus-plugin-content-docs/current/_clients/go/README.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/_clients/go/README.md.hash deleted file mode 100644 index e716610c2a9..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/_clients/go/README.md.hash +++ /dev/null @@ -1 +0,0 @@ -70a50a0eef638444 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/_placeholders/api/_invitations-api-reference.md b/i18n/jp/docusaurus-plugin-content-docs/current/_placeholders/api/_invitations-api-reference.md deleted file mode 100644 index 8857a5a8885..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/_placeholders/api/_invitations-api-reference.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -sidebar_label: '招待' -title: '招待' ---- - - - -## List all invitations {#list-all-invitations} - -このファイルはビルドプロセス中に `clickhouseapi.js` によって生成されます。 内容を変更する必要がある場合は、 `clickhouseapi.js` を編集してください。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/_placeholders/api/_invitations-api-reference.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/_placeholders/api/_invitations-api-reference.md.hash deleted file mode 100644 index 4537140b7bc..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/_placeholders/api/_invitations-api-reference.md.hash +++ /dev/null @@ -1 +0,0 @@ -e0a35465deb99d56 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/_placeholders/api/_keys-api-reference.md b/i18n/jp/docusaurus-plugin-content-docs/current/_placeholders/api/_keys-api-reference.md deleted file mode 100644 index 1b905bd85f3..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/_placeholders/api/_keys-api-reference.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -sidebar_label: 'キー' -title: 'キー' ---- - - - -## Get list of all keys {#get-list-of-all-keys} - -このファイルは、ビルドプロセス中に `clickhouseapi.js` によって生成されます。コンテンツを変更する必要がある場合は、`clickhouseapi.js` を編集してください。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/_placeholders/api/_keys-api-reference.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/_placeholders/api/_keys-api-reference.md.hash deleted file mode 100644 index 3b75d09e7d0..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/_placeholders/api/_keys-api-reference.md.hash +++ /dev/null @@ -1 +0,0 @@ -7be3bf52f87fcb23 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/_placeholders/api/_members-api-reference.md b/i18n/jp/docusaurus-plugin-content-docs/current/_placeholders/api/_members-api-reference.md deleted file mode 100644 index 7d5987a4cbb..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/_placeholders/api/_members-api-reference.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -sidebar_label: 'メンバー' -title: 'メンバー' ---- - - - -## List organization members {#list-organization-members} - -このファイルは、ビルドプロセス中に `clickhouseapi.js` によって生成されます。 コンテンツに変更が必要な場合は、 `clickhouseapi.js` を編集してください。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/_placeholders/api/_members-api-reference.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/_placeholders/api/_members-api-reference.md.hash deleted file mode 100644 index 7a27935e4c1..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/_placeholders/api/_members-api-reference.md.hash +++ /dev/null @@ -1 +0,0 @@ -8ba59ed8d689efc4 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/_placeholders/api/_organizations-api-reference.md b/i18n/jp/docusaurus-plugin-content-docs/current/_placeholders/api/_organizations-api-reference.md deleted file 
mode 100644 index d008fa1910b..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/_placeholders/api/_organizations-api-reference.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -sidebar_label: '組織' -title: '組織' ---- - - - -## 組織の詳細を取得 {#get-organization-details} - -このファイルは、ビルドプロセス中に `clickhouseapi.js` によって生成されます。内容を変更する必要がある場合は、 `clickhouseapi.js` を編集してください。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/_placeholders/api/_organizations-api-reference.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/_placeholders/api/_organizations-api-reference.md.hash deleted file mode 100644 index b72529e146b..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/_placeholders/api/_organizations-api-reference.md.hash +++ /dev/null @@ -1 +0,0 @@ -43d9a3a08ee303a2 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/_placeholders/api/_services-api-reference.md b/i18n/jp/docusaurus-plugin-content-docs/current/_placeholders/api/_services-api-reference.md deleted file mode 100644 index d4d78845994..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/_placeholders/api/_services-api-reference.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -sidebar_label: 'サービス' -title: 'サービス' ---- - - - -## List of organization services {#list-of-organization-services} - -このファイルはビルドプロセス中に `clickhouseapi.js` によって生成されます。内容を変更する必要がある場合は、`clickhouseapi.js` を編集してください。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/_placeholders/api/_services-api-reference.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/_placeholders/api/_services-api-reference.md.hash deleted file mode 100644 index 4c963b8edd4..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/_placeholders/api/_services-api-reference.md.hash +++ /dev/null @@ -1 +0,0 @@ -0bf9371323aac313 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/_placeholders/changelog/_index.md b/i18n/jp/docusaurus-plugin-content-docs/current/_placeholders/changelog/_index.md deleted file mode 100644 index 99b5cd01068..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/_placeholders/changelog/_index.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -description: '2025年の変更履歴' -note: 'This file is autogenerated by the yarn new-build' -slug: '/whats-new/changelog/' -sidebar_position: 2 -sidebar_label: '2025' -title: '2025年の変更履歴' ---- - - - - diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/_placeholders/changelog/_index.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/_placeholders/changelog/_index.md.hash deleted file mode 100644 index 0798c1349a8..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/_placeholders/changelog/_index.md.hash +++ /dev/null @@ -1 +0,0 @@ -cff204a8b011fa88 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_GCS_authentication_and_bucket.md b/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_GCS_authentication_and_bucket.md deleted file mode 100644 index 540e668aea6..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_GCS_authentication_and_bucket.md +++ /dev/null @@ -1,56 +0,0 @@ ---- -{} ---- - -import GCS_bucket_1 from '@site/static/images/integrations/data-ingestion/s3/GCS-bucket-1.png'; -import GCS_bucket_2 from '@site/static/images/integrations/data-ingestion/s3/GCS-bucket-2.png'; -import GCS_create_service_account_key from '@site/static/images/integrations/data-ingestion/s3/GCS-create-a-service-account-key.png'; -import GCS_create_service_account_0 from 
'@site/static/images/integrations/data-ingestion/s3/GCS-create-service-account-0.png'; -import GCS_create_service_account_a from '@site/static/images/integrations/data-ingestion/s3/GCS-create-service-account-a.png'; -import GCS_create_service_account_2 from '@site/static/images/integrations/data-ingestion/s3/GCS-create-service-account-2.png'; -import GCS_create_service_account_3 from '@site/static/images/integrations/data-ingestion/s3/GCS-create-service-account-3.png'; -import GCS_guide_key from '@site/static/images/integrations/data-ingestion/s3/GCS-guide-key.png'; -import Image from '@theme/IdealImage'; - -
- GCSバケットとHMACキーの作成 - -### ch_bucket_us_east1 {#ch_bucket_us_east1} - -US East 1でGCSバケットを作成する - -### ch_bucket_us_east4 {#ch_bucket_us_east4} - -US East 4でGCSバケットを作成する - -### アクセスキーの生成 {#generate-an-access-key} - -### サービスアカウントのHMACキーと秘密キーを作成する {#create-a-service-account-hmac-key-and-secret} - -**Cloud Storage > 設定 > 相互運用性** を開き、既存の **アクセスキー** を選択するか、**サービスアカウント用のキーを作成** を選択します。このガイドでは、新しいサービスアカウント用の新しいキーを作成する手順を説明します。 - -GCSでサービスアカウントのHMACキーを生成する - -### 新しいサービスアカウントを追加する {#add-a-new-service-account} - -既存のサービスアカウントがないプロジェクトの場合は、**新しいアカウントを作成** を選択します。 - -GCSに新しいサービスアカウントを追加する - -サービスアカウントを作成するには、3つのステップがあります。最初のステップでは、アカウントに意味のある名前、ID、説明を付けます。 - -GCSで新しいサービスアカウントの名前とIDを定義する - -相互運用性設定ダイアログでは、IAMロールとして **Storage Object Admin** が推奨されます。ステップ2でそのロールを選択します。 - -GCSでIAMロールStorage Object Adminを選択する - -ステップ3はオプションであり、このガイドでは使用されません。ポリシーに基づいてユーザーにこれらの権限を与えることができます。 - -GCSで新しいサービスアカウントの追加設定を構成する - -サービスアカウントのHMACキーが表示されます。この情報はClickHouseの設定で使用されるため、保存してください。 - -GCSで生成されたHMACキーを取得する - -
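参考までに、上記で作成したバケットとHMACキーは、ClickHouse側では次のようなS3タイプのディスク設定から利用できます。これはあくまで最小構成のイメージであり、バケット名 `ch_bucket_us_east1`・フォルダー名 `folder1`・アクセスキー/シークレットはプレースホルダーです。実際の値に置き換えてください。

```xml
<clickhouse>
    <storage_configuration>
        <disks>
            <gcs>
                <!-- GCSはS3互換のXML APIを提供するため、type には s3 を指定します -->
                <type>s3</type>
                <endpoint>https://storage.googleapis.com/ch_bucket_us_east1/folder1/</endpoint>
                <access_key_id>SERVICE_ACCOUNT_HMAC_KEY</access_key_id>
                <secret_access_key>SERVICE_ACCOUNT_HMAC_SECRET</secret_access_key>
            </gcs>
        </disks>
        <policies>
            <gcs_main>
                <volumes>
                    <main>
                        <disk>gcs</disk>
                    </main>
                </volumes>
            </gcs_main>
        </policies>
    </storage_configuration>
</clickhouse>
```

テーブル作成時に `SETTINGS storage_policy = 'gcs_main'` を指定すると、このディスクにデータが格納されます。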
diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_GCS_authentication_and_bucket.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_GCS_authentication_and_bucket.md.hash deleted file mode 100644 index 76487a75a15..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_GCS_authentication_and_bucket.md.hash +++ /dev/null @@ -1 +0,0 @@ -e28c24c6b6f89390 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_S3_authentication_and_bucket.md b/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_S3_authentication_and_bucket.md deleted file mode 100644 index 52db18c522d..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_S3_authentication_and_bucket.md +++ /dev/null @@ -1,156 +0,0 @@ ---- -{} ---- - -import Image from '@theme/IdealImage'; -import s3_1 from '@site/static/images/_snippets/s3/s3-1.png'; -import s3_2 from '@site/static/images/_snippets/s3/s3-2.png'; -import s3_3 from '@site/static/images/_snippets/s3/s3-3.png'; -import s3_4 from '@site/static/images/_snippets/s3/s3-4.png'; -import s3_5 from '@site/static/images/_snippets/s3/s3-5.png'; -import s3_6 from '@site/static/images/_snippets/s3/s3-6.png'; -import s3_7 from '@site/static/images/_snippets/s3/s3-7.png'; -import s3_8 from '@site/static/images/_snippets/s3/s3-8.png'; -import s3_9 from '@site/static/images/_snippets/s3/s3-9.png'; -import s3_a from '@site/static/images/_snippets/s3/s3-a.png'; -import s3_b from '@site/static/images/_snippets/s3/s3-b.png'; -import s3_c from '@site/static/images/_snippets/s3/s3-c.png'; -import s3_d from '@site/static/images/_snippets/s3/s3-d.png'; -import s3_e from '@site/static/images/_snippets/s3/s3-e.png'; -import s3_f from '@site/static/images/_snippets/s3/s3-f.png'; -import s3_g from '@site/static/images/_snippets/s3/s3-g.png'; -import s3_h from '@site/static/images/_snippets/s3/s3-h.png'; - -
- S3バケットとIAMユーザーの作成 - -この記事では、AWS IAMユーザーの基本設定、S3バケットの作成、およびClickHouseをそのバケットをS3ディスクとして使用するように設定する方法を示します。使用する権限についてはセキュリティチームと相談し、これらを出発点として考慮してください。 - -### AWS IAMユーザーの作成 {#create-an-aws-iam-user} -この手順では、ログインユーザーではなく、サービスアカウントユーザーを作成します。 -1. AWS IAM管理コンソールにログインします。 - -2. 「ユーザー」で**ユーザーの追加**を選択します。 - -AWS IAM Management Console - 新しいユーザーの追加 - -3. ユーザー名を入力し、認証情報の種類を**アクセスキー - プログラムによるアクセス**に設定し、**次へ: 権限**を選択します。 - -IAMユーザーのユーザー名とアクセスタイプの設定 - -4. ユーザーをいかなるグループにも追加せず、**次へ: タグ**を選択します。 - -IAMユーザーのグループ割り当てのスキップ - -5. タグを追加する必要がない限り、**次へ: 確認**を選択します。 - -IAMユーザーのタグ割り当てのスキップ - -6. **ユーザーの作成**を選択します。 - - :::note - ユーザーに権限がないという警告メッセージは無視できます。権限は次のセクションでバケットに対してユーザーに付与されます。 - ::: - -権限なしの警告でIAMユーザーを作成 - -7. ユーザーが作成されました。**表示**をクリックし、アクセスキーとシークレットキーをコピーします。 -:::note -キーは他の場所に保存してください。これはシークレットアクセスキーが利用可能な唯一の時点です。 -::: - -IAMユーザーのアクセスキーの表示とコピー - -8. 閉じるをクリックし、ユーザーの画面でユーザーを見つけます。 - -ユーザーリストで新しく作成されたIAMユーザーを見つける - -9. ARN(Amazonリソースネーム)をコピーし、バケットのアクセスポリシーを設定する際に使用するために保存します。 - -IAMユーザーのARNをコピー - -### S3バケットの作成 {#create-an-s3-bucket} -1. S3バケットセクションで、**バケットの作成**を選択します。 - -S3バケット作成プロセスの開始 - -2. バケット名を入力し、他のオプションはデフォルトのままにします。 -:::note -バケット名はAWS全体で一意である必要があります。同一の組織内だけではエラーが発生します。 -::: -3. `すべてのパブリックアクセスをブロック`を有効なままにします。公共のアクセスは必要ありません。 - -パブリックアクセスをブロックしたS3バケット設定の構成 - -4. ページの下部で**バケットの作成**を選択します。 - -S3バケット作成の最終確認 - -5. リンクを選択し、ARNをコピーし、バケットのアクセスポリシーを設定する際に使用するために保存します。 - -6. バケットが作成されたら、S3バケットリストで新しいS3バケットを見つけ、リンクを選択します。 - -バケットリストで新しく作成されたS3バケットを見つける - -7. **フォルダーの作成**を選択します。 - -S3バケットに新しいフォルダーを作成 - -8. ClickHouse S3ディスクのターゲットとなるフォルダー名を入力し、**フォルダーの作成**を選択します。 - -ClickHouse S3ディスク使用のためのフォルダー名の設定 - -9. フォルダーは現在バケットリストに表示されるはずです。 - -S3バケットで新しく作成されたフォルダーの表示 - -10. 新しいフォルダーのチェックボックスを選択し、**URLをコピー**をクリックします。コピーされたURLを、次のセクションでClickHouseストレージ構成に使用するために保存します。 - -ClickHouse構成のためのS3フォルダーURLをコピー - -11. **Permissions**タブを選択し、**バケットポリシー**セクションの**編集**ボタンをクリックします。 - -S3バケットポリシー設定のアクセス - -12. 以下のようにバケットポリシーを追加します: -```json -{ - "Version" : "2012-10-17", - "Id" : "Policy123456", - "Statement" : [ - { - "Sid" : "abc123", - "Effect" : "Allow", - "Principal" : { - "AWS" : "arn:aws:iam::921234567898:user/mars-s3-user" - }, - "Action" : "s3:*", - "Resource" : [ - "arn:aws:s3:::mars-doc-test", - "arn:aws:s3:::mars-doc-test/*" - ] - } - ] -} -``` - -```response -|パラメーター | 説明 | 例の値 | -|----------|-------------|----------------| -|Version | ポリシーインタープリターのバージョン、そのままにしておく | 2012-10-17 | -|Sid | ユーザー定義のポリシーID | abc123 | -|Effect | ユーザーのリクエストを許可または拒否するか | Allow | -|Principal | 許可されるアカウントまたはユーザー | arn:aws:iam::921234567898:user/mars-s3-user | -|Action | バケットで許可される操作 | s3:*| -|Resource | バケット内で操作が許可されるリソース | "arn:aws:s3:::mars-doc-test", "arn:aws:s3:::mars-doc-test/*" | -``` - -:::note -使用する权限の決定はセキュリティチームと相談してください。これを出発点と考えて実施してください。 -ポリシーおよび設定についての詳細はAWSのドキュメントを参照してください: -https://docs.aws.amazon.com/AmazonS3/latest/userguide/access-policy-language-overview.html -::: - -13. ポリシー設定を保存します。 - -
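参考までに、このバケットとIAMユーザーのアクセスキーをClickHouseのS3ディスクとして利用する場合の設定の一例(イメージ)を示します。バケット名 `mars-doc-test` は上記のポリシー例のもの、フォルダー名 `folder1` と各キーはプレースホルダーであり、実際の値に置き換える必要があります。

```xml
<clickhouse>
    <storage_configuration>
        <disks>
            <s3_disk>
                <type>s3</type>
                <endpoint>https://mars-doc-test.s3.amazonaws.com/folder1/</endpoint>
                <access_key_id>ACCESS_KEY_ID</access_key_id>
                <secret_access_key>SECRET_ACCESS_KEY</secret_access_key>
                <metadata_path>/var/lib/clickhouse/disks/s3_disk/</metadata_path>
            </s3_disk>
        </disks>
        <policies>
            <s3_main>
                <volumes>
                    <main>
                        <disk>s3_disk</disk>
                    </main>
                </volumes>
            </s3_main>
        </policies>
    </storage_configuration>
</clickhouse>
```

テーブル作成時に `SETTINGS storage_policy = 's3_main'` を指定すると、このバケットがデータの保存先として使用されます。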
diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_S3_authentication_and_bucket.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_S3_authentication_and_bucket.md.hash deleted file mode 100644 index b8f7b3e3b86..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_S3_authentication_and_bucket.md.hash +++ /dev/null @@ -1 +0,0 @@ -b9c9ed44893ea378 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_add_remote_ip_access_list_detail.md b/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_add_remote_ip_access_list_detail.md deleted file mode 100644 index 5b6ba48abe3..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_add_remote_ip_access_list_detail.md +++ /dev/null @@ -1,20 +0,0 @@ ---- -{} ---- - -import Image from '@theme/IdealImage'; -import ip_allow_list_check_list from '@site/static/images/_snippets/ip-allow-list-check-list.png'; -import ip_allow_list_add_current_ip from '@site/static/images/_snippets/ip-allow-list-add-current-ip.png'; - -
- IP アクセスリストの管理 - -ClickHouse Cloud サービスリストから作業するサービスを選択し、**設定**に切り替えます。IP アクセスリストに ClickHouse Cloud サービスに接続する必要があるリモートシステムの IP アドレスまたは範囲が含まれていない場合、**IP の追加**で問題を解決できます: - -IP アクセスリストでサービスがあなたの IP アドレスからのトラフィックを許可しているか確認する - -接続する必要がある個々の IP アドレスまたはアドレスの範囲を追加します。フォームを適宜修正し、次に **保存**します。 - -ClickHouse Cloud の IP アクセスリストに現在の IP アドレスを追加 - -
diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_add_remote_ip_access_list_detail.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_add_remote_ip_access_list_detail.md.hash deleted file mode 100644 index a9c5c4b4293..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_add_remote_ip_access_list_detail.md.hash +++ /dev/null @@ -1 +0,0 @@ -1772421486a1f7ad diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_add_superset_detail.md b/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_add_superset_detail.md deleted file mode 100644 index 977983cae96..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_add_superset_detail.md +++ /dev/null @@ -1,52 +0,0 @@ ---- -{} ---- - - - -
- DockerでApache Supersetを起動する - -Supersetは[Docker Composeを使用したSupersetのローカルインストール](https://superset.apache.org/docs/installation/installing-superset-using-docker-compose/)の手順を提供しています。 GitHubからApache Supersetリポジトリをチェックアウトした後、最新の開発コードまたは特定のタグを実行できます。 `pre-release`とマークされていない最新リリースであるバージョン2.0.0を推奨します。 - -`docker compose`を実行する前に、いくつかの作業を行う必要があります: - -1. 公式のClickHouse Connectドライバーを追加する -2. Mapbox APIキーを取得し、環境変数として追加する (オプション) -3. 実行するSupersetのバージョンを指定する - -:::tip -以下のコマンドは、GitHubリポジトリのトップレベルである `superset` から実行する必要があります。 -::: - -## 公式のClickHouse Connectドライバー {#official-clickhouse-connect-driver} - -SupersetのデプロイメントでClickHouse Connectドライバーを利用できるようにするために、ローカルのrequirementsファイルに追加します: - -```bash -echo "clickhouse-connect" >> ./docker/requirements-local.txt -``` - -## Mapbox {#mapbox} - -これはオプションです。Mapbox APIキーなしでSupersetに地理データをプロットできますが、キーを追加するように指示するメッセージが表示され、マップの背景画像が欠落します(データポイントのみが表示され、マップの背景は表示されません)。使用したい場合は、Mapboxは無料のティアを提供しています。 - -ガイドで作成するサンプルビジュアリゼーションのいくつかは、経度や緯度などの位置データを使用します。SupersetはMapboxマップをサポートしています。Mapboxビジュアリゼーションを使用するには、Mapbox APIキーが必要です。 [Mapboxの無料ティア](https://account.mapbox.com/auth/signup/)にサインアップし、APIキーを生成してください。 - -APIキーをSupersetに利用可能にします: - -```bash -echo "MAPBOX_API_KEY=pk.SAMPLE-Use-your-key-instead" >> docker/.env-non-dev -``` - -## Supersetバージョン2.0.0をデプロイ {#deploy-superset-version-200} - -リリース2.0.0をデプロイするには、次のコマンドを実行します: - -```bash -git checkout 2.0.0 -TAG=2.0.0 docker-compose -f docker-compose-non-dev.yml pull -TAG=2.0.0 docker-compose -f docker-compose-non-dev.yml up -``` - -
diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_add_superset_detail.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_add_superset_detail.md.hash deleted file mode 100644 index b65c6c2310b..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_add_superset_detail.md.hash +++ /dev/null @@ -1 +0,0 @@ -ccc68e13430eae91 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_aws_regions.md b/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_aws_regions.md deleted file mode 100644 index 515c0ffe146..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_aws_regions.md +++ /dev/null @@ -1,17 +0,0 @@ ---- -{} ---- - - - -| Region | VPCサービス名 | AZ IDs | -|--------------|------------------------------------------------------------|------------------------------| -|ap-south-1 | com.amazonaws.vpce.ap-south-1.vpce-svc-0a786406c7ddc3a1b | aps1-az1 aps1-az2 aps1-az3 | -|ap-southeast-1| com.amazonaws.vpce.ap-southeast-1.vpce-svc-0a8b096ec9d2acb01| apse1-az1 apse1-az2 apse1-az3| -|ap-southeast-2| com.amazonaws.vpce.ap-southeast-2.vpce-svc-0ca446409b23f0c01| apse2-az1 apse2-az2 apse2-az3| -|eu-central-1 | com.amazonaws.vpce.eu-central-1.vpce-svc-0536fc4b80a82b8ed | euc1-az2 euc1-az3 euc1-az1 | -|eu-west-1 | com.amazonaws.vpce.eu-west-1.vpce-svc-066b03c9b5f61c6fc | euw1-az2 euw1-az3 euw1-az1 | -|us-east-1 c0 | com.amazonaws.vpce.us-east-1.vpce-svc-0a0218fa75c646d81 | use1-az6 use1-az1 use1-az2 | -|us-east-1 c1 | com.amazonaws.vpce.us-east-1.vpce-svc-096c118db1ff20ea4 | use1-az6 use1-az4 use1-az2 | -|us-east-2 | com.amazonaws.vpce.us-east-2.vpce-svc-0b99748bf269a86b4 | use2-az1 use2-az2 use2-az3 | -|us-west-2 | com.amazonaws.vpce.us-west-2.vpce-svc-049bbd33f61271781 | usw2-az2 usw2-az1 usw2-az3 | diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_aws_regions.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_aws_regions.md.hash deleted file mode 100644 index a9b21aa06f6..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_aws_regions.md.hash +++ /dev/null @@ -1 +0,0 @@ -09158e75c5e2a869 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_clickhouse_mysql_cloud_setup.mdx b/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_clickhouse_mysql_cloud_setup.mdx deleted file mode 100644 index b5005cff628..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_clickhouse_mysql_cloud_setup.mdx +++ /dev/null @@ -1,91 +0,0 @@ ---- -{} ---- - -import mysql_1 from '@site/static/images/_snippets/mysql1.png'; -import mysql_2 from '@site/static/images/_snippets/mysql2.png'; -import mysql_3 from '@site/static/images/_snippets/mysql3.png'; -import mysql_4 from '@site/static/images/_snippets/mysql4.png'; -import mysql_5 from '@site/static/images/_snippets/mysql5.png'; -import Image from '@theme/IdealImage'; - -
-1. ClickHouse Cloudサービスを作成した後、`アプリを接続`画面で、ドロップダウンからMySQLを選択します。 -
- -ClickHouse Cloud資格情報画面がMySQLインターフェース選択のドロップダウンを表示 - - -2. この特定のサービスに対してMySQLインターフェースを有効にするためにスイッチを切り替えます。これによりこのサービスのポート`3306`が公開され、あなたのユニークなMySQLユーザー名を含むMySQL接続画面が表示されます。 - -ClickHouse Cloud MySQLインターフェース有効化スイッチと接続詳細 -
- -既存のサービスに対してMySQLインターフェースを有効にするには、以下の手順を実行します: - -3. サービスが`実行中`の状態であることを確認し、MySQLインターフェースを有効にしたいサービスをクリックします。左側のメニューから「接続」を選択します: - -
-ClickHouse Cloudサービス接続画面が接続オプションをハイライト表示 -
- - -4. `接続先`ドロップダウンからMySQLを選択します。 - -
-ClickHouse Cloud接続画面がMySQLオプション選択を表示 -
- -5. この特定のサービスに対してMySQLインターフェースを有効にするためにスイッチを切り替えます。これによりこのサービスのポート`3306`が公開され、あなたのユニークなMySQLユーザー名を含むMySQL接続画面が表示されます。 - -ClickHouse Cloud接続画面が有効化されたMySQLインターフェースを表示し、接続詳細を示す - -## ClickHouse Cloudでの複数のMySQLユーザーの作成 {#creating-multiple-mysql-users-in-clickhouse-cloud} - -デフォルトでは、`mysql4<subdomain>`ユーザーが組み込まれており、`default`ユーザーと同じパスワードを使用します。`<subdomain>`部分はClickHouse Cloudホスト名の最初のセグメントです。この形式は、安全な接続を実装するツールと共に動作するために必要ですが、[TLSハンドシェイクでSNI情報を提供しない](https://www.cloudflare.com/learning/ssl/what-is-sni)ため、ユーザー名に追加のヒントがなければ内部ルーティングが不可能になります(MySQLコンソールクライアントがそのようなツールの一つです)。 - -このため、MySQLインターフェースと共に使用される新しいユーザーを作成する際には、`mysql4<subdomain>_<username>`形式に従うことを _強く推奨_ します。ここで、`<subdomain>`はあなたのCloudサービスを識別するためのヒントであり、`<username>`はあなたの選択の任意の接尾辞です。 - -:::tip -ClickHouse Cloudのホスト名が`foobar.us-east1.aws.clickhouse.cloud`の場合、`<subdomain>`部分は`foobar`に等しく、カスタムMySQLユーザー名は`mysql4foobar_team1`のようになります。 -::: - -MySQLインターフェースで使用するための追加ユーザーを作成することができます。たとえば、追加の設定を適用したい場合などです。 - -1. オプション - カスタムユーザーに適用するための[設定プロファイル](/sql-reference/statements/create/settings-profile)を作成します。たとえば、`my_custom_profile`という名前の設定プロファイルを作成し、後で作成するユーザーで接続する際にデフォルトで適用される追加設定を含めます: - - ```sql - CREATE SETTINGS PROFILE my_custom_profile SETTINGS prefer_column_name_to_alias=1; - ``` - - `prefer_column_name_to_alias`は単なる例として使用されており、他の設定を使用することができます。 -2. [ユーザーの作成](/sql-reference/statements/create/user)を以下の形式で行います:`mysql4<subdomain>_<username>` ([上記を参照](#creating-multiple-mysql-users-in-clickhouse-cloud))。パスワードはダブルSHA1形式で指定する必要があります。例えば: - - ```sql - CREATE USER mysql4foobar_team1 IDENTIFIED WITH double_sha1_password BY 'YourPassword42$'; - ``` - - または、このユーザーにカスタムプロファイルを使用したい場合: - - ```sql - CREATE USER mysql4foobar_team1 IDENTIFIED WITH double_sha1_password BY 'YourPassword42$' SETTINGS PROFILE 'my_custom_profile'; - ``` - - ここで、`my_custom_profile`は以前に作成したプロファイルの名前です。 -3. [権限の付与](/sql-reference/statements/grant)を行い、新しいユーザーに希望のテーブルまたはデータベースと対話するための必要な権限を付与します。たとえば、`system.query_log`へのアクセスのみを付与したい場合: - - ```sql - GRANT SELECT ON system.query_log TO mysql4foobar_team1; - ``` - -4. 作成したユーザーを使用して、MySQLインターフェースでClickHouse Cloudサービスに接続します。 - -### ClickHouse Cloudでの複数のMySQLユーザーに関するトラブルシューティング {#troubleshooting-multiple-mysql-users-in-clickhouse-cloud} - -新しいMySQLユーザーを作成し、MySQL CLIクライアントを通じて接続中に以下のエラーが表示された場合: - -``` -ERROR 2013 (HY000): Lost connection to MySQL server at 'reading authorization packet', system error: 54 -``` - -この場合、ユーザー名が`mysql4<subdomain>_<username>`形式に従っていることを確認してください([上記](#creating-multiple-mysql-users-in-clickhouse-cloud)を参照)。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_clickhouse_mysql_cloud_setup.mdx.hash b/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_clickhouse_mysql_cloud_setup.mdx.hash deleted file mode 100644 index 4a421a33bbf..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_clickhouse_mysql_cloud_setup.mdx.hash +++ /dev/null @@ -1 +0,0 @@ -8e7d8c4d0d2a0126 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_clickhouse_mysql_on_premise_setup.mdx b/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_clickhouse_mysql_on_premise_setup.mdx deleted file mode 100644 index 6fdadcaee04..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_clickhouse_mysql_on_premise_setup.mdx +++ /dev/null @@ -1,94 +0,0 @@ ---- -{} ---- - - - -Please refer to [the official documentation](/interfaces/mysql) on how to set up a ClickHouse server with enabled MySQL interface. 
- -サーバーの `config.xml` にエントリを追加することに加えて、次のようにすることが _必須_ です。 - -```xml -<clickhouse> - <mysql_port>9004</mysql_port> -</clickhouse> -``` - -MySQL インターフェースを使用するユーザーには、[Double SHA1 パスワード暗号化](/operations/settings/settings-users#user-namepassword) を使用する必要があります。 - -シェルから Double SHA1 で暗号化されたランダムなパスワードを生成するには、次のコマンドを使用します。 - -```shell -PASSWORD=$(base64 < /dev/urandom | head -c16); echo "$PASSWORD"; echo -n "$PASSWORD" | sha1sum | tr -d '-' | xxd -r -p | sha1sum | tr -d '-' -``` - -出力は次のようになります。 - -``` -LZOQYnqQN4L/T6L0 -fbc958cc745a82188a51f30de69eebfc67c40ee4 -``` - -最初の行は生成されたパスワードで、2行目は ClickHouse の設定に使用できるハッシュです。 - -生成されたハッシュを使用する `mysql_user` の構成の一例は次の通りです。 - -`/etc/clickhouse-server/users.d/mysql_user.xml` - -```xml -<users> -<mysql_user> - <password_double_sha1_hex>fbc958cc745a82188a51f30de69eebfc67c40ee4</password_double_sha1_hex> -<networks> - <ip>::/0</ip> -</networks> - <profile>default</profile> - <quota>default</quota> -</mysql_user> -</users> -``` - -`password_double_sha1_hex` エントリは、生成された Double SHA1 ハッシュに置き換えてください。 - -さらに、`use_mysql_types_in_show_columns` を使用して、`SHOW [FULL] COLUMNS` クエリの結果で ClickHouse の型の代わりにネイティブ MySQL 型を表示することが推奨されます。これにより、BI ツールが MySQL コネクタを使用してデータベーススキーマを正しく調査できるようになります。 - -例えば: - -`/etc/clickhouse-server/users.d/mysql_user.xml` - -```xml -<profiles> -<default> - <use_mysql_types_in_show_columns>1</use_mysql_types_in_show_columns> -</default> -</profiles> -``` - -またはデフォルトのプロファイルの代わりに別のプロファイルに割り当てます。 - -`mysql` バイナリが利用可能であれば、コマンドラインから接続をテストできます。 -上記のサンプルユーザー名(`mysql_user`)とパスワード(`LZOQYnqQN4L/T6L0`)を使用した場合、コマンドラインは次のようになります。 - -```bash -mysql --protocol tcp -h localhost -u mysql_user -P 9004 --password=LZOQYnqQN4L/T6L0 -``` - -``` -mysql> show databases; -+--------------------+ -| name | -+--------------------+ -| INFORMATION_SCHEMA | -| default | -| information_schema | -| system | -+--------------------+ -4 rows in set (0.00 sec) -Read 4 rows, 603.00 B in 0.00156 sec., 2564 rows/sec., 377.48 KiB/sec. -``` - -最後に、ClickHouse Server を希望の IP アドレスでリッスンするように設定します。例えば、`config.xml` で、すべてのアドレスでリッスンするように次の行のコメントを外します。 - -```bash -<listen_host>::</listen_host> -``` diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_clickhouse_mysql_on_premise_setup.mdx.hash b/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_clickhouse_mysql_on_premise_setup.mdx.hash deleted file mode 100644 index 16ba1824d1b..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_clickhouse_mysql_on_premise_setup.mdx.hash +++ /dev/null @@ -1 +0,0 @@ -83dfea16bfda545e diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_config-files.md b/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_config-files.md deleted file mode 100644 index 82581a01d45..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_config-files.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -{} ---- - - - -:::important best practices -ClickHouse Serverの設定を行う際、設定ファイルを追加または編集する場合は次の点に注意してください: -- `/etc/clickhouse-server/config.d/` ディレクトリにファイルを追加する -- `/etc/clickhouse-server/users.d/` ディレクトリにファイルを追加する -- `/etc/clickhouse-server/config.xml` ファイルはそのままにする -- `/etc/clickhouse-server/users.xml` ファイルはそのままにする -::: diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_config-files.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_config-files.md.hash deleted file mode 100644 index 34af9f87af7..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_config-files.md.hash +++ /dev/null @@ -1 +0,0 @@ -416f9c795895e089 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_gather_your_details_http.mdx b/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_gather_your_details_http.mdx deleted file mode 100644 index 33a1782ca67..00000000000 --- 
a/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_gather_your_details_http.mdx +++ /dev/null @@ -1,45 +0,0 @@ ---- -{} ---- - -import cloud_connect_button from '@site/static/images/_snippets/cloud-connect-button.png'; -import connection_details_https from '@site/static/images/_snippets/connection-details-https.png'; -import Image from '@theme/IdealImage'; - -To connect to ClickHouse with HTTP(S) you need this information: - -- The HOST and PORT: typically, the port is 8443 when using TLS or 8123 when not using TLS. - -- The DATABASE NAME: out of the box, there is a database named `default`, use the name of the database that you want to connect to. - -- The USERNAME and PASSWORD: out of the box, the username is `default`. Use the username appropriate for your use case. - -The details for your ClickHouse Cloud service are available in the ClickHouse Cloud console. Select the service that you will connect to and click **Connect**: - -ClickHouse Cloud service connect button - -Choose **HTTPS**, and the details are available in an example `curl` command. - -ClickHouse Cloud HTTPS connection details - -If you are using self-managed ClickHouse, the connection details are set by your ClickHouse administrator. - ---- - -ClickHouseにHTTP(S)で接続するには、次の情報が必要です: - -- HOSTとPORT: 通常、ポートはTLSを使用する場合は8443、TLSを使用しない場合は8123です。 - -- DATABASE NAME: デフォルトでは、`default`という名前のデータベースがあります。接続したいデータベースの名前を使用してください。 - -- USERNAMEとPASSWORD: デフォルトでは、ユーザー名は`default`です。ご利用のケースに適したユーザー名を使用してください。 - -ClickHouse Cloudサービスの詳細はClickHouse Cloudコンソールで確認できます。接続するサービスを選択し、**Connect**をクリックします: - -ClickHouse Cloud service connect button - -**HTTPS**を選択すると、詳細が例の`curl`コマンドで提供されます。 - -ClickHouse Cloud HTTPS connection details - -セルフマネージドのClickHouseを使用している場合、接続の詳細はClickHouseの管理者によって設定されます。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_gather_your_details_http.mdx.hash b/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_gather_your_details_http.mdx.hash deleted file mode 100644 index 26d2960d065..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_gather_your_details_http.mdx.hash +++ /dev/null @@ -1 +0,0 @@ -6b933c8a85198fcb diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_gather_your_details_native.md b/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_gather_your_details_native.md deleted file mode 100644 index 0de518cf9b2..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_gather_your_details_native.md +++ /dev/null @@ -1,25 +0,0 @@ ---- -{} ---- - -import cloud_connect_button from '@site/static/images/_snippets/cloud-connect-button.png'; -import connection_details_native from '@site/static/images/_snippets/connection-details-native.png'; -import Image from '@theme/IdealImage'; - -ClickHouse にネイティブ TCP で接続するには、次の情報が必要です: - -- HOST と PORT: 通常、TLS を使用する場合はポート 9440、TLS を使用しない場合は 9000 です。 - -- DATABASE NAME: デフォルトでは `default` というデータベースがあり、接続したいデータベースの名前を使用します。 - -- USERNAME と PASSWORD: デフォルトではユーザー名は `default` です。使用ケースに適したユーザー名を使用してください。 - -ClickHouse Cloud サービスの詳細は ClickHouse Cloud コンソールで確認できます。接続するサービスを選択し、**Connect** をクリックします: - -ClickHouse Cloud service connect button - -**Native** を選択すると、例の `clickhouse-client` コマンドで詳細が表示されます。 - -ClickHouse Cloud Native TCP connection details - -セルフマネージドの ClickHouse を使用している場合、接続の詳細は ClickHouse 管理者によって設定されます。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_gather_your_details_native.md.hash 
b/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_gather_your_details_native.md.hash deleted file mode 100644 index da794d24fc8..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_gather_your_details_native.md.hash +++ /dev/null @@ -1 +0,0 @@ -069dd5014869e2d8 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_gcp_regions.md b/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_gcp_regions.md deleted file mode 100644 index 5d4c797dac8..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_gcp_regions.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -{} ---- - - - -| Region | Service Attachment | Private DNS domain | -|----------------|-----------------------------------------------------------------|------------------------------------| -|`asia-southeast1`| `projects/dataplane-production/regions/asia-southeast1/serviceAttachments/production-asia-southeast1-clickhouse-cloud`| `asia-southeast1.p.gcp.clickhouse.cloud`| -|`europe-west4` | `projects/dataplane-production/regions/europe-west4/serviceAttachments/production-europe-west4-clickhouse-cloud` | `europe-west4.p.gcp.clickhouse.cloud` | -|`us-central1` | `projects/dataplane-production/regions/us-central1/serviceAttachments/production-us-central1-clickhouse-cloud` | `us-central1.p.gcp.clickhouse.cloud` | -|`us-east1` | `projects/dataplane-production/regions/us-east1/serviceAttachments/production-us-east1-clickhouse-cloud` | `us-east1.p.gcp.clickhouse.cloud` | diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_gcp_regions.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_gcp_regions.md.hash deleted file mode 100644 index 7f08f13748d..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_gcp_regions.md.hash +++ /dev/null @@ -1 +0,0 @@ -f6c9640aa81292e6 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_keeper-config-files.md b/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_keeper-config-files.md deleted file mode 100644 index ec7e82c57f4..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_keeper-config-files.md +++ /dev/null @@ -1,11 +0,0 @@ ---- -{} ---- - - - -:::important best practices -ClickHouse Keeperを設定する際には、構成ファイルを編集することによって以下のことを行うべきです: -- `/etc/clickhouse-keeper/keeper_config.xml` をバックアップする -- `/etc/clickhouse-keeper/keeper_config.xml` ファイルを編集する -::: diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_keeper-config-files.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_keeper-config-files.md.hash deleted file mode 100644 index 9dfd16df44c..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_keeper-config-files.md.hash +++ /dev/null @@ -1 +0,0 @@ -7115b85be810fffc diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_launch_sql_console.md b/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_launch_sql_console.md deleted file mode 100644 index 04286494af8..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_launch_sql_console.md +++ /dev/null @@ -1,24 +0,0 @@ ---- -{} ---- - -import cloud_connect_to_sql_console from '@site/static/images/_snippets/cloud-connect-to-sql-console.png'; -import createservice8 from '@site/static/images/_snippets/createservice8.png'; -import Image from '@theme/IdealImage'; - -:::tip SQLコンソール -SQLクライアント接続が必要な場合、あなたのClickHouse Cloudサービスには関連するウェブベースのSQLコンソールがあります。詳細については、下の**SQLコンソールに接続**を展開してください。 -::: - -
- SQLコンソールに接続 - -あなたのClickHouse Cloudサービスリストから、サービスをクリックします。 - -SQLコンソールに接続 - -これにより、SQLコンソールにリダイレクトされます。 - -SQLコンソール - -
diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_launch_sql_console.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_launch_sql_console.md.hash deleted file mode 100644 index 98dbf7031b2..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_launch_sql_console.md.hash +++ /dev/null @@ -1 +0,0 @@ -0e66060e54cc6a3f diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_replication-sharding-terminology.md b/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_replication-sharding-terminology.md deleted file mode 100644 index f9b7433ebc5..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_replication-sharding-terminology.md +++ /dev/null @@ -1,15 +0,0 @@ ---- -{} ---- - - - -## Terminology {#terminology} -### Replica {#replica} -データのコピー。ClickHouse は常にデータのコピーを少なくとも 1 つ持っており、最小限の **レプリカ** の数は 1 つです。これは重要な詳細であり、データのオリジナルコピーをレプリカとしてカウントすることに慣れていないかもしれませんが、ClickHouse のコードとドキュメントで使用される用語です。データの 2 番目のレプリカを追加することにより、フォールトトレランスを提供します。 - -### Shard {#shard} -データのサブセット。ClickHouse は常にデータのためのシャードを少なくとも 1 つ持っているため、データを複数のサーバーに分割しない場合、データは 1 つのシャードに格納されます。データを複数のサーバーにシャーディングすることで、単一のサーバーの容量を超えた場合に負荷を分散させることができます。宛先サーバーは **シャーディングキー** によって決定され、分散テーブルを作成する際に定義されます。シャーディングキーはランダムであったり、[ハッシュ関数](/sql-reference/functions/hash-functions) の出力として定義されることがあります。シャーディングに関する導入例では、`rand()` をシャーディングキーとして使用し、異なるシャーディングキーを選択するタイミングと方法についてのさらなる情報を提供します。 - -### Distributed coordination {#distributed-coordination} -ClickHouse Keeper はデータレプリケーションおよび分散DDLクエリ実行のためのコーディネーションシステムを提供します。ClickHouse Keeper は Apache ZooKeeper と互換性があります。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_replication-sharding-terminology.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_replication-sharding-terminology.md.hash deleted file mode 100644 index f79064183b1..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_replication-sharding-terminology.md.hash +++ /dev/null @@ -1 +0,0 @@ -32a816ac6c4af903 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_self_managed_only_automated.md b/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_self_managed_only_automated.md deleted file mode 100644 index 1a064dbea7c..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_self_managed_only_automated.md +++ /dev/null @@ -1,11 +0,0 @@ ---- -{} ---- - -import CloudNotSupportedBadge from '@theme/badges/CloudNotSupportedBadge'; - - - -:::note -このページは [ClickHouse Cloud](https://clickhouse.com/cloud) には適用されません。ここに記載された手順は、ClickHouse Cloud サービスで自動化されています。 -::: diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_self_managed_only_automated.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_self_managed_only_automated.md.hash deleted file mode 100644 index da0637c7a65..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_self_managed_only_automated.md.hash +++ /dev/null @@ -1 +0,0 @@ -a4a2eca16045091d diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_self_managed_only_no_roadmap.md b/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_self_managed_only_no_roadmap.md deleted file mode 100644 index 6282e7015b4..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_self_managed_only_no_roadmap.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -{} ---- - -import CloudNotSupportedBadge from '@theme/badges/CloudNotSupportedBadge'; - - - -:::note -このページは [ClickHouse 
Cloud](https://clickhouse.com/cloud) には適用されません。ここで文書化されている機能は、ClickHouse Cloud サービスでは利用できません。 -詳細については、ClickHouse の [Cloud Compatibility](/whats-new/cloud-compatibility) ガイドを参照してください。 -::: diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_self_managed_only_no_roadmap.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_self_managed_only_no_roadmap.md.hash deleted file mode 100644 index 667e118e246..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_self_managed_only_no_roadmap.md.hash +++ /dev/null @@ -1 +0,0 @@ -3cd5258eb315569d diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_self_managed_only_not_applicable.md b/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_self_managed_only_not_applicable.md deleted file mode 100644 index c364c753f53..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_self_managed_only_not_applicable.md +++ /dev/null @@ -1,11 +0,0 @@ ---- -{} ---- - -import CloudNotSupportedBadge from '@theme/badges/CloudNotSupportedBadge'; - - - -:::note -このページは[ClickHouse Cloud](https://clickhouse.com/cloud)には適用されません。ここに記載されている手順は、セルフマネージドのClickHouseデプロイメントにのみ必要です。 -::: diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_self_managed_only_not_applicable.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_self_managed_only_not_applicable.md.hash deleted file mode 100644 index 70531059dd8..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_self_managed_only_not_applicable.md.hash +++ /dev/null @@ -1 +0,0 @@ -aebb810ae89528cb diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_self_managed_only_roadmap.md b/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_self_managed_only_roadmap.md deleted file mode 100644 index b400cd59f47..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_self_managed_only_roadmap.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -{} ---- - -import CloudNotSupportedBadge from '@theme/badges/CloudNotSupportedBadge'; - - - -:::note -このページは [ClickHouse Cloud](https://clickhouse.com/cloud) には適用されません。ここで文書化されている機能は、ClickHouse Cloud サービスではまだ利用できません。 -詳細については、ClickHouse の [Cloud Compatibility](/whats-new/cloud-compatibility#roadmap) ガイドを参照してください。 -::: diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_self_managed_only_roadmap.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_self_managed_only_roadmap.md.hash deleted file mode 100644 index 87a0356fbcb..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_self_managed_only_roadmap.md.hash +++ /dev/null @@ -1 +0,0 @@ -4f9ce60ad2d130a4 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_service_actions_menu.md b/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_service_actions_menu.md deleted file mode 100644 index 3597784ad71..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_service_actions_menu.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -{} ---- - -import Image from '@theme/IdealImage'; -import cloud_service_action_menu from '@site/static/images/_snippets/cloud-service-actions-menu.png'; - -Select your service, followed by `Data souces` -> `Predefined sample data`. 
- -ClickHouse Cloud サービスアクションメニューが Data sources と Predefined sample data オプションを表示しています diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_service_actions_menu.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_service_actions_menu.md.hash deleted file mode 100644 index b970aee4cff..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_service_actions_menu.md.hash +++ /dev/null @@ -1 +0,0 @@ -0292037de914fe0d diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_system_table_cloud.md b/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_system_table_cloud.md deleted file mode 100644 index 7655055b0c2..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_system_table_cloud.md +++ /dev/null @@ -1,9 +0,0 @@ ---- -{} ---- - - - -:::note Querying in ClickHouse Cloud -このシステムテーブルのデータは、ClickHouse Cloudの各ノードにローカルで保管されています。そのため、すべてのデータの完全なビューを取得するには、`clusterAllReplicas` 関数が必要です。詳細については [こちら](/operations/system-tables/overview#system-tables-in-clickhouse-cloud) をご覧ください。 -::: diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_system_table_cloud.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_system_table_cloud.md.hash deleted file mode 100644 index 878f6f1a69c..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_system_table_cloud.md.hash +++ /dev/null @@ -1 +0,0 @@ -5b553283b13dd9ff diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_tabs.md b/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_tabs.md deleted file mode 100644 index 21f56ec8e21..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_tabs.md +++ /dev/null @@ -1,22 +0,0 @@ ---- -sidebar_label: 'タブのサンプル' ---- - -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; -import CodeBlock from '@theme/CodeBlock'; - -## Step 1. {#step-1} - - - - -クラウド - - - - -セルフマネージド - - - diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_tabs.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_tabs.md.hash deleted file mode 100644 index c677ccf99f6..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_tabs.md.hash +++ /dev/null @@ -1 +0,0 @@ -339d948a28eac4fd diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_users-and-roles-common.md b/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_users-and-roles-common.md deleted file mode 100644 index cfea69d66c6..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_users-and-roles-common.md +++ /dev/null @@ -1,453 +0,0 @@ ---- -{} ---- - - - -## Test admin privileges {#test-admin-privileges} - -ユーザー `default` からログアウトし、ユーザー `clickhouse_admin` として再ログインします。 - -これらすべてが成功するはずです: - -```sql -SHOW GRANTS FOR clickhouse_admin; -``` - -```sql -CREATE DATABASE db1 -``` - -```sql -CREATE TABLE db1.table1 (id UInt64, column1 String) ENGINE = MergeTree() ORDER BY id; -``` - -```sql -INSERT INTO db1.table1 (id, column1) VALUES (1, 'abc'); -``` - -```sql -SELECT * FROM db1.table1; -``` - -```sql -DROP TABLE db1.table1; -``` - -```sql -DROP DATABASE db1; -``` - -## Non-admin users {#non-admin-users} - -ユーザーは必要な権限を持っている必要があり、すべてが管理者ユーザーである必要はありません。この文書の残りの部分では、例となるシナリオと必要な役割が提供されます。 - -### Preparation {#preparation} - -例で使用するために、これらのテーブルとユーザーを作成します。 - -#### Creating a sample database, table, and rows {#creating-a-sample-database-table-and-rows} - -1. 
テストデータベースを作成します - - ```sql - CREATE DATABASE db1; - ``` - -2. テーブルを作成します - - ```sql - CREATE TABLE db1.table1 ( - id UInt64, - column1 String, - column2 String - ) - ENGINE MergeTree - ORDER BY id; - ``` - -3. テーブルにサンプル行を入力します - - ```sql - INSERT INTO db1.table1 - (id, column1, column2) - VALUES - (1, 'A', 'abc'), - (2, 'A', 'def'), - (3, 'B', 'abc'), - (4, 'B', 'def'); - ``` - -4. テーブルを確認します: - - ```sql - SELECT * - FROM db1.table1 - ``` - - ```response - Query id: 475015cc-6f51-4b20-bda2-3c9c41404e49 - - ┌─id─┬─column1─┬─column2─┐ - │ 1 │ A │ abc │ - │ 2 │ A │ def │ - │ 3 │ B │ abc │ - │ 4 │ B │ def │ - └────┴─────────┴─────────┘ - ``` - -5. 特定のカラムへのアクセス制限をデモするために使用される通常のユーザーを作成します: - - ```sql - CREATE USER column_user IDENTIFIED BY 'password'; - ``` - -6. 特定の値を持つ行へのアクセス制限をデモするために使用される通常のユーザーを作成します: - ```sql - CREATE USER row_user IDENTIFIED BY 'password'; - ``` - -#### Creating roles {#creating-roles} - -以下の例セットで: - -- カラムや行に対するさまざまな権限のためのロールが作成されます -- 権限がロールに付与されます -- 各ロールにユーザーが割り当てられます - -ロールは、各ユーザーを個別に管理する代わりに、特定の権限のためのユーザーグループを定義するために使用されます。 - -1. このロールのユーザーがデータベース `db1` の `table1` で `column1` のみを表示できるように制限するロールを作成します: - - ```sql - CREATE ROLE column1_users; - ``` - -2. `column1` の閲覧を許可する権限を設定します - - ```sql - GRANT SELECT(id, column1) ON db1.table1 TO column1_users; - ``` - -3. `column_user` ユーザーを `column1_users` ロールに追加します - - ```sql - GRANT column1_users TO column_user; - ``` - -4. このロールのユーザーが選択された行のみを表示できるように制限するロールを作成します。この場合、`column1` に `A` を含む行のみです。 - - ```sql - CREATE ROLE A_rows_users; - ``` - -5. `row_user` を `A_rows_users` ロールに追加します - - ```sql - GRANT A_rows_users TO row_user; - ``` - -6. `column1` が `A` の値を持つ行のみを表示できるポリシーを作成します - - ```sql - CREATE ROW POLICY A_row_filter ON db1.table1 FOR SELECT USING column1 = 'A' TO A_rows_users; - ``` - -7. データベースおよびテーブルに権限を設定します - - ```sql - GRANT SELECT(id, column1, column2) ON db1.table1 TO A_rows_users; - ``` - -8. 他のロールがすべての行にアクセスできるように明示的な権限を付与します - - ```sql - CREATE ROW POLICY allow_other_users_filter - ON db1.table1 FOR SELECT USING 1 TO clickhouse_admin, column1_users; - ``` - - :::note - テーブルにポリシーを添付すると、システムはそのポリシーを適用し、定義されたユーザーおよびロールのみがテーブルで操作を行うことができるようになり、それ以外はすべての操作が拒否されます。他のユーザーに制限を適用しないためには、通常のアクセスや他のタイプのアクセスを許可する別のポリシーを定義する必要があります。 - ::: - -## Verification {#verification} - -### Testing role privileges with column restricted user {#testing-role-privileges-with-column-restricted-user} - -1. `clickhouse_admin` ユーザーで ClickHouse クライアントにログインします - - ```bash - clickhouse-client --user clickhouse_admin --password password - ``` - -2. 管理者ユーザーとしてデータベース、テーブル、およびすべての行へのアクセスを確認します。 - - ```sql - SELECT * - FROM db1.table1 - ``` - - ```response - Query id: f5e906ea-10c6-45b0-b649-36334902d31d - - ┌─id─┬─column1─┬─column2─┐ - │ 1 │ A │ abc │ - │ 2 │ A │ def │ - │ 3 │ B │ abc │ - │ 4 │ B │ def │ - └────┴─────────┴─────────┘ - ``` - -3. `column_user` ユーザーで ClickHouse クライアントにログインします - - ```bash - clickhouse-client --user column_user --password password - ``` - -4. すべてのカラムを使用して `SELECT` をテストします - - ```sql - SELECT * - FROM db1.table1 - ``` - - ```response - Query id: 5576f4eb-7450-435c-a2d6-d6b49b7c4a23 - - 0 rows in set. Elapsed: 0.006 sec. - - Received exception from server (version 22.3.2): - Code: 497. DB::Exception: Received from localhost:9000. - DB::Exception: column_user: Not enough privileges. - To execute this query it's necessary to have grant - SELECT(id, column1, column2) ON db1.table1. (ACCESS_DENIED) - ``` - - :::note - すべてのカラムが指定されたため、アクセスが拒否されています。ユーザーは `id` と `column1` のみアクセス権を持っています。 - ::: - -5. 
許可されたカラムのみを指定した `SELECT` クエリを確認します: - - ```sql - SELECT - id, - column1 - FROM db1.table1 - ``` - - ```response - Query id: cef9a083-d5ce-42ff-9678-f08dc60d4bb9 - - ┌─id─┬─column1─┐ - │ 1 │ A │ - │ 2 │ A │ - │ 3 │ B │ - │ 4 │ B │ - └────┴─────────┘ - ``` - -### Testing role privileges with row restricted user {#testing-role-privileges-with-row-restricted-user} - -1. `row_user` を使用して ClickHouse クライアントにログインします - - ```bash - clickhouse-client --user row_user --password password - ``` - -2. 利用可能な行を表示します - - ```sql - SELECT * - FROM db1.table1 - ``` - - ```response - Query id: a79a113c-1eca-4c3f-be6e-d034f9a220fb - - ┌─id─┬─column1─┬─column2─┐ - │ 1 │ A │ abc │ - │ 2 │ A │ def │ - └────┴─────────┴─────────┘ - ``` - - :::note - 上記の2行のみが返されることを確認します。`column1` に `B` の値を持つ行は除外されるべきです。 - ::: - -## Modifying Users and Roles {#modifying-users-and-roles} - -ユーザーには必要な権限の組み合わせのために複数のロールが割り当てられることがあります。複数のロールを使用する場合、システムは権限を決定するためにロールを組み合わせ、ロールの権限は累積的な効果を持つことになります。 - -たとえば、`role1` が `column1` のみを選択することを許可し、`role2` が `column1` と `column2` の選択を許可する場合、ユーザーは両方のカラムにアクセスできるようになります。 - -1. 管理者アカウントを使用して、デフォルトのロールで行とカラムの両方で制限する新しいユーザーを作成します - - ```sql - CREATE USER row_and_column_user IDENTIFIED BY 'password' DEFAULT ROLE A_rows_users; - ``` - -2. `A_rows_users` ロールの以前の権限を削除します - - ```sql - REVOKE SELECT(id, column1, column2) ON db1.table1 FROM A_rows_users; - ``` - -3. `A_rows_users` ロールに `column1` のみを選択することを許可します - - ```sql - GRANT SELECT(id, column1) ON db1.table1 TO A_rows_users; - ``` - -4. `row_and_column_user` を使用して ClickHouse クライアントにログインします - - ```bash - clickhouse-client --user row_and_column_user --password password; - ``` - -5. すべてのカラムを含むクエリをテストします: - - ```sql - SELECT * - FROM db1.table1 - ``` - - ```response - Query id: 8cdf0ff5-e711-4cbe-bd28-3c02e52e8bc4 - - 0 rows in set. Elapsed: 0.005 sec. - - Received exception from server (version 22.3.2): - Code: 497. DB::Exception: Received from localhost:9000. - DB::Exception: row_and_column_user: Not enough privileges. - To execute this query it's necessary to have grant - SELECT(id, column1, column2) ON db1.table1. (ACCESS_DENIED) - ``` - -6. 
許可されたカラムのみを指定してテストします: - - ```sql - SELECT - id, - column1 - FROM db1.table1 - ``` - - ```response - Query id: 5e30b490-507a-49e9-9778-8159799a6ed0 - - ┌─id─┬─column1─┐ - │ 1 │ A │ - │ 2 │ A │ - └────┴─────────┘ - ``` - -## Troubleshooting {#troubleshooting} - -権限が交差または組み合わさることで予期しない結果が生じることがあります。以下のコマンドを使用して、管理者アカウントを使用して問題を特定できます。 - -### Listing the grants and roles for a user {#listing-the-grants-and-roles-for-a-user} - -```sql -SHOW GRANTS FOR row_and_column_user -``` - -```response -Query id: 6a73a3fe-2659-4aca-95c5-d012c138097b - -┌─GRANTS FOR row_and_column_user───────────────────────────┐ -│ GRANT A_rows_users, column1_users TO row_and_column_user │ -└──────────────────────────────────────────────────────────┘ -``` - -### List roles in ClickHouse {#list-roles-in-clickhouse} - -```sql -SHOW ROLES -``` - -```response -Query id: 1e21440a-18d9-4e75-8f0e-66ec9b36470a - -┌─name────────────┐ -│ A_rows_users │ -│ column1_users │ -└─────────────────┘ -``` - -### Display the policies {#display-the-policies} - -```sql -SHOW ROW POLICIES -``` - -```response -Query id: f2c636e9-f955-4d79-8e80-af40ea227ebc - -┌─name───────────────────────────────────┐ -│ A_row_filter ON db1.table1 │ -│ allow_other_users_filter ON db1.table1 │ -└────────────────────────────────────────┘ -``` - -### View how a policy was defined and current privileges {#view-how-a-policy-was-defined-and-current-privileges} - -```sql -SHOW CREATE ROW POLICY A_row_filter ON db1.table1 -``` - -```response -Query id: 0d3b5846-95c7-4e62-9cdd-91d82b14b80b - -┌─CREATE ROW POLICY A_row_filter ON db1.table1────────────────────────────────────────────────┐ -│ CREATE ROW POLICY A_row_filter ON db1.table1 FOR SELECT USING column1 = 'A' TO A_rows_users │ -└─────────────────────────────────────────────────────────────────────────────────────────────┘ -``` - -## Example commands to manage roles, policies, and users {#example-commands-to-manage-roles-policies-and-users} - -以下のコマンドを使用して: - -- 権限を削除する -- ポリシーを削除する -- ユーザーをロールから外す -- ユーザーとロールを削除する -
- -:::tip -これらのコマンドは管理者ユーザーまたは `default` ユーザーとして実行してください。 -::: - -### Remove privilege from a role {#remove-privilege-from-a-role} - -```sql -REVOKE SELECT(column1, id) ON db1.table1 FROM A_rows_users; -``` - -### Delete a policy {#delete-a-policy} - -```sql -DROP ROW POLICY A_row_filter ON db1.table1; -``` - -### Unassign a user from a role {#unassign-a-user-from-a-role} - -```sql -REVOKE A_rows_users FROM row_user; -``` - -### Delete a role {#delete-a-role} - -```sql -DROP ROLE A_rows_users; -``` - -### Delete a user {#delete-a-user} - -```sql -DROP USER row_user; -``` - -## Summary {#summary} - -この記事では、SQLユーザーとロールの基本的な作成方法を示し、ユーザーとロールの権限を設定および変更する手順を提供しました。各詳細については、ユーザーガイドおよびリファレンス文書を参照してください。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_users-and-roles-common.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_users-and-roles-common.md.hash deleted file mode 100644 index acd90d1753c..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_users-and-roles-common.md.hash +++ /dev/null @@ -1 +0,0 @@ -a7a06887bcf479a8 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/about-us/_category_.yml b/i18n/jp/docusaurus-plugin-content-docs/current/about-us/_category_.yml deleted file mode 100644 index dfa8c8a3912..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/about-us/_category_.yml +++ /dev/null @@ -1,6 +0,0 @@ -label: 'About Us' -collapsible: true -collapsed: true -link: - type: generated-index - title: About Us diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/about-us/about-faq-index.md b/i18n/jp/docusaurus-plugin-content-docs/current/about-us/about-faq-index.md deleted file mode 100644 index 21d574348cf..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/about-us/about-faq-index.md +++ /dev/null @@ -1,21 +0,0 @@ ---- -title: 'FAQ' -slug: '/about-us/faq' -description: 'Landing page' ---- - - - -| FAQ | -|-------------------------------------------------------------------------------------------------------------------------------| -| [カラム指向データベースとは何ですか?](/faq/general/columnar-database) | -| [「ClickHouse」とは何ですか?](/faq/general/dbms-naming) | -| [ClickHouseを他のシステムと統合する](/faq/integration) | -| [JSONをClickHouseにインポートする方法は?](/faq/integration/json-import) | -| [ODBC経由でOracleを使用する際、エンコーディングに問題がある場合はどうすればよいですか?](/faq/integration/oracle-odbc) | -| [ClickHouseのテーブルから古いレコードを削除することは可能ですか?](/faq/operations/delete-old-data) | -| [ClickHouseサーバーおよびクラスターの運用に関する質問](/faq/operations) | -| [ストレージと計算を分離してClickHouseをデプロイすることは可能ですか?](/faq/operations/deploy-separate-storage-and-compute) | -| [ClickHouseのユースケースに関する質問](/faq/use-cases) | -| [ClickHouseをキー・バリュー型ストレージとして使用できますか?](/faq/use-cases/key-value) | -| [ClickHouseを時系列データベースとして使用できますか?](/faq/use-cases/time-series) | diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/about-us/about-faq-index.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/about-us/about-faq-index.md.hash deleted file mode 100644 index 4c17d3b30ee..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/about-us/about-faq-index.md.hash +++ /dev/null @@ -1 +0,0 @@ -2720730d1d47f395 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/about-us/adopters.md b/i18n/jp/docusaurus-plugin-content-docs/current/about-us/adopters.md deleted file mode 100644 index cebf3eea33c..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/about-us/adopters.md +++ /dev/null @@ -1,561 +0,0 @@ ---- -slug: /about-us/adopters -sidebar_label: 'Adopters' -title: 
-sidebar_position: 60
-description: 'A list of companies using ClickHouse and their success stories'
----
-
-The following list of companies using ClickHouse and their success stories is assembled from public sources and may therefore differ from current reality. We'd appreciate it if you share the story of adopting ClickHouse in your company and [add it to the list](https://github.com/ClickHouse/clickhouse-docs/blob/main/docs/about-us/adopters.md), but please make sure you won't have any NDA issues by doing so. Providing updates with publications from other companies is also useful.
-
- -| Company | Industry | Use case | Cluster Size | (Un)Compressed Data Size\* | Reference | -|----------------------------------------------------------------------------------------------------|-------------------------------------------------|-------------------------------------------------------------|--------------|------------------------------------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| [1Flow](https://1flow.ai/) | Feedback automation | - | — | — | ClickHouse Cloud user | -| [2gis](https://2gis.ru) | Maps | Monitoring | — | — | [Talk in Russian, July 2019](https://youtu.be/58sPkXfq6nw) | -| [3xpl](https://3xpl.com/) | Software & Technology | Blockchain Explorer | — | — | [Reddit, February 2023](https://www.reddit.com/r/ethereum/comments/1159pdg/new_ethereum_explorer_by_3xpl_no_ads_super_fast/) | -| [5CNetwork](https://www.5cnetwork.com/) | Software | Analytics | — | — | [Community Slack](https://clickhouse.com/slack) | -| [ABTasty](https://www.abtasty.com/) | Web Analytics | Analytics | — | — | [Paris Meetup, March 2024](https://www.meetup.com/clickhouse-france-user-group/events/298997115/) | -| [Arkhn](https://www.arkhn.com) | Healthcare | Data Warehouse | — | — | [Paris Meetup, March 2024](https://www.meetup.com/clickhouse-france-user-group/events/298997115/) | -| [ASO.dev](https://aso.dev/) | Software & Technology | App store optimisation | — | — | [Twitter, April 2023](https://twitter.com/gorniv/status/1642847791226445828) | -| [AdGreetz](https://www.adgreetz.com/) | Software & Technology | AdTech & MarTech | — | — | [Blog, April 2023](https://clickhouse.com/blog/adgreetz-processes-millions-of-daily-ad-impressions) | -| [AdGuard](https://adguard.com/) | Anti-Ads | AdGuard DNS | — | 1,000,000 DNS requests per second from over 50 million users | [Official Website, August 2022](https://adguard.com/en/blog/adguard-dns-2-0-goes-open-source.html) | -| [AdScribe](http://www.adscribe.tv/) | Ads | TV Analytics | — | — | [A quote from CTO](https://altinity.com/24x7-support/) | -| [Adapty](https://adapty.io/) | Subscription Analytics | Main product | — | — | [Twitter, November 2021](https://twitter.com/iwitaly/status/1462698148061659139) | -| [Adevinta](https://www.adevinta.com/) | Software & Technology | Online Classifieds | — | — | [Blog, April 2023](https://clickhouse.com/blog/serving-real-time-analytics-across-marketplaces-at-adevinta) | -| [Admiral](https://getadmiral.com/) | MarTech | Engagement Management | — | — | [Webinar Slides, June 2020](https://altinity.com/presentations/2020/06/16/big-data-in-real-time-how-clickhouse-powers-admirals-visitor-relationships-for-publishers) | -| [Admixer](https://admixer.com/) | Media & Entertainment | Ad Analytics | — | — | [Blog Post](https://clickhouse.com/blog/admixer-aggregates-over-1-billion-unique-users-a-day-using-clickhouse) | -| [Aggregations.io](https://aggregations.io/) | Real-time analytics | Main product | - | - | [Twitter](https://twitter.com/jsneedles/status/1734606200199889282) | -| [Ahrefs](https://ahrefs.com/) | SEO | Analytics | Main 
cluster is 100k+ CPU cores, 800TB RAM. | 110PB NVME storage, uncompressed data size on main cluster is 1EB. | [Job listing](https://ahrefs.com/jobs/data-scientist-search) | -| [Airfold](https://www.airfold.co/) | API platform | Main Product | - | - | [Documentation](https://docs.airfold.co/workspace/pipes) | -| [Aiven](https://aiven.io/) | Cloud data platform | Managed Service | - | - | [Blog post](https://aiven.io/blog/introduction-to-clickhouse) | -| [Akamai](https://www.akamai.com/) | Software & Technology | CDN | — | — | [LinkedIn](https://www.linkedin.com/in/david-piatek-bb27368/) | -| [Akvorado](https://demo.akvorado.net/) | Network Monitoring | Main Product | — | — | [Documentation](https://demo.akvorado.net/docs/intro) | -| [Alauda](https://alauda.io) | Software & Technology | Analytics, Logs | — | — | [Alauda, November 2024](https://www.alauda.io) | -| [AlgoNode](https://algonode.io/) | Software & Technology | Algorand Hosting | — | — | [Twitter, April 2023](https://twitter.com/AlgoNode_io/status/1650594948998213632) | -| [Alibaba Cloud](https://cn.aliyun.com/) | Cloud | E-MapReduce | — | — | [Official Website](https://help.aliyun.com/document_detail/212195.html) | -| [Alibaba Cloud](https://cn.aliyun.com/) | Cloud | Managed Service | — | — | [Official Website](https://help.aliyun.com/product/144466.html) | -| [Aloha Browser](https://alohabrowser.com/) | Mobile App | Browser backend | — | — | [Slides in Russian, May 2019](https://presentations.clickhouse.com/meetup22/aloha.pdf) | -| [Altinity](https://altinity.com/) | Cloud, SaaS | Main product | — | — | [Official Website](https://altinity.com/) | -| [Amadeus](https://amadeus.com/) | Travel | Analytics | — | — | [Press Release, April 2018](https://www.altinity.com/blog/2018/4/5/amadeus-technologies-launches-investment-and-insights-tool-based-on-machine-learning-and-strategy-algorithms) | -| [AMP](https://useamp.com/) | Software & Technology | e-Commerce Metrics | — | — | [Twitter Post, May 2024](https://x.com/pc_barnes/status/1793846059724357832) [Meetup Slides](https://github.com/ClickHouse/clickhouse-presentations/blob/master/2024-meetup-melbourne-2/Talk%20Track%201%20-%20AMP's%20data%20journey%20from%20OSS%20to%20ClickHouse%20Cloud%20-%20Chris%20Lawrence%20.pdf) | -| [Android Hub](https://bestforandroid.com/) | Blogging, Analytics, Advertising data | — | — | — | [Official Website](https://bestforandroid.com/) | -| [AnswerAI](https://www.answerai.co.uk/) | Software & Technology | AI Customer Support | — | — | [Twitter, May 2024](https://twitter.com/TomAnswerAi/status/1791062219678998880) | -| [Anton](https://anton.tools/) | Software & Technology | Blockchain Indexer | — | — | [GitHub](https://github.com/tonindexer/anton) | -| [Antrea](https://antrea.io/) | Software & Technology | Kubernetes Network Security | — | — | [Documentation](https://antrea.io/docs/main/docs/network-flow-visibility/) | -| [ApiRoad](https://apiroad.net/) | API marketplace | Analytics | — | — | [Blog post, November 2018, March 2020](https://pixeljets.com/blog/clickhouse-vs-elasticsearch/) | -| [Apitally](https://apitally.io/) | Software & Technology | API Monitoring | — | — | [Twitter, March 2024](https://twitter.com/simongurcke/status/1766005582971170926) | -| [Appsflyer](https://www.appsflyer.com) | Mobile analytics | Main product | — | — | [Talk in Russian, July 2019](https://www.youtube.com/watch?v=M3wbRlcpBbY) | -| [Aptabase](https://aptabase.com/) | Analytics | Privacy-first / open-source Firebase Analytics alternative | — | — | [GitHub 
Repository](https://github.com/aptabase/aptabase/tree/main/etc/clickhouse) | -| [ArenaData](https://arenadata.tech/) | Data Platform | Main product | — | — | [Slides in Russian, December 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup38/indexes.pdf) | -| [Argedor](https://www.argedor.com/en/clickhouse/) | ClickHouse support | — | — | — | [Official website](https://www.argedor.com/en/clickhouse/) | -| [Atani](https://atani.com/en/) | Software & Technology | Crypto Platform | — | — | [CTO LinkedIn](https://www.linkedin.com/in/fbadiola/) | -| [Attentive](https://www.attentive.com/) | Email Marketing | Main product | — | — | [Blog Post](https://clickhouse.com/blog/confoundingly-fast-inside-attentives-migration-to-clickhouse) | -| [Astronomer](https://www.astronomer.io/) | Software & Technology | Observability | — | — | [Slide Deck](https://github.com/ClickHouse/clickhouse-presentations/blob/master/2024-meetup-san-francisco/2024.12.09%20Clickhouse%20_%20Powering%20Astro%20Observe%20with%20Clickhouse.pdf) | -| [Autoblocks](https://autoblocks.ai) | Software & Technology | LLM Monitoring & Deployment | — | — | [Twitter, August 2023](https://twitter.com/nolte_adam/status/1690722237953794048) | -| [Aviso](https://www.aviso.com/) | AI Platform | Reporting | — | — | ClickHouse Cloud user | -| [Avito](https://avito.ru/) | Classifieds | Monitoring | — | — | [Meetup, April 2020](https://www.youtube.com/watch?v=n1tm4j4W8ZQ) | -| [Axis Communications](https://www.axis.com/en-ca) | Video surveillance | Main product | - | - | [Blog post](https://engineeringat.axis.com/schema-changes-clickhouse/) | -| [Azura](https://azura.xyz/) | Crypto |Analytics | — | — | [Meetup Video](https://youtu.be/S3uroekuYuQ)| -| [AzurePrice](https://azureprice.net/) | Analytics | Main Product | — | — | [Blog, November 2022](https://blog.devgenius.io/how-i-migrate-to-clickhouse-and-speedup-my-backend-7x-and-decrease-cost-by-6x-part-1-2553251a9059) | -| [AzurGames](https://azurgames.com/) | Gaming | Analytics | — | — | [AWS Blog, Aug 2024](https://aws.amazon.com/blogs/gametech/azur-games-migrates-all-game-analytics-data-to-clickhouse-cloud-on-aws/) | -| [B2Metric](https://b2metric.com/) | Marketing | Analytics | — | — | [ProductHunt, July 2023](https://www.producthunt.com/posts/b2metric-decision-intelligence?bc=1) | -| [BIGO](https://www.bigo.sg/) | Video | Computing Platform | — | — | [Blog Article, August 2020](https://www.programmersought.com/article/44544895251/) | -| [Badoo](https://badoo.com) | Dating | Time series | — | 1.6 mln events/sec (2018) | [Slides in Russian, December 2019](https://presentations.clickhouse.com/meetup38/forecast.pdf) | -| [Baidu](https://www.baidu.com/) | Internet services | Data warehousing | - | - | [GitHub](https://github.com/ClickHouse/ClickHouse/pull/60361) | -| [Baselime](https://baselime.io/) | Software & Technology | Observability for Serverless | — | — | [Official website](https://baselime.io/) | -| [Basic RUM](https://www.basicrum.com/) | Software & Technology | Real User Monitoring | — | — | [Official website](https://www.basicrum.com/) | -| [Beehiiv](https://www.beehiiv.com/) | Marketing | Analytics | — | — | [Blog, Aug 2024](https://clickhouse.com/blog/data-hive-the-story-of-beehiivs-journey-from-postgres-to-clickhouse) | -| [Beeline](https://beeline.ru/) | Telecom | Data Platform | — | — | [Blog post, July 2021](https://habr.com/en/company/beeline/blog/567508/) | -| [Beekeeper](https://www.beekeeper.io/) | Workforce Enablement | Analytics | — | — | [Blog 
post, April 2024](https://www.meetup.com/clickhouse-switzerland-meetup-group/events/299628922) | -| [Beetested](https://www.beetested.com/) | Software & Technology | Game Testing | — | — | [Case Study, June 2023](https://double.cloud/resources/case-studies/beetested-analyze-millions-of-gamers-emotions-with-doublecloud/) | -| [Benocs](https://www.benocs.com/) | Network Telemetry and Analytics | Main Product | — | — | [Meetup Video, December 2022](https://www.youtube.com/watch?v=48pAVShkeCY&list=PL0Z2YDlm0b3iNDUzpY1S3L_iV4nARda_U&index=12) [Slides, December 2022](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup66/Self%20repairing%20processing%20using%20ClickHouse.pdf) [Blog Post, March 2022](https://clickhouse.com/blog/-indexing-for-data-streams-benocs-telco/) [Slides in English, October 2017](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup9/lpm.pdf) | -| [Bento](https://bento.me/en/home) | Software & Technology | Personal Portfolio | — | — | [Twitter, May 2023](https://twitter.com/gubmee/status/1653405962542219264) | -| [Better Stack](https://betterstack.com/) | Cloud, SaaS | Log Management | - | - | [Official Website](https://betterstack.com/logtail) | -| [BiliBili](https://www.bilibili.com/) | Video sharing | — | — | — | [Blog post, June 2021](https://chowdera.com/2021/06/20210622012241476b.html) | -| [Binom](https://binom.org/) | Analytics | Website analytics | — | — | [Twitter, 2023](https://twitter.com/BinomTracker/status/1722948130948206940) | -| [Bitquery](https://bitquery.io/) | Software & Technology | Blockchain Data Company | — | — | [Hacker News, December 2020](https://bitquery.io/blog/blockchain-intelligence-system) | -| [Bloomberg](https://www.bloomberg.com/) | Finance, Media | Monitoring | — | — | [Meetup Video, December 2022](https://www.youtube.com/watch?v=HmJTIrGyVls&list=PL0Z2YDlm0b3iNDUzpY1S3L_iV4nARda_U&index=9) [Slides, December 2022](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup67/ClickHouse%20for%20Financial%20Analytics%20-%20Bloomberg.pdf) | -| [Bloxy](https://bloxy.info) | Blockchain | Analytics | — | — | [Slides in Russian, August 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup17/4_bloxy.pptx) | -| [Bonree](https://www.bonree.com/) | Software & Technology | Performance Monitoring & Observability | — | — | ClickHouse Meetup in Hangzhou, May 2024 | -| [Bonside](https://www.bonside.com/) | FinTech | - | — | — | [Hacker News, July 2023](https://news.ycombinator.com/item?id=36619722) | -| [BoundaryML](https://www.boundaryml.com/) | Software Development | AI Platform | — | — | [Meetup, March 2025](https://youtu.be/DV-zkQUvuPc) | -| [Botify](https://www.botify.com/) | SaaS | SEO | — | — | [Blog Article, September 2022](https://tech.marksblogg.com/billion-taxi-rides-doublecloud-clickhouse.html) | -| [Braintrust](https://www.usebraintrust.com/) | Software & Technology | Real-time Analytics | — | — | [Written Blog from Meetup Video, July 2024](https://clickhouse.com/blog/building-better-ai-products-faster-how-braintrust-uses-clickhouse-for-real-time-data-analysis) -| [Braze](https://www.braze.com/) | Software & Technology | Real-time Analytics | — | — | [Meetup Video](https://youtu.be/NmEyElaa_xI) -| [Buildkite](https://buildkite.com/) | Software & Technology | Real-time analytics | — | — | [Wellington meetup, February 2025](https://clickhouse.com/videos/wellington-meetup-buildkite-clickhouse-test-analytics) | -| [ByConity](https://byconity.github.io/) | Software & 
Technology | Big Data Analysis Engine | — | — | [GitHub](https://github.com/ByConity/ByConity) | -| [Bytedance](https://www.bytedance.com) | Social platforms | — | — | — | [The ClickHouse Meetup East, October 2020](https://www.youtube.com/watch?v=ckChUkC3Pns) | -| [CARTO](https://carto.com/) | Business Intelligence | Geo analytics | — | — | [Geospatial processing with ClickHouse](https://carto.com/blog/geospatial-processing-with-clickhouse/) | -| [CERN](http://public.web.cern.ch/public/) | Research | Experiment | — | — | [Press release, April 2012](https://www.yandex.com/company/press_center/press_releases/2012/2012-04-10/) | -| [CHEQ](https://cheq.ai/) | Software & Technology | GTM Security | — | — | [Meetup Video, January 2023](https://www.youtube.com/watch?v=rxIO6w4er3k&list=PL0Z2YDlm0b3iNDUzpY1S3L_iV4nARda_U&index=7) [Slides, January 2023](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup68/ClickHouse%20Meetup%20-%20CHEQ.pptx.pdf) | -| [Campaign Deputy](https://campaigndeputy.com/) | SaaS | Analytics, Logs | — | — | [Twitter, February 2023](https://twitter.com/joshabartley/status/1627669208074014721), [Tweet, July 2023](https://twitter.com/joshabartley/status/1677008728711651331) | -| [Canopus Networks](https://canopusnetworks.com/) | AI for Telecom | Real-time analytics | - | - | [Meetup Presentation](https://github.com/ClickHouse/clickhouse-presentations/blob/master/2024-meetup-sydney/Talk%20Track%201%20-%20Canopus%20Networks.pdf) | -| [Capgo.app](https://capgo.app/) | App development | Real-time statistics | - | - | [Twitter](https://twitter.com/martindonadieu/status/1735728943406219736) | -| [CardsMobile](https://cardsmobile.ru/) | Finance | Analytics | — | — | [VC.ru](https://vc.ru/s/cardsmobile/143449-rukovoditel-gruppy-analiza-dannyh) | -| [Castle](https://castle.io/) | Fraud Detection | Main product | — | — | [Community Slack](https://clickhouse.com/slack) | -| [Cato Networks](https://www.catonetworks.com/) | Network Security | Security event analytics | — | 8B (4TB) new events per day | [Full Stack Developers Israel, Jan 2023](https://www.youtube.com/watch?v=Is4TC2gf5EM) | -| [CDN77](https://www.cdn77.com/) | Software & Technology | Content Delivery Network | — | — | [GitHub Comment, April 2024](https://github.com/ClickHouse/ClickHouse/issues/61093#issuecomment-2070150654) | -| [Chainbase](https://chainbase.online/) | Blockchain | Main product | — | — | [Documentation](https://docs.chainbase.online/r/data-cloud-studio/data-cloud-api) -| [ChartMetric](https://chartmetric.com/) | Music Industry | Analytics | — | — | [Meetup Video](https://youtu.be/gd1yWbnaalk) | -| [ChatLayer](https://chatlayer.ai/) | AI virtual assistants | Analytics | — | — | [Press Release, December 2021](https://aiven.io/blog/aiven-for-clickhouse-now-generally-available) | -| [Checkly](https://www.checklyhq.com/) | Software Development | Analytics | — | — | [Twitter, October 2021](https://twitter.com/tim_nolet/status/1445810665743081474?s=20) | -| [ChelPipe Group](https://chelpipegroup.com/) | Analytics | — | — | — | [Blog post, June 2021](https://vc.ru/trade/253172-tyazhelomu-proizvodstvu-user-friendly-sayt-internet-magazin-trub-dlya-chtpz) | -| [Chroma](https://www.trychroma.com/) | Software & Technology | AI-native embedded database | — | — | [GitHub Repository](https://github.com/chroma-core/chroma) [Twitter, February 2023](https://twitter.com/atroyn/status/1625605732644298752) | -| [CipherStash](https://cipherstash.com/) | Software & Technology | Analytics | — | — | [Meetup 
Presentation](https://github.com/ClickHouse/clickhouse-presentations/blob/master/2024-meetup-sydney/Talk%20Track%203%20-%20CipherStash.pdf) | -| [Cisco](http://cisco.com/) | Networking | Traffic analysis | — | — | [Lightning talk, October 2019](https://youtu.be/-hI1vDR2oPY?t=5057) | -| [Citadel Securities](https://www.citadelsecurities.com/) | Finance | — | — | — | [Contribution, March 2019](https://github.com/ClickHouse/ClickHouse/pull/4774) | -| [Citymobil](https://city-mobil.ru) | Taxi | Analytics | — | — | [Blog Post in Russian, March 2020](https://habr.com/en/company/citymobil/blog/490660/) | -| [Clearbit](https://clearbit.com/) | AI | Product usage | — | — | ClickHouse Cloud user | -| [ClickFunnels](https://www.clickfunnels.com/) | Website Builder | | — | — | ClickHouse Cloud user | -| [ClickVisual](https://clickvisual.net/) | Software | Logging Platform | — | — | [Blog Post, May 2022](https://golangexample.com/a-light-weight-log-visual-analytic-platform-for-clickhouse/) | -| [Clog](https://www.hybridlogic.co.uk/) | Software & Technology | Logging | — | — | [Blog, February 2023](https://www.hybridlogic.co.uk/2023/02/clog/) | -| [Cloud Circus, Inc.](https://cloudcircus.jp/) | Software & Technology | Logging | — | — | [Tokyo Meetup, January 2025](https://clickhouse.com/videos/tokyo-meetup-cloudcircus-accelerating-cloudfront-log-analysis) | -| [Cloudflare](https://cloudflare.com) | CDN | Traffic analysis | 36 servers | — | [Blog post, May 2017](https://blog.cloudflare.com/how-cloudflare-analyzes-1m-dns-queries-per-second/), [Blog post, March 2018](https://blog.cloudflare.com/http-analytics-for-6m-requests-per-second-using-clickhouse/) | -| [CloudRaft](https://www.cloudraft.io/) | Software & Technology | Consulting Services | — | — | [Twitter, May 2024](https://x.com/anjuls/status/1792048331805606156) | -| [Codegiant](https://codegiant.io/) | Security | Main product | — | — | [Blog, December 2023](https://blog.codegiant.io/clickhouse-in-codegiant-observability-ecosystem/) | -| [Cognitiv](https://cognitiv.ai/) | AdTech | Offline Feature Store | — | — | [Blog, Aug 2024](https://clickhouse.com/blog/transforming-ad-tech-how-cognitiv-uses-clickhouse-to-build-better-machine-learning-models) | -| [Coinhall](https://coinhall.org/) | Web3 | Blockchain Data Platform | — | — | [Blog, Aug 2024](https://clickhouse.com/blog/trade-secrets-how-coinhall-uses-clickhouse-to-power-its-blockchain-data-platform) | -| [Coinpaprika](https://coinpaprika.com/) | Software & Technology | Cryptocurrency Market Data Analysis | — | — | [Blog, May 2023](https://clickhouse.com/blog/coinpaprika-aggregates-pricing-data) | -| [Comcast](https://corporate.comcast.com/) | Media | CDN Traffic Analysis | — | — | [ApacheCon 2019 Talk](https://www.youtube.com/watch?v=e9TZ6gFDjNg) | -| [Common Room](https://www.commonroom.io/) | Marketing SaaS | Real-Time Analytics | — | — | [Seattle Meetup, March 2024](https://www.youtube.com/watch?v=liTgGiTuhJE) | -| [Constructor](https://constructor.io/) | E-commerce Search | E-commerce Search | — | — | ClickHouse Cloud user | [Constant Contact](https://www.constantcontact.com/) | Marketing Saas | Real-Time Analytics | — | — | [Meetup Video](https://youtu.be/6SeEurehp10) | -| [Contentsquare](https://contentsquare.com) | Web analytics | Main product | — | — | [Meetup Video, January 2023](https://www.youtube.com/watch?v=zvuCBAl2T0Q&list=PL0Z2YDlm0b3iNDUzpY1S3L_iV4nARda_U&index=5) [Blog Post, October 2022](https://clickhouse.com/blog/contentsquare-migration-from-elasticsearch-to-clickhouse) [Blog 
post in French, November 2018](http://souslecapot.net/2018/11/21/patrick-chatain-vp-engineering-chez-contentsquare-penser-davantage-amelioration-continue-que-revolution-constante/) | -| [Coroot](https://coroot.com/) | Software & Technology | Observability | — | — | [Twitter, July 2023](https://twitter.com/coroot_com/status/1680993372385804288?s=20) | -| [Corsearch](https://corsearch.com/) | Marketing SaaS (Brand Protection) | Main Datastore | — | — | [Seattle Meetup, March 2023](https://www.youtube.com/watch?v=BuS8jFL9cvw&list=PL0Z2YDlm0b3iNDUzpY1S3L_iV4nARda_U&index=10) | -| [Corunet](https://coru.net/) | Analytics | Main product | — | — | [Slides in English, April 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup21/predictive_models.pdf) | -| [Covalent](https://www.covalenthq.com/) | Financial - Crypto | Blockchain analysis | — | — | ClickHouse Cloud user | -| [CraiditX 氪信](https://www.creditx.com) | Finance AI | Analysis | — | — | [Slides in English, November 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup33/udf.pptx) | -| [Craigslist](https://sfbay.craigslist.org/) | Classifieds | Rate limiting (Redis replacement) | — | — | [SF Meetup, March 2024](https://www.youtube.com/watch?v=wRwqrbUjRe4&list=PL0Z2YDlm0b3iNDUzpY1S3L_iV4nARda_U&index=9) | -| [Crazypanda](https://crazypanda.ru/en/) | Games | | — | — | Live session on ClickHouse meetup | -| [Criteo](https://www.criteo.com/) | Retail | Main product | — | — | [Slides in English, October 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup18/3_storetail.pptx) | -| [Cryptology](https://cryptology.com/) | Digital Assets Trading Platform | — | — | — | [Job advertisement, March 2021](https://career.habr.com/companies/cryptology/vacancies) | -| [Culver Max Entertainment/Sony Pictures](https://www.sonypicturesnetworks.com/overview) | Television/Entertainment | Media streaming analytics | — | — | ClickHouse Cloud user | -| [Cumul.io](https://www.cumul.io) | Software & Technology | Customer Analytics | — | — | [Blog Post, June 2022](https://clickhouse.com/blog/optimizing-your-customer-facing-analytics-experience-with-cumul-io-and-clickhouse) | -| [DB Pilot](https://www.dbpilot.io/) | Software & Technology | Database GUI | — | — | [Twitter, August 2023](https://twitter.com/dennis_hellweg/status/1701349566354686143) | -| [DENIC](https://www.denic.de/) | Software & Technology | Data Science Analytics | — | — | [Blog Post, May 2022](https://clickhouse.com/blog/denic-improves-query-times-by-10x-with-clickhouse) | -| [DNSMonster](https://dnsmonster.dev/) | Software & Technology | DNS Monitoring | — | — | [GitHub Repository](https://github.com/mosajjal/dnsmonster) | -| [Darwinium](https://www.darwinium.com/) | Software & Technology | Security and Fraud Analytics | — | — | [Blog Post, July 2022](https://clickhouse.com/blog/fast-feature-rich-and-mutable-clickhouse-powers-darwiniums-security-and-fraud-analytics-use-cases) | -| [Dash0](https://www.dash0.com/) | APM Platform | Main product | — | — | [Careers page](https://careers.dash0.com/senior-product-engineer-backend/en) | -| [Dashdive](https://www.dashdive.com/) | Infrastructure management | Analytics | — | — | [Hacker News, 2024](https://news.ycombinator.com/item?id=39178753) | -| [Dassana](https://lake.dassana.io/) | Cloud data platform | Main product | - | - | [Blog Post, Jan 2023](https://clickhouse.com/blog/clickhouse-powers-dassanas-security-data-lake) [Direct reference, April 
2022](https://news.ycombinator.com/item?id=31111432) | -| [Datafold](https://www.datafold.com/) | Data Reliability Platform | — | — | — | [Job advertisement, April 2022](https://www.datafold.com/careers) | -| [Dataliance for China Telecom](https://www.chinatelecomglobal.com/) | Telecom | Analytics | — | — | [Slides in Chinese, January 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup12/telecom.pdf) | -| [DeepFlow](https://deepflow.io) | Software & Technology | Observability | — | — | [GitHub](https://github.com/deepflowio/deepflow) | -| [DeepL](https://www.deepl.com/) | Machine Learning | — | — | — | [Blog Post, July 2022](https://clickhouse.com/blog/deepls-journey-with-clickhouse) [Video, October 2021](https://www.youtube.com/watch?v=WIYJiPwxXdM&t=1182s) | -| [Deepglint 格灵深瞳](https://www.deepglint.com/) | AI, Computer Vision | OLAP | — | — | [Official Website](https://www.deepglint.com/) | -| [Deeplay](https://deeplay.io/eng/) | Gaming Analytics | — | — | — | [Job advertisement, 2020](https://career.habr.com/vacancies/1000062568) | -| [Depot](https://depot.dev/) | Software & Technology | CI & Build Acceleration | — | — | [Twitter, April 2024](https://twitter.com/jacobwgillespie/status/1778463642150695048) | -| [Deutsche Bank](https://db.com) | Finance | BI Analytics | — | — | [Meetup Video, December 2022](https://www.youtube.com/watch?v=O3GJ6jag3Hc&list=PL0Z2YDlm0b3iNDUzpY1S3L_iV4nARda_U&index=11) [Slides in English, October 2019](https://bigdatadays.ru/wp-content/uploads/2019/10/D2-H3-3_Yakunin-Goihburg.pdf) | -| [DevHubStack](http://devhubstack.com/) | Software & Technology | Community Management | — | — | [Twitter, May 2024](https://twitter.com/thedevhubstack/status/1790655455229771789) | -| [DeWu Poizon](https://www.dewu.com/) | E-commerce | Real-Time Analytics | — | — | [Blog, March 2025](https://clickhouse.com/blog/observing-in-style-how-poizon-rebuilt-its-data-platform-with-clickhouse-enterprise-edition) | -| [Didi](https://web.didiglobal.com/) | Transportation & Ride Sharing | Observability | 400+ logging, 40 tracing | PBs/day / 40GB/s write throughput, 15M queries/day, 200 QPS peak | [Blog, Apr 2024](https://clickhouse.com/blog/didi-migrates-from-elasticsearch-to-clickHouse-for-a-new-generation-log-storage-system) | -| [DigiCert](https://www.digicert.com) | Network Security | DNS Platform | — | over 35 billion events per day | [Job posting, Aug 2022](https://www.indeed.com/viewjob?t=Senior+Principal+Software+Engineer+Architect&c=DigiCert&l=Lehi,+UT&jk=403c35f96c46cf37&rtk=1g9mnof7qk7dv800) | -| [Disney+](https://www.disneyplus.com/) | Video Streaming | Analytics | — | 395 TiB | [Meetup Video, December 2022](https://www.youtube.com/watch?v=CVVp6N8Xeoc&list=PL0Z2YDlm0b3iNDUzpY1S3L_iV4nARda_U&index=8) [Slides, December 2022](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup67/Disney%20plus%20ClickHouse.pdf) | -| [Dittofeed](https://dittofeed.com/) | Software & Technology | Open Source Customer Engagement | — | — | [Hacker News, June 2023](https://news.ycombinator.com/item?id=36061344) | -| [Diva-e](https://www.diva-e.com) | Digital consulting | Main Product | — | — | [Slides in English, September 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup29/ClickHouse-MeetUp-Unusual-Applications-sd-2019-09-17.pdf) | -| [Dolphin Emulator](https://dolphin-emu.org/) | Games | Analytics | — | — | [Twitter, September 2022](https://twitter.com/delroth_/status/1567300096160665601) | -| 
[DoorDash](https://www.doordash.com/home) | E-commerce | Monitoring | — | — | [Meetup, December 2024](https://github.com/ClickHouse/clickhouse-presentations/blob/master/2024-meetup-san-francisco/Clickhouse%20Meetup%20Slides%20(1).pdf) | -| [Dopple.io](https://dolphin-emu.org/) | E-commerce | 3D Analytics | — | — | [Meetup, September 2024](https://docs.google.com/presentation/d/1_i7H1EIfEttPKtP9CCAB_4Ajs_Li4N6S/edit#slide=id.p4) | -| [DotSentry](https://forum.polkadot.network/t/dotsentry-ecosystem-wide-monitoring-solution/8210) | Software & Technology | Monitoring for Polkadot Ecosystem | — | — | [Forum Post, May 2024](https://forum.polkadot.network/t/dotsentry-ecosystem-wide-monitoring-solution/8210) | -| [DrDroid](https://www.drdroid.io/) | Software & Technology | Monitoring | — | — | [Slack, August 2023](https://clickhousedb.slack.com/archives/C04N3AU38DV/p1694151014185729) | -| [Duckbill Group](https://www.duckbillgroup.com/) | Software & Technology | — | — | — | [Twitter, May 2024](https://twitter.com/mike_julian/status/1789737184192315876) | -| [eBay](https://www.ebay.com/) | E-commerce | Logs, Metrics and Events | — | — | [Official website, Sep 2020](https://tech.ebayinc.com/engineering/ou-online-analytical-processing/) | -| [Ecommpay](https://ecommpay.com/) | Payment Processing | Logs | — | — | [Video, Nov 2019](https://www.youtube.com/watch?v=d3GdZTOWGLk) | -| [Ecwid](https://www.ecwid.com/) | E-commerce SaaS | Metrics, Logging | — | — | [Slides in Russian, April 2019](https://nastachku.ru/var/files/1/presentation/backend/2_Backend_6.pdf) | -| [Effodio](https://www.effodio.com/) | Observability, Root cause analysis | Event storage | - | - | [Blog, 2024](https://peng.fyi/post/factorial-growth-of-clickhouse-with-clause/) | -| [Egg](https://github.com/ducc/egg) | Error Aggregation | Main Product | — | — | [GitHub repository](https://github.com/ducc/egg) | -| [Electrum](https://www.electrum.id/) | Technology | Real-time Analytics | — | — | [Meetup Blog, October 2024](https://clickhouse.com/videos/driving-jakarta-electric-motorcycle-transformation-with-clickhouse) | -| [Engage](https://engage.so/) | Software & Technology | Customer Engagement | — | — | [Twitter Post, May 2024](https://x.com/kehers/status/1793935987778724038) | -| [Embrace](https://embrace.io/) | Observability | Logs | — | — | [Blog post, June 2024](https://embrace.io/blog/solving-large-logs-with-clickhouse/) | -| [Ensemble](https://ensembleanalytics.io/) | Analytics | Analytics | — | — | [Official website, Sep 2020](https://ensembleanalytics.io/blog/why-we-went-all-in-clickhouse) | -| [EventBunker.io](https://www.eventbunker.io/) | Serverless Data Processing | — | — | — | [Twitter, April 2021](https://twitter.com/Halil_D_/status/1379839133472985091) | -| [ExitLag](http://www.exitlag.com/) | Software & Technology | Gaming Data Routing | — | — | [Blog, June 2023](https://clickhouse.com/blog/boosting-game-performance-exitlag-quest-for-a-better-data-management-system) | -| [ExitLag](https://www.exitlag.com/) | Software & Technology | Optimized Gaming Experience | — | — | [ClickHouse Blog, June 2023](https://clickhouse.com/blog/boosting-game-performance-exitlag-quest-for-a-better-data-management-system) | -| [Exness](https://www.exness.com/) | Trading | Metrics, Logging | — | — | [Talk in Russian, May 2019](https://youtu.be/_rpU-TvSfZ8?t=3215) | -| [Explo](https://www.explo.co/) | Analytics | - | — | — | [Meetup Video](https://youtu.be/FZyPvKpFiDk) | -| [Fastly](https://www.fastly.com/) | Internet Services | Metrics 
(Graphite replacement) | — | — | [Boston Meetup, Dec 2023](https://clickhouse.com/videos/scaling-graphite-with-clickhouse) | -| [FastNetMon](https://fastnetmon.com/) | DDoS Protection | Main Product | | — | [Official website](https://fastnetmon.com/docs-fnm-advanced/fastnetmon-advanced-traffic-persistency/) | -| [Fastnear](https://fastnear.com/) | Infrastructure | Main product | — | — | [Twitter, 2024](https://twitter.com/ekuzyakov/status/1762500731154698421) | -| [FeatBit](https://www.featbit.co/) | Software & Technology | Feature Flag Management | — | — | [GitHub, August 2023](https://github.com/featbit/featbit) | -| [FinBox](https://finbox.in/) | Software & Technology | Financial Services | — | — | [Slack](https://clickhousedb.slack.com/archives/C04N3AU38DV/p1688198501884219) | -| [Fingerprint](https://fingerprint.com/) | Fraud detection | Fraud detection | — | — | [Meetup](https://www.linkedin.com/posts/system29a_clickhouse-meetup-in-berlin-tue-may-16-activity-7063805876570050561-UE-n/) | -| [Firebolt](https://www.firebolt.io/) | Analytics | Main product | - | - | [VLDB 2022 paper](https://www.firebolt.io/content/firebolt-vldb-cdms-2022), [VLDB 2022 slides](https://cdmsworkshop.github.io/2022/Slides/Fri_C2.5_MoshaPasumansky.pdf) | -| [Fl0](https://www.fl0.com/) | Cloud | Server management | - | - | [Meetup presentation](https://presentations.clickhouse.com/?path=meetup94) | -| [Flipkart](https://www.flipkart.com/) | e-Commerce | — | — | — | [Talk in English, July 2020](https://youtu.be/GMiXCMFDMow?t=239) | -| [Flipt](https://www.flipt.io/) | Software | Software development management | - | - | [Blog, 2024](https://www.flipt.io/blog/analytics-with-clickhouse) | -| [Flock Safety](https://www.flocksafety.com/) | Crime Surveillance | Real Time Traffic Analytics | - | - | [Meetup,December 2024](https://github.com/ClickHouse/clickhouse-presentations/blob/master/2024-meetup-new-york/flock-safety-clickhouse-presentation.pdf) | -| [FortiSIEM](https://www.fortinet.com/) | Information Security | Supervisor and Worker | — | — | [Documentation](https://help.fortinet.com/fsiem/6-6-0/Online-Help/HTML5_Help/clickhouse_config.htm) | -| [Fortis Games](https://fortisgames.com/) | Game studio | Online data analytics | - | — | [Blog post, July 2023](https://thenewstack.io/a-real-time-data-platform-for-player-driven-game-experiences/) | -| [Foxway](https://www.foxway.com/en/) | Software & Technology | e-Commerce | — | — | [ClickHouse Meetup, April 2024](https://twitter.com/ClickHouseDB/status/1782833838886121492) | -| [Friendly Captcha](https://friendlycaptcha.com) | Bot Protection | — | — | — | [Job Posting, Aug 2022](https://news.ycombinator.com/item?id=32311825) | -| [FunCorp](https://fun.co/rp) | Games | | — | 14 bn records/day as of Jan 2021 | [Article](https://www.altinity.com/blog/migrating-from-redshift-to-clickhouse) | -| [Futurra Group](https://futurragroup.com/) | Analytics | — | — | — | [Article in Russian, December 2021](https://dou.ua/forums/topic/35587/) | -| [G-Core Labs](https://gcorelabs.com/) | Security | Main product | — | — | [Job posting, May 2022](https://careers.gcorelabs.com/jobs/Careers) | -| [Galaxy-Future](https://www.galaxy-future.com/en/home) | Software & Technology | Metric Monitoring & Measurement | — | — | [CudgX GitHub Repository](https://github.com/galaxy-future/cudgx) | -| [Geniee](https://geniee.co.jp) | Ad network | Main product | — | — | [Blog post in Japanese, July 2017](https://tech.geniee.co.jp/entry/2017/07/20/160100) | -| [Genotek](https://www.genotek.ru/) | 
Bioinformatics | Main product | — | — | [Video, August 2020](https://youtu.be/v3KyZbz9lEE) | -| [Gigapipe](https://gigapipe.com/) | Managed ClickHouse | Main product | — | — | [Official website](https://gigapipe.com/) | -| [Gigasheet](https://gigasheet.co/) | Analytics | Main product | — | — | Direct Reference, February 2022 | -| [GitLab](https://gitlab.com/) | Code and DevOps | APM | — | — | [Official website](https://gitlab.com/gitlab-org/incubation-engineering/apm/apm) | -| [Glaber](https://glaber.io/) | Monitoring | Main product | — | — | [Website](https://glaber.io/) | -| [Glenrose Group](https://www.glenrosegroup.com/) | Expense management | — | — | — | [Twitter](https://twitter.com/EncodedRose/status/1706145758897180783) | -| [Gluten](https://github.com/oap-project/gluten) | Software & Technology | Spark performance | — | — | [Github, July 2023](https://github.com/oap-project/gluten) | -| [Goldsky](https://goldsky.com/) | Software & Technology | Blockchain data analytics | - | — | [Documentation, July 2023](https://docs.goldsky.com/) | -| [Good Job Games](https://goodjobgames.com/) | Games | Event Processing | — | — | [Job Posting, Aug 2022](https://news.ycombinator.com/item?id=32313170) | -| [Goodpeople](https://twitter.com/_suzychoi/status/1702113350258180245) | Human Resources | OLAP | — | — | [Twitter, 2023](https://twitter.com/_suzychoi/status/1702113350258180245) | -| [Gorgias](https://www.gorgias.com/) | Software & Technology | eCommerce Helpdesk Analytics | — | — | [ClickHouse Slack, April 2023](https://clickhousedb.slack.com/archives/C04N3AU38DV/p1682502827729909) | -| [Grafbase](https://grafbase.com/) | Software & Technology | GraphQL API Management | — | — | [Blog, June 2023](https://grafbase.com/blog/how-to-build-your-own-realtime-analytics-dashboards) | -| [GraphCDN](https://graphcdn.io/) | CDN | Traffic Analytics | — | — | [Blog Post in English, August 2021](https://altinity.com/blog/delivering-insight-on-graphql-apis-with-clickhouse-at-graphcdn/) | -| [GraphJSON](https://www.graphjson.com) | Cloud analytics platform | Main product | - | - | [Official Website, November 2021](https://www.graphjson.com/guides/about) | -| [GraphQL Hive](https://graphql-hive.com/) | Software Development | Traffic analysis | — | — | [Source code](https://github.com/kamilkisiela/graphql-hive) | -| [Groundcover](https://groundcover.com/) | Observability | Kubernetes Observability | - | — | [Documentation, July 2023](https://docs.groundcover.com/docs/learn-more/architecture) | -| [Grouparoo](https://www.grouparoo.com) | Data Warehouse Integrations | Main product | — | — | [Official Website, August 2021](https://www.grouparoo.com/integrations) | -| [Growthbook](https://www.growthbook.io) | Open Source Feature Flagging | Integration | — | — | [Meetup Video](https://youtu.be/pVNxXfVB2cE) | -| [Gumlet](https://www.gumlet.com/) | CDN | Analytics | — | — | [Meetup Presentation](https://github.com/ClickHouse/clickhouse-presentations/blob/master/2024-meetup-bangalore-2/Talk%20Track%202%20-%20Gumlet.pdf) | -| [Harvey](https://www.harvey.ai/) | AI for legal | Network analytics | — | — | [San Francisco Meetup, September 2024](https://clickhouse.com/videos/effective-network-threat-detection) | -| [HUYA](https://www.huya.com/) | Video Streaming | Analytics | — | — | [Slides in Chinese, October 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup19/7.%20ClickHouse万亿数据分析实践%20李本旺(sundy-li)%20虎牙.pdf) | -| [Haibo 海博科技](https://www.botech.com.cn/) | Big Data | OLAP | — | — | [Personal 
reference](https://github.com/ClickHouse/clickhouse-docs/pull/279) | -| [Helicone](https://helicone.ai) | Software & Technology | LLM monitoring | — | — | [Meetup, August 2023](https://clickhouse.com/blog/helicones-migration-from-postgres-to-clickhouse-for-advanced-llm-monitoring) | -| [Hewlett-Packard](https://www.hp.com) | Software & Technology | - | — | — | [LinkedIn post, November 2023](https://www.indeed.com/viewjob?t=Machine+Learning+Engineer&c=Hewlett-Packard+CDS+GmbH&l=Houston,+TX&jk=109385f349350746&rtk=1hg3128s9kkf6800) | -| [Hi-Fi](https://hi.fi/) | Software & Technology | Music Industry Analytics | — | — | [Blog Post, January 2023](https://clickhouse.com/blog/hifis-migration-from-bigquery-to-clickhouse) | -| [Highlight](https://www.highlight.io/) | Software & Technology | Monitoring | — | — | [Hacker News, February 2023](https://news.ycombinator.com/item?id=34897645), [GitHub](https://github.com/highlight/highlight/tree/87f7e3882b88e9019d690847a134231e943890fe/backend/clickhouse) | -| [HockeyStack](https://hockeystack.com/) | Analytics platform | OLAP | — | — | [Blog](https://hockeystack.com/blog/a-new-database/) | -| [Honeybadger](https://www.honeybadger.io/) | Software | Error tracking | - | - | [Mastadon 2024](https://hachyderm.io/@wood/111904268945097226) | -| [Hookdeck](https://hookdeck.com/) | Software & Technology | Webhook | — | — | [Twitter, June 2023](https://twitter.com/mkherlakian/status/1666214460824997889) | -| [Hopsteiner](https://www.hopsteiner.com/) | Agriculture | - | — | — | [Job post, July 2023](https://www.indeed.com/viewjob?t=Systems+Administrator&c=S+S+STEINER&l=Yakima,+WA&jk=5b9b7336de0577d5&rtk=1h45ruu32j30q800&from=rss) | -| [Horizon](https://horizon.io/) | Software & Technology | Gaming Analytics | — | — | [Twitter, July 2023](https://twitter.com/peterk/status/1677099027110805504) | -| [Huawei](https://www.huaweicloud.com/intl/en-us/) | Software & Technology | Cloud data platform | — | — | [Documentation](https://doc.hcs.huawei.com/usermanual/mrs/mrs_01_2344.html) -| [Hubalz](https://hubalz.com) | Web analytics | Main product | — | — | [Twitter, July 2023](https://twitter.com/Derinilkcan/status/1676197439152312321) | -| [Huntress](https://www.huntress.com) | Security analytics | Main product | — | — | [Blog Post, November 2024](https://clickhouse.com/blog/how-huntress-improved-performance-and-slashed-costs-with-clickHouse) | -| [Hydrolix](https://www.hydrolix.io/) | Cloud data platform | Main product | — | — | [Documentation](https://docs.hydrolix.io/guide/query) | -| [HyperDx](https://www.hyperdx.io/) | Software & Technology | Open Telemetry | — | — | [HackerNews, May 2023](https://news.ycombinator.com/item?id=35881942) | -| [Hystax](https://hystax.com) | Cloud Operations | Observability Analytics | - | - | [Blog](https://hystax.com/clickhouse-for-real-time-cost-saving-analytics-how-to-stop-hammering-screws-and-use-an-electric-screwdriver/) | -| [IBM](https://www.ibm.com) | APM Platform | Ex-Instana | — | — | See Instana | -| [IBM QRadar](https://www.ibm.com) | IBM QRadar Log Insights | Main Product | — | — | [IBM Blog](https://www.ibm.com/blog/closing-breach-window-from-data-to-action/) | -| [ICA](https://www.the-ica.com/) | FinTech | Risk Management | — | — | [Blog Post in English, Sep 2020](https://altinity.com/blog/clickhouse-vs-redshift-performance-for-fintech-risk-management?utm_campaign=ClickHouse%20vs%20RedShift&utm_content=143520807&utm_medium=social&utm_source=twitter&hss_channel=tw-3894792263) | -| [Idealista](https://www.idealista.com) | 
Real Estate | Analytics | — | — | [Blog Post in English, April 2019](https://clickhouse.com/blog/en/clickhouse-meetup-in-madrid-on-april-2-2019) | -| [Idea Clan](https://ideaclan.com/) | Digital Marketing | Real-Time Analytics | — | — | [Gurgaon Meetup talk, March 2025](https://clickhouse.com/videos/gurgaon-meetup-fabfunnel-and-clickhouse-delivering-real-time-marketing-analytics) | -| [Improvado](https://improvado.io/) | Revenue Operations | Data Stack | — | — | [Blog Post, December 2021](https://improvado.io/blog/clickhouse-warehousing-pricing) | -| [INCREFF](https://www.increff.com/) | Retail Technology | Business Intelligence | — | — | [Meetup Presentation](https://github.com/ClickHouse/clickhouse-presentations/blob/master/2024-meetup-bangalore-2/Talk%20Track%203%20-%20Scaling%20BI%20at%20Increff%20with%20Clickhouse.pdf) | -| [Inigo](https://inigo.io/) | Software & Technology | GraphQL Gateway | — | — | [Blog, March 2023](https://inigo.io/blog/materialized_views_and_clickhouse) [Blog, June 2023](https://clickhouse.com/blog/harnessing-the-power-of-materialized-views-and-clickhouse-for-high-performance-analytics-at-inigo) | -| [Infobaleen](https://infobaleen.com) | AI markting tool | Analytics | — | — | [Official site](https://infobaleen.com) | -| [Infovista](https://www.infovista.com/) | Networks | Analytics | — | — | [Slides in English, October 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup30/infovista.pdf) | -| [Inngest](https://www.inngest.com/) | Software & Technology | Serverless queues and jobs | — | — | [TechCrunch, July 2023](https://techcrunch.com/2023/07/12/inngest-helps-developers-build-their-backend-workflows-raises-3m/) | -| [InnoGames](https://www.innogames.com) | Games | Metrics, Logging | — | — | [Slides in Russian, September 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup28/graphite_and_clickHouse.pdf) | -| [Instabug](https://instabug.com/) | APM Platform | Main product | — | — | [Blog Post, May 2022](https://clickhouse.com/blog/10x-improved-response-times-cheaper-to-operate-and-30-storage-reduction-why-instabug-chose-clickhouse-for-apm) | -| [Instacart](https://instacart.com/) | Delivery | Data Engineering and Infrastructure | — | — | [Blog Post, May 2022](https://www.instacart.com/company/how-its-made/data-engineering-and-infrastructure-at-instacart-with-engineering-manager-abhi-kalakuntla/) | -| [Instana](https://www.instana.com) | APM Platform | Main product | — | — | [Twitter post](https://twitter.com/mieldonkers/status/1248884119158882304) | -| [Integros](https://integros.com) | Platform for video services | Analytics | — | — | [Slides in Russian, May 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup22/strategies.pdf) | -| [inwt](https://www.inwt-statistics.com/) | Software & Technology | Data Science | — | — | [Blog Post, December 2023](https://www.inwt-statistics.com/blog/business_case_air_pollution_forecast) | -| [Ippon Technologies](https://ippon.tech) | Technology Consulting | — | — | — | [Talk in English, July 2020](https://youtu.be/GMiXCMFDMow?t=205) | -| [Ivi](https://www.ivi.ru/) | Online Cinema | Analytics, Monitoring | — | — | [Article in Russian, Jan 2018](https://habr.com/en/company/ivi/blog/347408/) | -| [Jerry](https://getjerry.com/) | Automotive SaaS | Analytics (Migrate from Redshift) | — | — | [Blog, May 2024](https://juicefs.com/en/blog/user-stories/read-write-separation) | -| [Jinshuju 金数据](https://jinshuju.net) | BI Analytics | Main product | — | — | 
[Slides in Chinese, October 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup24/3.%20金数据数据架构调整方案Public.pdf) | -| [Jitsu](https://jitsu.com) | Cloud Software | Data Pipeline | — | — | [Documentation](https://jitsu.com/docs/destinations-configuration/clickhouse-destination), [Hacker News post](https://news.ycombinator.com/item?id=29106082) | -| [JuiceFS](https://juicefs.com/) | Storage | Shopping Cart | - | - | [Blog](https://juicefs.com/blog/en/posts/shopee-clickhouse-with-juicefs/) | -| [Jump Capital](https://jumpcap.com/) | Fintech | Investor | - | - | [Chicago meetup, September 2024](https://clickhouse.com/videos/fireside-chat-with-alexey-saurabh) | -| [Jump Trading](https://www.jumptrading.com/) | Financial | Analytics | - | - | [Chicago meetup, September 2024](https://clickhouse.com/videos/clikchouse-demo) | -| [June](https://www.june.so/) | Product analytics | Main product | - | - | [Job post](https://www.ycombinator.com/companies/june/jobs/SHd7fFLYG-founding-engineer) | -| [Juspay](https://juspay.in/) | Software & Technology | Payments | — | — | [Blog, March 2023](https://clickhouse.com/blog/juspay-analyzes-payment-transactions-in-real-time-with-clickhouse) | -| [KGK Global](https://www.kgk-global.com/en/) | Vehicle monitoring | — | — | — | [Press release, June 2021](https://zoom.cnews.ru/news/item/530921) | -| [KMK Online](https://www.kmkonline.co.id/) | Digital Services | Streaming analytics | — | — | ClickHouse Cloud user | -| [Kaiko](https://www.kaiko.com/) | Digital Assets Data Provider | — | — | — | [Job advertisement, April 2022](https://kaiko.talentlyft.com/) | -| [Kakaocorp](https://www.kakaocorp.com/) | Internet company | — | — | — | [if(kakao)2020](https://tv.kakao.com/channel/3693125/cliplink/414129353), [if(kakao)2021](https://if.kakao.com/session/24) | -| [Kami](https://www.kamiapp.com/) | Education, Software & Technology | Real-time Analytics | — | — | [Auckland Meetup, CTO talk, February 2025](https://clickhouse.com/videos/auckland-meetup-kami-ingesting-clickstream-data-into-clickhouse), [Auckland Meetup, Head of Data talk, Feburary 2025](https://clickhouse.com/videos/auckland-meetup-kami-evolution-of-kami-data-infrastructure) | -| [Klaviyo](https://www.klaviyo.com/) | E-Commerce Marketing Automation Platform | — | 128 nodes | — | [Klaviyo Engineering Blog, Jan 2023](https://klaviyo.tech/adaptive-concurrency-control-for-mixed-analytical-workloads-51350439aeec) , [Klaviyo Engineering Blog, July 2023](https://klaviyo.tech/taking-the-first-sip-an-overview-of-klaviyos-segmentation-improvement-project-7db997f36b39), [video](https://youtu.be/8Sk5iO9HGRY) | -| [Knock.app](https://knock.app/) | Software | Notifications management | - | - | [Twitter, 2024](https://twitter.com/cjbell_/status/1759989849577181356) | -| [Kodiak Data](https://www.kodiakdata.com/) | Clouds | Main product | — | — | [Slides in Engish, April 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup13/kodiak_data.pdf) | -| [Kontur](https://kontur.ru) | Software Development | Metrics | — | — | [Talk in Russian, November 2018](https://www.youtube.com/watch?v=U4u4Bd0FtrY) | -| [Kopo Kopo](https://kopokopo.co.ke/) | FinTech | Metrics | — | — | ClickHouse Cloud user | -| [Kuaishou](https://www.kuaishou.com/) | Video | — | — | — | [ClickHouse Meetup, October 2018](https://clickhouse.com/blog/en/2018/clickhouse-community-meetup-in-beijing-on-october-28-2018/) | -| [Kujiale 酷家乐](https://www.kujiale.com/) | VR smart interior design platform. 
| Use in log monitoring platform. | Main cluster is 800+ CPU cores, 4000+ GB RAM. | SSD 140+ TB, HDD 280+ TB. | [Blog, July 2023](https://juejin.cn/post/7251786922615111740/) | -| [Kyligence](https://kyligence.io/) | Managed Service | Main Product | — | — | [Website](https://kyligence.io/all-inclusive-olap/) | -| [LANCOM Systems](https://www.lancom-systems.com/) | Network Solutions | Traffic analysis | - | - | [ClickHouse Operator for Kubernetes](https://www.lancom-systems.com/), [Hacker News post](https://news.ycombinator.com/item?id=29413660) | -| [Langchain](https://www.langchain.com/) | Software & Technology | LLM Monitoring | - | - | [Blog, Apr 2024](https://clickhouse.com/blog/langchain-why-we-choose-clickhouse-to-power-langchain) | -| [LangDB](https://langdb.ai/) | Software & Technology | AI Gateway | - | - | [Singapore Meetup talk, February 2025](https://clickhouse.com/videos/singapore-meetup-langdb-building-intelligent-applications-with-clickhouse) | -| [LangFuse](https://langfuse.com/) | Software & Technology | LLM Monitoring | - | - | [Meetup, March 2025](https://youtu.be/AnghkoucpN0) | -| [Langtrace AI](https://www.langtrace.ai/) | Software & Technology | LLM Monitoring | - | - | [Twitter, May 2024](https://x.com/karthikkalyan90/status/1790483625743716703) | -| [Lago](https://www.getlago.com/) | Billing automation | - | — | — | [GitHub Wiki post](https://github.com/getlago/lago/wiki/How-ClickHouse-saved-our-events-engine-problem) | -| [Lagon](https://lagon.app/) | Software Development | Serverless Functions | — | — | [Twitter, 2023](https://twitter.com/tomlienard/status/1702759256909394010) | -| [Last9](https://last9.io/) | Software & Technology | Observability | — | — | [Mumbai Meetup, February 2025](https://clickhouse.com/videos/the-telemetry-data-platform-breaking-down-operational-silos) , [Bangalore Meetup, February 2025](https://clickhouse.com/videos/less-war-more-room-last9) , [Blog, April 2025](https://clickhouse.com/blog/last9-clickhouse-delivering-seamless-observability-minus-the-chaos) | -| [Laudspeaker](https://laudspeaker.com/) | Software & Technology | Open Source Messaging | — | — | [GitHub](https://github.com/laudspeaker/laudspeaker) | -| [Lawrence Berkeley National Laboratory](https://www.lbl.gov) | Research | Traffic analysis | 5 servers | 55 TiB | [Slides in English, April 2019](https://www.smitasin.com/presentations/2019-04-17_DOE-NSM.pdf) | -| [Lever](https://www.lever.co/) | Talent Management | Recruiting | - | - | [Hacker News post](https://news.ycombinator.com/item?id=29558544) | -| [LifeStreet](https://lifestreet.com/) | Ad network | Main product | 75 servers (3 replicas) | 5.27 PiB | [Blog post in Russian, February 2017](https://habr.com/en/post/322620/) | -| [LimeChat](https://www.limechat.ai/) | Mobile chat | Whatsapp Commerce | - | - | [LinkedIn 2024](https://www.linkedin.com/pulse/scaling-analytics-clickhouse-story-nikhil-gupta-gezcc/) | -| [LINE Digital Frontier](https://ldfcorp.com/ja) | Gaming | Real-time Analytics | - | - | [Tokyo Meetup, January 2025](https://clickhouse.com/videos/tokyo-meetup-line-from-stateles-servers-to-real-time-analytics) | -| [LiteLLM](https://github.com/BerriAI/litellm) | Software | API management | | - | [GitHub](https://github.com/BerriAI/litellm/blob/e7b88c2134a013f527304de29358238a5593f91f/cookbook/misc/clickhouse_insert_logs.py#L4) | -| [Little Red Book (Xiaohongshu)](http://www.xiaohongshu.com/) | Social Media | Data warehouse | — | — | [Presentation, March 
2023](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup71/LittleRedBook.pdf) | -| [LogfireAI](https://logfire.ai/) | Software & Technology | Monitoring & Observability | — | — | [Twitter, March 2024](https://twitter.com/logfire_ai/status/1765947119200841883) | -| [LogSnag](https://logsnag.com/) | Software & Technology | Realtime Monitoring | — | — | [Interview, December 2022](https://founderbeats.com/shayan-on-building-and-growing-logsnag-as-a-solo-founder) | -| [Logtail](https://betterstack.com/logtail) | Cloud, SaaS | Log Management | - | - | [Official Website](https://betterstack.com/logtail) | -| [Loja Integrada](https://lojaintegrada.com.br/) | E-Commerce | — | — | — | [Case Study, March 2023](https://double.cloud/resources/case-studies/lojaintegrada-and-pagali-switch-to-doublecloud-to-make-running-clickhouse-easier) | -| [Longbridge Technology](https://longbridge.com/) | E-Commerce | — | — | — | [Blog, March 2025](https://clickhouse.com/blog/longbridge-technology-simplifies-their-architecture-and-achieves-10x-performance-boost-with-clickhouse) | -| [Lookforsale](https://lookforsale.ru/) | E-Commerce | — | — | — | [Job Posting, December 2021](https://telegram.me/javascript_jobs/587318) | -| [Loopme](https://loopme.com/) | AdTech | Analytics | — | — | [Blog, Aug 2024](https://clickhouse.com/blog/measuring-brand-impact-how-loopme-uses-clickhouse-to-deliver-better-brand-advertising-outcomes) | -| [Luabase](https://luabase.com/) | Software | Analytics | — | — | [Hacker News, April 2022](https://news.ycombinator.com/item?id=31040190) | -| [Lyft](https://lyft.com) | Rideshare | - | — | — | [Twitter, July 2023](https://twitter.com/riteshvaryani/status/1685160430606639104) | -| [MAXILECT](https://maxilect.com/) | Ad Tech, Blockchain, ML, AI | — | — | — | [Job advertisement, 2021](https://www.linkedin.com/feed/update/urn:li:activity:6780842017229430784/) | -| [MGID](https://www.mgid.com/) | Ad network | Web-analytics | — | — | [Blog post in Russian, April 2020](http://gs-studio.com/news-about-it/32777----clickhouse---c) | -| [MUX](https://mux.com/) | Online Video | Video Analytics | — | — | [Talk in English, August 2019](https://altinity.com/presentations/2019/8/13/how-clickhouse-became-the-default-analytics-database-for-mux/) | -| [Mail.ru Cloud Solutions](https://mcs.mail.ru/) | Cloud services | Main product | — | — | [Article in Russian](https://mcs.mail.ru/help/db-create/clickhouse#) | -| [Marfeel](https://www.marfeel.com/) | Mobile SaaS | — | — | — | Job offer, Apr 2022 | -| [Marilyn](https://tech.mymarilyn.ru) | Advertising | Statistics | — | — | [Talk in Russian, June 2017](https://www.youtube.com/watch?v=iXlIgx2khwc) | -| [MasMovil](https://www.masmovil.es/) | Telecom | Telecom services | - | - | [Blog](https://clickhouse.com/blog/how-grupo-masmovil-monitors-radio-access-networks-with-clickhouse) | -| [Match Systems](https://matchsystems.com/) | Software & Technology | Blockchain Intelligence & AML | — | — | [Job Posting, March 2024](https://telegra-ph.translate.goog/Senior-Database-Administrator-11-28?_x_tr_sl=ru&_x_tr_tl=en&_x_tr_hl=en&_x_tr_pto=wapp) | -| [Mello](https://mellodesign.ru/) | Marketing | Analytics | 1 server | — | [Article, October 2020](https://vc.ru/marketing/166180-razrabotka-tipovogo-otcheta-skvoznoy-analitiki) | -| [Memfault](https://https://memfault.com/) | Software & Technology | IOT Monitoring | — | — | [Job Listing, August 2023](https://www.ycombinator.com/companies/memfault/jobs/zALzwIe-backend-engineer-systems-data) | -| 
[cBioPortal](https://www.cbioportal.org/) | Healthcare | Datastore backing portal for cancer genomics | — | — | [NYC Meetup, Dec 2023](https://clickhouse.com/videos/fast-answers-in-cancer-research) | -| [MessageBird](https://www.messagebird.com) | Telecommunications | Statistics | — | — | [Slides in English, November 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup20/messagebird.pdf) | -| [Metoda](https://metoda.com/) | Software & Technology | Advertising | — | — | [ClickHouse Meetup, September 2022](https://www.youtube.com/watch?v=uS5uA-aZSlQ&t=1770s) | -| [MetricFire](https://www.metricfire.com) | Managed Service | Monitoring | — | — | [Blog, November 2022](https://www.metricfire.com/blog/using-clickhouse-with-metricfire) | -| [Microsoft - Clarity](https://clarity.microsoft.com/) | Web Analytics | Clarity (Main Product) | — | — | [Meetup Video, January 2023](https://www.youtube.com/watch?v=rUVZlquVGw0&list=PL0Z2YDlm0b3iNDUzpY1S3L_iV4nARda_U&index=2) [A question on GitHub](https://github.com/ClickHouse/ClickHouse/issues/21556) | -| [Microsoft - Titan](https://www.microsoft.com/) | Software & Technology | Internal Data Analytics (Titan) | — | — | [Meetup Video, January 2023](https://www.youtube.com/watch?v=r1ZqjU8ZbNs&list=PL0Z2YDlm0b3iNDUzpY1S3L_iV4nARda_U&index=2) | -| [Middleware](https://middleware.io/) | Software | Cloud management | - | - | [SF Meetup, March 2024](https://www.youtube.com/watch?v=xCLMuXJWx80&list=PL0Z2YDlm0b3iNDUzpY1S3L_iV4nARda_U&index=10) | -| [MindsDB](https://www.mindsdb.com/) | Machine Learning | Main Product | — | — | [Official Website](https://www.mindsdb.com/blog/machine-learning-models-as-tables-in-ch) | -| [Modeo](https://modeo.ai/) | Software & Technology | Data Engineering | — | — | [Blog, June 2023](https://clickhouse.com/blog/driving-sustainable-data-management-with-clickhouse-introducing-stash-by-modeo) | -| [moosejs](https://www.moosejs.com/) | Software & Technology | Open-source developer framework | — | — | [Blog Post, May 2024](https://www.fiveonefour.com/blog/product-update-2) | -| [Motodata](https://www.motadata.com/) | Monitoring | Main Product | — | — | [Official Website](https://www.motadata.com/docs) | -| [Muse Group](https://mu.se/) | Music Software | Performance Monitoring | — | — | [Blog post in Russian, January 2021](https://habr.com/en/post/647079/) | -| [MyScale](https://myscale.com/) | Software & Technology | AI Database | — | — | [Docs](https://docs.myscale.com/en/overview/) | -| [NANO Corp](https://nanocorp.fr/en/) | Software & Technology | NOC as a Service | — | — | [Blog Post, July 2022](https://clickhouse.com/blog/from-experimentation-to-production-the-journey-to-supercolumn) | -| [NGINX](https://nginx.com/) | Application Delivery Network | NGINX Management Suite | — | — | [Documentation](https://docs.nginx.com/nginx-management-suite/admin-guides/getting-started/prerequisites/configure-clickhouse/) | -| [NIC Labs](https://niclabs.cl/) | Network Monitoring | RaTA-DNS | — | — | [Blog post, March 2021](https://niclabs.cl/ratadns/2021/03/Clickhouse) | -| [Nixys](https://nixys.io/) | Software & Technology | DevOps, SRE and DevSecOps | — | — | [Blog Post, March 2024](https://habr-com.translate.goog/ru/companies/nixys/articles/801029/?_x_tr_hist=true/ru/companies/nixys/articles/801029/?_x_tr_sl=ru&_x_tr_tl=en&_x_tr_hl=en&_x_tr_pto=wapp&_x_tr_hist=true) | -| [NLMK](https://nlmk.com/en/) | Steel | Monitoring | — | — | [Article in Russian, Jan 2022](https://habr.com/en/company/nlmk/blog/645943/) | -| [NOC 
Project](https://getnoc.com/) | Network Monitoring | Analytics | Main Product | — | [Official Website](https://getnoc.com/features/big-data/) | -| [Nansen](https://www.nansen.ai/) | Finance - Crypto | Analytics | — | — | [Press release](https://clickhouse.com/blog/clickhouse-cloud-on-google-cloud-platform-gcp-is-generally-available) | -| [Narrative](https://www.trynarrative.com/) | Software & Technology | AI Automation | — | — | [Hacker News, May 2024](https://news.ycombinator.com/item?id=40225998) | -| [Nationale Databank Wegverkeers](https://www.ndw.nu/) | Software & Technology | Road Traffic Monitoring | — | — | [Presentation at Foss4G, August 2019](https://av.tib.eu/media/43434) | -| [Nebius](https://nebius.com/il/docs/managed-clickhouse/) | SaaS | Main product | — | — | [Official website](https://nebius.com/il/docs/managed-clickhouse/) | -| [Neocom](https://www.neocom.ai/) | AI SaaS | Main product | - | - | [Job listing](https://news.ycombinator.com/item?id=38497724) | -| [Neocom](https://www.neocom.ai/) | Software & Technology | Sales Platform | — | — | [Hacker News, September 2023](https://news.ycombinator.com/item?id=37359122) | -| [NeonDB](https://neon.tech/) | Cloud | Postgres management | - | - | [Blog, 2024](https://double.cloud/resources/case-studies/neon-increases-data-granularity-with-managed-clickhouse/) | -| [NetMeta](https://github.com/monogon-dev/NetMeta/blob/main/README.md) | Observability | Main Product | — | — | [Twitter, December 2022](https://twitter.com/leolukde/status/1605643470239977475) | -| [Netflix](https://www.netflix.com/) | Software & Technology | Video Streaming | — | — | [Meetup, March 2025](https://youtu.be/64TFG_Qt5r4) | -| [Netskope](https://www.netskope.com/) | Network Security | — | — | — | [Job advertisement, March 2021](https://www.mendeley.com/careers/job/senior-software-developer-backend-developer-1346348) | -| [Nexpath Networks](https://www.nexpath.net/) | Software & Technology | Network Analysis | — | — | [Slides, September 2021](https://opensips.org/events/Summit-2021Distributed/assets/presentations/2021-jon-abrams-big-telco-data-with-clickhouse.pdf) [Video, September 2021](https://www.youtube.com/watch?v=kyu_wDcO0S4&t=3840s) | -| [NineData](https://www.ninedata.cloud/) | Software & Technology | DMaaS | — | — | ClickHouse Meetup in Hangzhou, May 2024 | -| [Noction](https://www.noction.com) | Network Technology | Main Product | — | — | [Official Website](https://www.noction.com/news/irp-3-11-remote-triggered-blackholing-capability) | -| [Notionlytics](https://notionlytics.com/) | Software & Technology | Page analytics for Notion | — | — | [Twitter, July 2023](https://twitter.com/MaxPrilutskiy/status/1675428469403004928) | -| [Ntop](https://www.ntop.org/) | Network Monitoning | Monitoring | — | — | [Official website, January 2022](https://www.ntop.org/ntop/historical-traffic-analysis-at-scale-using-clickhouse-with-ntopng/) | -| [Nuna Inc.](https://www.nuna.com/) | Health Data Analytics | — | — | — | [Talk in English, July 2020](https://youtu.be/GMiXCMFDMow?t=170) -| [Nuon](https://nuon.co/) | Software & Technology | — | — | — | [Meetup video](https://youtu.be/2rHfWt6epIQ) | -| [Nutanix](https://www.nutanix.com/) | Software & Technology | Main Product | — | — | [Slides, March 2024](https://github.com/ClickHouse/clickhouse-presentations/blob/master/2024-meetup-bengaluru/2-Unified_data_platform_with_clickhouse_by_Sachidanad_Gaurav_Nutanix.pdf) | -| [Nvidia](https://www.nvidia.com/) | Software & Technology | NVIDIA AI Aerial | — | — | 
[Documentation](https://docs.nvidia.com/aerial/archive/aerial-dt/1.0/text/overview.html#clickhouse) | -| [Oden](https://oden.io/) | Software & Technology | Manufacturing | — | — | [Meetup, April 2023](https://www.youtube.com/watch?v=pAKGJDOO6lo) | -| [Odoscope](https://www.odoscope.com/) | Software & Technology | Customer Engagement Platform | — | — | [Awards Submission, February 2023](https://ecommercegermanyawards.com/vote/164051) | -| [Ok.ru](https://ok.ru) | Social Network | — | 72 servers | 810 TB compressed, 50bn rows/day, 1.5 TB/day | [SmartData conference, October 2021](https://assets.ctfassets.net/oxjq45e8ilak/4JPHkbJenLgZhBGGyyonFP/57472ec6987003ec4078d0941740703b/____________________ClickHouse_______________________.pdf) | -| [OLX India](https://www.olx.in/) | E-commerce | Log Management | - | - | [Gurgaon Meetup talk, March 2025](https://clickhouse.com/videos/gurgaon-meetup-olx-india-optimizing-log-management) | -| [Omnicomm](https://omnicomm.ru/) | Transportation Monitoring | — | — | — | [Facebook post, October 2021](https://www.facebook.com/OmnicommTeam/posts/2824479777774500) | -| [OneAPM](https://www.oneapm.com/) | Monitoring and Data Analysis | Main product | — | — | [Slides in Chinese, October 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup19/8.%20clickhouse在OneAPM的应用%20杜龙.pdf) | -| [One Fact Foundation](https://www.onefact.org/) | Healthcare | CDN/Web data | — | 2PB | [GitHub repository](https://github.com/ClickHouse/ClickHouse/issues/67296) | -| [OneUptime](https://oneuptime.com/) | Observability platform | Analytics | — | — | [GitHub repository](https://github.com/OneUptime/oneuptime) | -| [Onepixel.link](https://onepixel.link/) | Software | URL shortener | - | - | [Twitter, 2024](https://twitter.com/championswimmer/status/1759195487134220415) | -| [Ongage](https://www.ongage.com/) | Marketing | Analytics | — | — | [Blog](https://clickhouse.com/blog/ongages-strategic-shift-to-clickhouse-for-real-time-email-marketing) | -| [Ookla](https://www.ookla.com/) | Software & Technology | Network Intelligence | — | — | [Presentation at J on the Beach, June 2023](https://www.youtube.com/watch?v=OZ0XpfDM8J0) | -| [OONI](https://ooni.org/) | Open Observatory of Network Interference (OONI) | Main product | — | — | [Blog, May 2023](https://clickhouse.com/blog/ooni-analyzes-internet-censorship-data-with-clickhouse), [Twitter, August 2022](https://twitter.com/OpenObservatory/status/1558014810746265600?s=20&t=hvcDU-LIrgCApP0rZCzuoA) | -| [Open Targets](https://www.opentargets.org/) | Genome Research | Genome Search | — | — | [Twitter, October 2021](https://twitter.com/OpenTargets/status/1452570865342758913?s=20), [Blog](https://blog.opentargets.org/graphql/) | -| [OpenLIT](https://openlit.io/) | Software & Technology | OTEL Monitoring with AI | — | — | [GitHub](https://github.com/openlit/openlit) | -| [OpenMeter](https://openmeter.io) | Expense Management | Main product | — | — | [Official blog post, 2023](https://openmeter.io/blog/how-openmeter-uses-clickhouse-for-usage-metering#heading-querying-historical-usage) | -| [OpenReplay](https://openreplay.com/) | Product Analytics | Session Replay | — | — | [Docs](https://docs.openreplay.com/en/deployment/openreplay-admin/) | -| [Opensee](https://opensee.io/) | Financial Analytics | Main product | - | - | [Blog Post, February 2022](https://clickhouse.com/blog/opensee-analyzing-terabytes-of-financial-data-a-day-with-clickhouse/) [Blog Post, 
December 2021](https://opensee.io/news/from-moscow-to-wall-street-the-remarkable-journey-of-clickhouse/) | -| [Oppo](https://www.oppo.com/cn/) | Hardware | Consumer Electronics Company | — | — | ClickHouse Meetup in Chengdu, April 2024 | -| [OpsVerse](https://opsverse.io/) | Observability | — | — | — | [Twitter, 2022](https://twitter.com/OpsVerse/status/1584548242100219904) | -| [Opstrace](https://opstrace.com/) | Observability | — | — | — | [Source code](https://gitlab.com/gitlab-org/opstrace/jaeger-clickhouse/-/blob/main/README.md) | -| [Oxide](https://oxide.computer/) | Hardware & Software | Server Control Plane | — | — | [GitHub Repository](https://github.com/oxidecomputer/omicron) | -| [OZON](https://corp.ozon.com/) | E-commerce | — | — | — | [Official website](https://job.ozon.ru/vacancy/razrabotchik-clickhouse-ekspluatatsiya-40991870/) | -| [PITS Globale Datenrettungsdienste](https://www.pitsdatenrettung.de/) | Data Recovery | Analytics | — | — | | -| [PRANA](https://prana-system.com/en/) | Industrial predictive analytics | Main product | — | — | [News (russian), Feb 2021](https://habr.com/en/news/t/541392/) | -| [Pace](https://www.paceapp.com/) | Marketing & Sales | Internal app | — | — | ClickHouse Cloud user | -| [Panelbear](https://panelbear.com/) | Analytics | Monitoring and Analytics | — | — | [Tech Stack, November 2020](https://panelbear.com/blog/tech-stack/) | -| [Papermark](https://www.papermark.io/) | Software & Technology | Document Sharing & Analytics | — | — | [Twitter, September 2023](https://twitter.com/mfts0/status/1698670144367567263) | -| [Parcel Perform](https://www.parcelperform.com/) | E-commerce | Real-Time Analytics | — | — | [Ho Chi Minh Meetup talk, April 2025](https://clickhouse.com/videos/hochiminh-meetup-parcel-perform-clickhouse-at-a-midsize-company) | -| [Percent 百分点](https://www.percent.cn/) | Analytics | Main Product | — | — | [Slides in Chinese, June 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup24/4.%20ClickHouse万亿数据双中心的设计与实践%20.pdf) | -| [Percona](https://www.percona.com/) | Performance analysis | Percona Monitoring and Management | — | — | [Official website, Mar 2020](https://www.percona.com/blog/2020/03/30/advanced-query-analysis-in-percona-monitoring-and-management-with-direct-clickhouse-access/) | -| [Phare](https://phare.io/) | Uptime Monitoring | Main Product | — | — | [Official website, Aug 2023](https://docs.phare.io/changelog/platform/2023#faster-monitoring-statistics) | -| [PheLiGe](https://phelige.com/about) | Software & Technology | Genetic Studies | — | — | [Academic Paper, November 2020](https://academic.oup.com/nar/article/49/D1/D1347/6007654?login=false) | -| [Physics Wallah](https://www.pw.live/) | Education Technology | Real-Time Analytics | — | — | [Gurgaon Meetup talk, March 2025](https://clickhouse.com/videos/gurgaon-meetup-clickhouse-at-physics-wallah) | -| [PingCAP](https://pingcap.com/) | Analytics | Real-Time Transactional and Analytical Processing | - | - | [GitHub, TiFlash/TiDB](https://github.com/pingcap/tiflash) | -| [Pirsch](https://pirsch.io/) | Software & Technology | Web Analytics | — | — | [Hacker News, April 2023](https://news.ycombinator.com/item?id=35692201) | -| [Piwik PRO](https://piwik.pro/) | Web Analytics | — | — | — | [Official website, Dec 2018](https://piwik.pro/blog/piwik-pro-clickhouse-faster-efficient-reports/) | -| [Plane](https://plane.so/) | Software & Technology | Project Management | — | — | [Twitter, September 
2023](https://twitter.com/vamsi_kurama/status/1699593472704176441) | -| [Plausible](https://plausible.io/) | Analytics | Main Product | — | — | [Blog Post, December 2021](https://clickhouse.com/blog/plausible-analytics-uses-click-house-to-power-their-privacy-friendly-google-analytics-alternative) [Twitter, June 2020](https://twitter.com/PlausibleHQ/status/1273889629087969280) | -| [PoeticMetric](https://www.poeticmetric.com/) | Metrics | Main Product | — | — | Community Slack, April 2022 | -| [Portkey AI](https://portkey.ai/) | LLMOps | Main Product | — | — | [LinkedIn post, August 2023](https://www.linkedin.com/feed/update/urn:li:activity:7094676373826330626/) | -| [PostHog](https://posthog.com/) | Product Analytics | Main Product | — | — | [Release Notes, October 2020](https://posthog.com/blog/the-posthog-array-1-15-0), [Blog, November 2021](https://posthog.com/blog/how-we-turned-clickhouse-into-our-eventmansion) | -| [Postmates](https://postmates.com/) | Delivery | — | — | — | [Talk in English, July 2020](https://youtu.be/GMiXCMFDMow?t=188) | -| [Pragma Innovation](http://www.pragma-innovation.fr/) | Telemetry and Big Data Analysis | Main product | — | — | [Slides in English, October 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup18/4_pragma_innovation.pdf) | -| [Prefect](https://www.prefect.io/) | Software & Technology | Main Product | — | — | [Blog, May 2024](https://clickhouse.com/blog/prefect-event-driven-workflow-orchestration-powered-by-clickhouse) | -| [Propel](https://www.propeldata.com/) | Analytics | Main product | — | — | [Blog, January 2024](https://www.propeldata.com/blog/how-to-store-json-in-clickhouse-the-right-way) | -| [Property Finder](https://www.propertyfinder.com/) | Real Estate | - | — | — | ClickHouse Cloud user | -| [QINGCLOUD](https://www.qingcloud.com/) | Cloud services | Main product | — | — | [Slides in Chinese, October 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup19/4.%20Cloud%20%2B%20TSDB%20for%20ClickHouse%20张健%20QingCloud.pdf) | -| [Qrator](https://qrator.net) | DDoS protection | Main product | — | — | [Blog Post, March 2019](https://blog.qrator.net/en/clickhouse-ddos-mitigation_37/) | -| [Qualified](https://www.qualified.com/) | Sales Pipeline Management | Data and Messaging layers | — | — | [Job posting, Nov 2022](https://news.ycombinator.com/item?id=33425109) | -| [Qube Research & Technologies](https://www.qube-rt.com/) | FinTech | Analysis | — | — | ClickHouse Cloud user | -| [QuickCheck](https://quickcheck.ng/) | FinTech | Analytics | — | — | [Blog post, May 2022](https://clickhouse.com/blog/how-quickcheck-uses-clickhouse-to-bring-banking-to-the-unbanked/) | -| [R-Vision](https://rvision.pro/en/) | Information Security | — | — | — | [Article in Russian, December 2021](https://www.anti-malware.ru/reviews/R-Vision-SENSE-15) | -| [RELEX](https://relexsolutions.com) | Supply Chain Planning | Forecasting | — | — | [Meetup Video, December 2022](https://www.youtube.com/watch?v=wyOSMR8l-DI&list=PL0Z2YDlm0b3iNDUzpY1S3L_iV4nARda_U&index=16) [Slides, December 2022](https://presentations.clickhouse.com/meetup65/CRUDy%20OLAP.pdf) | -| [Raiffeisenbank](https://www.rbinternational.com/) | Banking | Analytics | — | — | [Lecture in Russian, December 2020](https://cs.hse.ru/announcements/421965599.html) | -| [Railway](https://railway.app/) | Software & Technology | PaaS Software Tools | — | — | [Changelog, May 2023](https://railway.app/changelog/2023-05-19-horizontal-scaling#logs-are-getting-faster) | 
-| [Rambler](https://rambler.ru) | Internet services | Analytics | — | — | [Talk in Russian, April 2018](https://medium.com/@ramblertop/разработка-api-clickhouse-для-рамблер-топ-100-f4c7e56f3141) | -| [Ramp](https://ramp.com/) | Financial Services | Real-Time Analytics, Fraud Detection | — | — | [NYC Meetup, March 2024](https://www.youtube.com/watch?v=7BtUgUb4gCs) | -| [Rapid Delivery Analytics](https://rda.team/) | Retail | Analytics | — | — | ClickHouse Cloud user | -| [Real Estate Analytics](https://rea-global.com/) | Software & Technology | Real-time Analytics | - | - | [Singapore meetup, February 2025](https://clickhouse.com/videos/singapore-meetup-real-estate-analytics-clickhouse-journey) , [Blog, April 2025](https://clickhouse.com/blog/how-real-estate-analytics-made-its-data-pipeline-50x-faster-with-clickhouse) | -| [Releem](https://releem.com/) | Databases | MySQL management | - | - | [Blog 2024](https://releem.com/blog/whats-new-at-releem-june-2023) | -| [Replica](https://replicahq.com) | Urban Planning | Analytics | — | — | [Job advertisement](https://boards.greenhouse.io/replica/jobs/5547732002?gh_jid=5547732002) | -| [Request Metrics](https://requestmetrics.com/) | Software & Technology | Observability | — | — | [Hacker News, May 2023](https://news.ycombinator.com/item?id=35982281) | -| [Rengage](https://rengage.ai/) | Marketing Analytics | Main product | - | - | [Bellevue Meetup, August 2024](https://github.com/user-attachments/files/17135804/Rengage.-.clickhouse.1.pptx) -| [Resmo](https://replicahq.com) | Software & Technology | Cloud Security & Asset Management | 1 c7g.xlarge node, -| [Retell](https://retell.cc/) | Speech synthesis | Analytics | — | — | [Blog Article, August 2020](https://vc.ru/services/153732-kak-sozdat-audiostati-na-vashem-sayte-i-zachem-eto-nuzhno) | -| [Rivet](https://rivet.gg/) | Software & Technology | Gamer Server Scaling | — | — | [HackerNews, August 2023](https://news.ycombinator.com/item?id=37188659) | -| [Roblox](https://www.roblox.com/) | Gaming | Safety operations | — | 100M events per day | [San Francisco Meetup, September 2024](https://github.com/user-attachments/files/17135964/2024-09-05-ClickHouse-Meetup-Roblox.1.pdf) | -| [Rokt](https://www.rokt.com/) | Software & Technology | eCommerce | — | — | [Meetup Video, December 2022](https://www.youtube.com/watch?v=BEP07Edor-0&list=PL0Z2YDlm0b3iNDUzpY1S3L_iV4nARda_U&index=10) [Slides, December 2022](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup67/Building%20the%20future%20of%20reporting%20at%20Rokt.pdf) | -| [Rollbar](https://www.rollbar.com) | Software Development | Main Product | — | — | [Official Website](https://www.rollbar.com) | -| [Rspamd](https://rspamd.com/) | Antispam | Analytics | — | — | [Official Website](https://rspamd.com/doc/modules/clickhouse.html) | -| [RuSIEM](https://rusiem.com/en) | SIEM | Main Product | — | — | [Official Website](https://rusiem.com/en/products/architecture) | -| [RunReveal](https://runreveal.com/) | SIEM | Main Product | — | — | [SF Meetup, Nov 2023](https://www.youtube.com/watch?v=rVZ9JnbzHTQ&list=PL0Z2YDlm0b3iNDUzpY1S3L_iV4nARda_U&index=25) | -| [S7 Airlines](https://www.s7.ru) | Airlines | Metrics, Logging | — | — | [Talk in Russian, March 2019](https://www.youtube.com/watch?v=nwG68klRpPg&t=15s) | -| [SEMrush](https://www.semrush.com/) | Marketing | Main product | — | — | [Slides in Russian, August 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup17/5_semrush.pdf) | -| [SESCO 
Trading](https://www.sescotrading.com/) | Financial | Analysis | — | — | ClickHouse Cloud user | -| [SGK](http://www.sgk.gov.tr/wps/portal/sgk/tr) | Government Social Security | Analytics | — | — | [Slides in English, November 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup35/ClickHouse%20Meetup-Ramazan%20POLAT.pdf) | -| [SMI2](https://smi2.ru/) | News | Analytics | — | — | [Blog Post in Russian, November 2017](https://habr.com/ru/company/smi2/blog/314558/) | -| [SQLPad](https://getsqlpad.com/en/introduction/) | Software & Technology | Web-based SQL editor. | — | — | [GitHub, March 2023](https://github.com/sqlpad/sqlpad/blob/master/server/package.json#L43) | -| [Santiment](https://www.santiment.net) | Behavioral analytics for the crypto market | Main Product | — | — | [Github repo](https://github.com/santiment/sanbase2) | -| [Sber](https://www.sberbank.com/index) | Banking, Fintech, Retail, Cloud, Media | — | 128 servers | >1 PB | [Job advertisement, March 2021](https://career.habr.com/vacancies/1000073536) | -| [Scale8](https://scale8.com) | Tag Management and Analytics | Main product | - | - | [Source Code](https://github.com/scale8/scale8) | -| [Scarf](https://about.scarf.sh/) | Open source analytics | Main product | - | - | [Meetup, December 2024](https://github.com/ClickHouse/clickhouse-presentations/blob/master/2024-meetup-san-francisco/ClickHouse%20Meet-up%20talk_%20Scarf%20%26%20Clickhouse.pdf) | -| [Scireum GmbH](https://www.scireum.de/) | e-Commerce | Main product | — | — | [Talk in German, February 2020](https://www.youtube.com/watch?v=7QWAn5RbyR4) | -| [ScrapingBee](https://www.scrapingbee.com/) | Software & Technology | Web scraping API | — | — | [Twitter, January 2024](https://twitter.com/PierreDeWulf/status/1745464855723986989) | -| [ScratchDB](https://scratchdb.com/) | Software & Technology | Serverless Analytics | — | — | [GitHub](https://github.com/scratchdata/ScratchDB) | -| [Segment](https://segment.com/) | Data processing | Main product | 9 * i3en.3xlarge nodes 7.5TB NVME SSDs, 96GB Memory, 12 vCPUs | — | [Slides, 2019](https://slides.com/abraithwaite/segment-clickhouse) | -| [sembot.io](https://sembot.io/) | Shopping Ads | — | — | — | A comment on LinkedIn, 2020 | -| [Sendinblue](https://www.sendinblue.com/) | Software & Technology | Segmentation | 100 nodes | — | [Blog, February 2023](https://engineering.sendinblue.com/segmentation-to-target-the-right-audience/) | -| [Sentio](https://www.sentio.xyz/) | Software & Technology | Observability | — | — | [Twitter, April 2023](https://twitter.com/qiaokan/status/1650736518955438083) | -| [Sentry](https://sentry.io/) | Software Development | Main product | — | — | [Blog Post in English, May 2019](https://blog.sentry.io/2019/05/16/introducing-snuba-sentrys-new-search-infrastructure) | -| [seo.do](https://seo.do/) | Analytics | Main product | — | — | [Slides in English, November 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup35/CH%20Presentation-%20Metehan%20Çetinkaya.pdf) | -| [Serif Health](https://www.serifhealth.com/) | Healthcare | Price transparency platform | — | — | [Chicago meetup, Sempteber 2019](https://clickhouse.com/videos/price-transparency-made-easy) | -| [Serverless](https://www.serverless.com/) | Serverless Apps | Metrics | — | — | ClickHouse Cloud user | -| [ServiceNow](https://www.servicenow.com/) | Managed Services | Qualitative Mobile Analytics | — | — | [Meetup Video, January 
2023](https://www.youtube.com/watch?v=b4Pmpx3iRK4&list=PL0Z2YDlm0b3iNDUzpY1S3L_iV4nARda_U&index=6) [Slides, January 2023](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup68/Appsee%20Remodeling%20-%20ClickHouse.pdf) | -| [Sewer AI](https://www.sewerai.com/) | Software & Technology | - | — | — | ClickHouse Cloud user | -| [Shopee](https://www.shopee.com/) | E-Commerce | Distributed Tracing | - | - | [Meetup Video, April 2024](https://youtu.be/_BVy-V2wy9s?feature=shared) [Slides, April 2024](https://raw.githubusercontent.com/ClickHouse/clickhouse-presentations/master/2024-meetup-singapore-1/Shopee%20-%20Distributed%20Tracing%20in%20ClickHouse.pdf) [Blog Post, June 2024](https://clickhouse.com/blog/seeing-the-big-picture-shopees-journey-to-distributed-tracing-with-clickhouse) | -| [SigNoz](https://signoz.io/) | Observability Platform | Main Product | — | — | [Source code](https://github.com/SigNoz/signoz) , [Bangalore Meetup, February 2025](https://clickhouse.com/videos/lessons-from-building-a-scalable-observability-backend) | -| [Sina](http://english.sina.com/index.html) | News | — | — | — | [Slides in Chinese, October 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup19/6.%20ClickHouse最佳实践%20高鹏_新浪.pdf) | -| [Sinch](https://www.sinch.com/) | Software & Technology | Customer Communications Cloud | — | — | [HackerNews, May 2023](https://news.ycombinator.com/item?id=36042104) | -| [Sipfront](https://www.sipfront.com/) | Software Development | Analytics | — | — | [Twitter, October 2021](https://twitter.com/andreasgranig/status/1446404332337913895?s=20) | -| [SiteBehaviour Analytics](https://www.sitebehaviour.com/) | Software | Analytics | - | - | [Twitter, 2024](https://twitter.com/developer_jass/status/1763023792970883322) | -| [Skool](https://www.skool.com/) | Community platform | Behavioral/Experimentation Analytics | — | 100m rows/day | [SoCal Meetup, August 2024](https://github.com/user-attachments/files/17081161/ClickHouse.Meetup.pptx) | -| [slido](https://www.slido.com/) | Software & Technology | Q&A and Polling | — | — | [Meetup, April 2023](https://www.linkedin.com/events/datameetup-3-spotlightondataeng7048914766324473856/about/) | -| [Solarwinds](https://www.solarwinds.com/) | Software & Technology | Main product | — | — | [Talk in English, March 2018](https://www.youtube.com/watch?v=w8eTlqGEkkw) | -| [Sonrai Security](https://sonraisecurity.com/) | Cloud Security | - | — | — | Slack comments | -| [Spark New Zealand](https://www.spark.co.nz/) | Telecommunications | Security Operations | — | — | [Blog Post, Feb 2020](https://blog.n0p.me/2020/02/2020-02-05-dnsmonster/) | -| [Spec](https://www.specprotected.com/) | Software & Technology | Online Fraud Detection | — | — | [HackerNews, August 2023](https://news.ycombinator.com/item?id=36965317) | -| [spectate](https://spectate.net/) | Software & Technology | Monitoring & Incident Management | — | — | [Twitter, August 2023](https://twitter.com/BjarnBronsveld/status/1700458569861112110) | -| [Splio](https://splio.com/en/) | Software & Technology | Individuation Marketing | — | — | [Slack, September 2023](https://clickhousedb.slack.com/archives/C04N3AU38DV/p1693995069023669) | -| [Splitbee](https://splitbee.io) | Analytics | Main Product | — | — | [Blog Post, May 2021](https://splitbee.io/blog/new-pricing) | -| [Splunk](https://www.splunk.com/) | Business Analytics | Main product | — | — | [Slides in English, January 
2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup12/splunk.pdf) | -| [Spotify](https://www.spotify.com) | Music | Experimentation | — | — | [Slides, July 2018](https://www.slideshare.net/glebus/using-clickhouse-for-experimentation-104247173) | -| [Staffbase](https://staffbase.com/en/) | Software & Technology | Internal Communications | — | — | [ClickHouse Slack, April 2023](https://clickhousedb.slack.com/archives/C04N3AU38DV/p1682781081062859) | -| [Staffcop](https://www.staffcop.ru/) | Information Security | Main Product | — | — | [Official website, Documentation](https://www.staffcop.ru/sce43) | -| [Statsig](https://statsig.com/) | Software & Technology | Real-time analytics | — | — | [Video](https://clickhouse.com/videos/statsig) | -| [Streamkap](https://streamkap.com/) | Data Platform | - | — | — | [Video](https://clickhouse.com/videos/switching-from-elasticsearch-to-clickhouse) | -| [Suning](https://www.suning.com/) | E-Commerce | User behaviour analytics | — | — | [Blog article](https://www.sohu.com/a/434152235_411876) | -| [Superology](https://superology.com/) | Software & Technology | Customer Analytics | — | — | [Blog Post, June 2022](https://clickhouse.com/blog/collecting-semi-structured-data-from-kafka-topics-using-clickhouse-kafka-engine) | -| [Superwall](https://superwall.me/) | Monetization Tooling | Main product | — | — | [Word of mouth, Jan 2022](https://github.com/ClickHouse/ClickHouse/pull/33573) | -| [SwarmFarm Robotics](https://www.swarmfarm.com/) | Agriculture & Technology | Main Product | — | — | [Meetup Slides](https://github.com/ClickHouse/clickhouse-presentations/blob/master/2024-meetup-melbourne-2/Talk%20Track%202%20-%20Harvesting%20Big%20Data%20at%20SwarmFarm%20Robotics%20-%20Angus%20Ross.pdf) | -| [Swetrix](https://swetrix.com) | Analytics | Main Product | — | — | [Source code](https://github.com/swetrix/swetrix-api) | -| [Swift Navigation](https://www.swiftnav.com/) | Geo Positioning | Data Pipelines | — | — | [Job posting, Nov 2022](https://news.ycombinator.com/item?id=33426590) | -| [Synerise](https://synerise.com/) | ML&AI | Feature Store | - | - | [Presentation, April 2020](https://www.slideshare.net/AndrzejMichaowski/feature-store-solving-antipatterns-in-mlsystems-232829863) | -| [Synpse](https://synpse.net/) | Application Management | Main Product | - | - | [Twitter, January 2022](https://twitter.com/KRusenas/status/1483571168363880455) | -| [Synq](https://www.synq.io) | Software & Technology | Main Product | — | — | [Blog Post, July 2023](https://clickhouse.com/blog/building-a-unified-data-platform-with-clickhouse) | -| [sumsub](https://sumsub.com/) | Software & Technology | Verification platform | — | — | [Meetup, July 2022](https://www.youtube.com/watch?v=F74bBGSMwGo) | -| [Talo Game Services](https://trytalo.com) | Gaming Analytics | Event-based player analytics | — | — | [Blog, August 2024](https://trytalo.com/blog/events-clickhouse-migration) | -| [Tasrie IT Services](https://tasrieit.com) | Software & Technology | Analytics | — | — | [Blog, January 2025](https://tasrieit.com/how-tasrie-it-services-uses-clickhouse) | -| [TURBOARD](https://www.turboard.com/) | BI Analytics | — | — | — | [Official website](https://www.turboard.com/blogs/clickhouse) | -| [TeamApt](https://www.teamapt.com/) | FinTech | Data Processing | — | — | [Official Website](https://www.teamapt.com/) | -| [Teamtailor](https://www.teamtailor.com/en/) | Recruitment Software | - | — | — | ClickHouse Cloud user | -| [Tekion](https://tekion.com/) | 
Automotive Retail | Clickstream Analytics | — | — | [Blog Post, June 2024](https://clickhouse.com/blog/tekion-adopts-clickhouse-cloud-to-power-application-performance-and-metrics-monitoring) | -| [Temporal](https://www.tencentmusic.com/) | Infrastructure software | Observability product | — | — | [Bellevue Meetup, August 2024](https://github.com/user-attachments/files/17135746/Temporal.Supercharged.Observability.with.ClickHouse.pdf) | -| [Tencent Music Entertainment (TME)](https://www.tencentmusic.com/) | BigData | Data processing | — | — | [Blog in Chinese, June 2020](https://cloud.tencent.com/developer/article/1637840) | -| [Tencent](https://www.tencent.com) | Big Data | Data processing | — | — | [Slides in Chinese, October 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup19/5.%20ClickHouse大数据集群应用_李俊飞腾讯网媒事业部.pdf) | -| [Tencent](https://www.tencent.com) | Messaging | Logging | — | — | [Talk in Chinese, November 2019](https://youtu.be/T-iVQRuw-QY?t=5050) | -| [Teralytics](https://www.teralytics.net/) | Mobility | Analytics | — | — | [Tech blog](https://www.teralytics.net/knowledge-hub/visualizing-mobility-data-the-scalability-challenge) | -| [Tesla](https://www.tesla.com/) | Electric vehicle and clean energy company | — | — | — | [Vacancy description, March 2021](https://news.ycombinator.com/item?id=26306170) | -| [The Guild](https://the-guild.dev/) | API Platform | Monitoring | — | — | [Blog Post, November 2022](https://clickhouse.com/blog/100x-faster-graphql-hive-migration-from-elasticsearch-to-clickhouse) [Blog](https://the-guild.dev/blog/graphql-hive-and-clickhouse) | -| [Theia](https://theia.so/) | Software & Technology | Threat Intelligence | — | — | [Twitter, July 2023](https://twitter.com/jreynoldsdev/status/1680639586999980033) | -| [ThirdWeb](https://thirdweb.com/) | Software & Technology | Blockchain analysis | — | — | ClickHouse Cloud user | -| [Timeflow](https://timeflow.systems) | Software | Analytics | — | — | [Blog](https://timeflow.systems/why-we-moved-from-druid-to-clickhouse/ ) | -| [Timeplus](https://www.timeplus.com/) | Software & Technology | Streaming Analytics | — | — | [Meetup, August 2023](https://www.meetup.com/clickhouse-silicon-valley-meetup-group/events/294472987/) | -| [Tinybird](https://www.tinybird.co/) | Real-time Data Products | Data processing | — | — | [Official website](https://www.tinybird.co/) | -| [TrackingPlan](https://www.trackingplan.com/) | Marketing & Sales | Monitoring | — | — | ClickHouse Cloud user | -| [Traffic Stars](https://trafficstars.com/) | AD network | — | 300 servers in Europe/US | 1.8 PiB, 700 000 insert rps (as of 2021) | [Slides in Russian, May 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup15/lightning/ninja.pdf) | -| [Trillabit](https://www.trillabit.com/home) | Software & Technology | Business Intelligence | — | — | [Blog, January 2023](https://clickhouse.com/blog/trillabit-utilizes-the-power-of-clickhouse-for-fast-scalable-results-within-their-self-service-search-driven-analytics-offering) | -| [Trip.com](https://trip.com/) | Travel Services | Logging | — | — | [Meetup, March 2023](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup71/Trip.com.pdf) | -| [Turkcell](https://www.turkcell.com.tr/) | Telecom | BI Analytics | 2 nodes | 2TB per day, 100TB in total | [YouTube Video](https://www.youtube.com/watch?v=ckvPBgXl82Q) | -| [Tweeq](https://tweeq.sa/en) | Fintech | Spending Account | - | - | [Engineering Blog, May 
2024](https://engineering.tweeq.sa/tweeq-data-platform-journey-and-lessons-learned-clickhouse-dbt-dagster-and-superset-fa27a4a61904) | -| [Twilio](https://www.twilio.com) | Customer engagement | Twilio SendGrid | - | 10b events/day | [Meetup presentation, September 2024](https://github.com/user-attachments/files/17135790/twilio-sendgrid-clickhouse.1.pdf) | -| [Tydo](https://www.tydo.com) | Customer intelligence | Customer Segmentation product | - | - | [SoCal meetup, August 2024](https://github.com/user-attachments/files/17081169/Tydo_ClickHouse.Presentation.8_21.pdf) | -| [URLsLab](https://www.urlslab.com/) | Software & Technology | WordPress Plugin | — | — | [Twitter, July 2023](https://twitter.com/Yasha_br/status/1680224776302784514) , [Twitter, September 2023](https://twitter.com/Yasha_br/status/1698724654339215812) | -| [UTMSTAT](https://hello.utmstat.com/) | Analytics | Main product | — | — | [Blog post, June 2020](https://vc.ru/tribuna/133956-striming-dannyh-iz-servisa-skvoznoy-analitiki-v-clickhouse) | -| [Uber](https://www.uber.com) | Taxi | Logging | — | — | [Slides, February 2020](https://presentations.clickhouse.com/meetup40/uber.pdf) | -| [Uptrace](https://uptrace.dev/) | Software | Tracing Solution | — | — | [Official website, March 2021](https://uptrace.dev/open-source/) | -| [UseTech](https://usetech.com/) | Software Development | — | — | — | [Job Posting, December 2021](https://vk.com/wall136266658_2418) | -| [Usermaven](https://usermaven.com/) | Product Analytics | Main Product | — | — | [HackerNews, January 2023](https://news.ycombinator.com/item?id=34404706) | -| [VKontakte](https://vk.com) | Social Network | Statistics, Logging | — | — | [Slides in Russian, August 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup17/3_vk.pdf) | -| [VKontech](https://vkontech.com/) | Distributed Systems | Migrating from MongoDB | - | - | [Blog, January 2022](https://vkontech.com/migrating-your-reporting-queries-from-a-general-purpose-db-mongodb-to-a-data-warehouse-clickhouse-performance-overview/) | -| [VMware](https://www.vmware.com/) | Cloud | VeloCloud, SDN | — | — | [Product documentation](https://docs.vmware.com/en/vRealize-Operations-Manager/8.3/com.vmware.vcom.metrics.doc/GUID-A9AD72E1-C948-4CA2-971B-919385AB3CA8.html) | -| [Valueleaf Services Pvt.Ltd](http://valueleaf.com/) | Software & Technology | Martech platform, Ads platform and Loan aggregator platform | — | — | [ClickHouse Slack, April 2023](https://clickhousedb.slack.com/archives/C04N3AU38DV/p1681122299263959) | -| [Vantage](https://www.vantage.sh/) | Software & Technology | Cloud Cost Management | — | — | [Meetup, April 2023](https://www.youtube.com/watch?v=gBgXcHM_ldc) , [ClickHouse Blog, June 2023](https://clickhouse.com/blog/nyc-meetup-report-vantages-journey-from-redshift-and-postgres-to-clickhouse) | -| [Velvet](https://www.usevelvet.com/) | Database management | Main product | - | - | [Job listing](https://news.ycombinator.com/item?id=38492272) | -| [Vercel](https://vercel.com/) | Traffic and Performance Analytics | — | — | — | Direct reference, October 2021 | -| [Vexo](https://www.vexo.co/) | App development | Analytics | — | — | [Twitter, December 2023](https://twitter.com/FalcoAgustin/status/1737161334213546279) | -| [Vidazoo](https://www.vidazoo.com/) | Advertising | Analytics | — | — | ClickHouse Cloud user | -| [Vimeo](https://vimeo.com/) | Video hosting | Analytics | - | - | [Blog post](https://medium.com/vimeo-engineering-blog/clickhouse-is-in-the-house-413862c8ac28) | -| 
[Visiology](https://visiology.com/) | Business intelligence | Analytics | - | - | [Company website](https://visiology.com/) | -| [Voltmetrix](https://voltmetrix.com/) | Database management | Main product | - | - | [Blog post](https://voltmetrix.com/blog/voltmetrix-iot-manufacturing-use-case/) | -| [Voltus](https://www.voltus.co/) | Energy | — | — | — | [Blog Post, Aug 2022](https://medium.com/voltus-engineering/migrating-kafka-to-amazon-msk-1f3a7d45b5f2) | -| [W3 Analytics](https://w3analytics.hottoshotto.com/) | Blockchain | Dashboards for NFT analytics | — | — | [Community Slack, July 2023](https://clickhousedb.slack.com/archives/CU170QE9H/p1689907164648339) | -| [WSPR Live](https://wspr.live/) | Software & Technology | WSPR Spot Data | — | — | [Twitter, April 2023](https://twitter.com/HB9VQQ/status/1652723207475015680) | -| [Waitlyst](https://waitlyst.co/) | Software & Technology | AI Customer Journey Management | — | — | [Twitter, June 2023](https://twitter.com/aaronkazah/status/1668261900554051585) | -| [Walmart Labs](https://www.walmartlabs.com/) | Internet, Retail | — | — | — | [Talk in English, July 2020](https://youtu.be/GMiXCMFDMow?t=144) | -| [WanShanData](http://wanshandata.com/home) | Software & Technology | Main Product | — | — | [Meetup Slides in Chinese](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup56/wanshandata.pdf) | -| [Wargaming](https://wargaming.com/en/) | Games | | — | — | [Interview](https://habr.com/en/post/496954/) | -| [WebGazer](https://www.webgazer.io/) | Uptime Monitoring | Main Product | — | — | Community Slack, April 2022 | -| [WebScrapingHQ](https://www.webscrapinghq.com/) | Software & Technology | Web scraping API | — | — | [X, Novemeber 2024](https://x.com/harsh_maur/status/1862129151806968054) | -| [Weights & Biases](https://wandb.ai/site) | Software & Technology | LLM Monitoring | — | — | [Twitter, April 2024](https://github.com/user-attachments/files/17157064/Lukas.-.Clickhouse.pptx) | -| [Wildberries](https://www.wildberries.ru/) | E-commerce | | — | — | [Official website](https://it.wildberries.ru/) | -| [Wisebits](https://wisebits.com/) | IT Solutions | Analytics | — | — | [Slides in Russian, May 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup22/strategies.pdf) | -| [Workato](https://www.workato.com/) | Automation Software | — | — | — | [Talk in English, July 2020](https://youtu.be/GMiXCMFDMow?t=334) | -| [Wowza](https://www.wowza.com/) | Video Platform | Streaming Analytics | — | — | ClickHouse Cloud user | -| [Wundergraph](https://wundergraph.com/) | Software & Technology | API Platform | — | — | [Twitter, February 2023](https://twitter.com/dustindeus/status/1628757807913750531) | -| [Xata](https://xata.io/) | Software & Technology | SaaS observability dashboard | — | — | [Twitter, March 2024](https://x.com/tudor_g/status/1770517054971318656) | -| [Xenoss](https://xenoss.io/) | Martech, Adtech development | — | — | — | [Official website](https://xenoss.io/big-data-solution-development) | -| [Xiaoxin Tech](http://www.xiaoxintech.cn/) | Education | Common purpose | — | — | [Slides in English, November 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup33/sync-clickhouse-with-mysql-mongodb.pptx) | -| [Ximalaya](https://www.ximalaya.com/) | Audio sharing | OLAP | — | — | [Slides in English, November 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup33/ximalaya.pdf) | -| [YTsaurus](https://ytsaurus.tech/) | Distributed Storage and 
Processing | Main product | - | - | [Main website](https://ytsaurus.tech/) | -| [Yandex Cloud](https://cloud.yandex.ru/services/managed-clickhouse) | Public Cloud | Main product | — | — | [Talk in Russian, December 2019](https://www.youtube.com/watch?v=pgnak9e_E0o) | -| [Yandex DataLens](https://cloud.yandex.ru/services/datalens) | Business Intelligence | Main product | — | — | [Slides in Russian, December 2019](https://presentations.clickhouse.com/meetup38/datalens.pdf) | -| [Yandex Market](https://market.yandex.ru/) | e-Commerce | Metrics, Logging | — | — | [Talk in Russian, January 2019](https://youtu.be/_l1qP0DyBcA?t=478) | -| [Yandex Metrica](https://metrica.yandex.com) | Web analytics | Main product | 630 servers in one cluster, 360 servers in another cluster, 1862 servers in one department | 133 PiB / 8.31 PiB / 120 trillion records | [Slides, February 2020](https://presentations.clickhouse.com/meetup40/introduction/#13) | -| [Yellowfin](https://www.yellowfinbi.com) | Analytics | Main product | - | - | [Integration](https://www.yellowfinbi.com/campaign/yellowfin-9-whats-new#el-30219e0e) | -| [Yotascale](https://www.yotascale.com/) | Cloud | Data pipeline | — | 2 bn records/day | [LinkedIn (Accomplishments)](https://www.linkedin.com/in/adilsaleem/) | -| [Your Analytics](https://www.your-analytics.org/) | Product Analytics | Main Product | — | - | [Twitter, November 2021](https://twitter.com/mikenikles/status/1459737241165565953) | -| [Zagrava Trading](https://zagravagames.com/en/) | — | — | — | — | [Job offer, May 2021](https://twitter.com/datastackjobs/status/1394707267082063874) | -| [Zappi](https://www.zappi.io/web/) | Software & Technology | Market Research | — | — | [Twitter Post, June 2024](https://x.com/HermanLangner/status/1805870318218580004) | -| [Zerodha](https://zerodha.tech/) | Stock Broker | Logging | — | — | [Blog, March 2023](https://zerodha.tech/blog/logging-at-zerodha/) | -| [Zing Data](https://getzingdata.com/) | Software & Technology | Business Intelligence | — | — | [Blog, May 2023](https://clickhouse.com/blog/querying-clickhouse-on-your-phone-with-zing-data) | -| [Zipy](https://www.zipy.ai/) | Software & Technology | User session debug | — | — | [Blog, April 2023](https://www.zipy.ai/blog/deep-dive-into-clickhouse) | -| [Zomato](https://www.zomato.com/) | Online food ordering | Logging | — | — | [Blog, July 2023](https://www.zomato.com/blog/building-a-cost-effective-logging-platform-using-clickhouse-for-petabyte-scale) | -| [Zomato](https://www.zomato.com/ncr/golf-course-order-online) | Food & Beverage | Food Delivery | - | - | [Blog 2024](https://blog.zomato.com/building-a-cost-effective-logging-platform-using-clickhouse-for-petabyte-scale) | -| [Zoox](https://zoox.com/) | Software & Technology | Observability | - | - | [Job listing](https://www.linkedin.com/jobs/view/senior-software-engineer-observability-at-zoox-4139400247) | -| [АС "Стрела"](https://magenta-technology.ru/sistema-upravleniya-marshrutami-inkassacii-as-strela/) | Transportation | — | — | — | [Job posting, Jan 2022](https://vk.com/topic-111905078_35689124?post=3553) | -| [ДомКлик](https://domclick.ru/) | Real Estate | — | — | — | [Article in Russian, October 2021](https://habr.com/ru/company/domclick/blog/585936/) | -| [МКБ](https://mkb.ru/) | Bank | Web-system monitoring | — | — | [Slides in Russian, September 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup28/mkb.pdf) | -| [ООО «МПЗ Богородский»](https://shop.okraina.ru/) | Agriculture | — | — | — | [Article in 
Russian, November 2020](https://cloud.yandex.ru/cases/okraina) | -| [ЦВТ](https://htc-cs.ru/) | Software Development | Metrics, Logging | — | — | [Blog Post, March 2019, in Russian](https://vc.ru/dev/62715-kak-my-stroili-monitoring-na-prometheus-clickhouse-i-elk) | -| [ЦФТ](https://cft.ru/) | Banking, Financial products, Payments | — | — | — | [Meetup in Russian, April 2020](https://team.cft.ru/events/162) | -| [Цифровой Рабочий](https://promo.croc.ru/digitalworker) | Industrial IoT, Analytics | — | — | — | [Blog post in Russian, March 2021](https://habr.com/en/company/croc/blog/548018/) | -
diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/about-us/beta-and-experimental-features.md b/i18n/jp/docusaurus-plugin-content-docs/current/about-us/beta-and-experimental-features.md deleted file mode 100644 index acf78abdcf1..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/about-us/beta-and-experimental-features.md +++ /dev/null @@ -1,47 +0,0 @@ ---- -sidebar_position: 1 -sidebar_label: 'ベータ機能と実験的' -title: 'ベータおよび実験的機能' -description: 'ClickHouse にはベータおよび実験的機能があります。このドキュメントページでは定義について説明します。' -slug: '/beta-and-experimental-features' ---- - -Because ClickHouse is open-source, it receives many contributions not only from ClickHouse employees but also from the community. These contributions are often developed at different speeds; certain features may require a lengthy prototyping phase or more time for sufficient community feedback and iteration to be considered generally available (GA). - -Due to the uncertainty of when features are classified as generally available, we delineate features into two categories: **Beta** and **Experimental**. - -**Beta** features are officially supported by the ClickHouse team. **Experimental** features are early prototypes driven by either the ClickHouse team or the community and are not officially supported. - -The sections below explicitly describe the properties of **Beta** and **Experimental** features: - -## Beta Features {#beta-features} - -- 一般提供(GA)に向けて積極的に開発中です -- 主な既知の問題はGitHubで追跡できます -- 将来的に機能が変更される可能性があります -- ClickHouse Cloudで有効にされる可能性があります -- ClickHouseチームはベータ機能をサポートしています - -以下の機能はClickHouse Cloudにおいてベータと見なされ、現在「```allow_experimental_*```」というClickHouseの設定の下で使用可能ですが、これはClickHouse Cloud Servicesで利用することができます。 - -注意: 最近導入された機能を使用するには、ClickHouseの[互換性](/operations/settings/settings#compatibility)設定の最新バージョンを使用していることを確認してください。 - -## Experimental Features {#experimental-features} - -- GAになることは決してない可能性があります -- 削除される可能性があります -- 破壊的変更を引き起こす可能性があります -- 将来的に機能が変更される可能性があります -- 明示的に有効にする必要があります -- ClickHouseチームは**実験的な**機能をサポートしません -- 重要な機能や文書が欠ける可能性があります -- クラウドでは有効にできません - -注意: 上記にリストされたベータ以外の追加の実験的機能はClickHouse Cloudで有効にすることはできません。 - - - - - diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/about-us/beta-and-experimental-features.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/about-us/beta-and-experimental-features.md.hash deleted file mode 100644 index 53bf427a151..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/about-us/beta-and-experimental-features.md.hash +++ /dev/null @@ -1 +0,0 @@ -27f4862a6f6bf7db diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/about-us/cloud.md b/i18n/jp/docusaurus-plugin-content-docs/current/about-us/cloud.md deleted file mode 100644 index a5389bcf2e6..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/about-us/cloud.md +++ /dev/null @@ -1,34 +0,0 @@ ---- -slug: '/about-us/cloud' -sidebar_label: 'Cloud Service' -sidebar_position: 10 -description: 'ClickHouse Cloud' -title: 'ClickHouse Cloud' ---- - - - - -# ClickHouse Cloud - -ClickHouse Cloudは、人気のあるオープンソースOLAPデータベースClickHouseのオリジナル作成者が作成したクラウドオファリングです。 -[無料トライアルを開始する](https://console.clickhouse.cloud/signUp)ことでClickHouse Cloudを体験できます。 - -### ClickHouse Cloudの利点: {#clickhouse-cloud-benefits} - -ClickHouse Cloudを使用することのいくつかの利点は以下の通りです: - -- **迅速な価値提供**: クラスターのサイズを調整したりスケールを考えたりすることなく、すぐにビルディングを開始できます。 -- **シームレスなスケーリング**: 自動スケーリングは変動するワークロードに調整されるため、ピーク使用時の過剰プロビジョニングが不要です。 -- **サーバーレスオペレーション**: サイズ調整、スケーリング、安全性、信頼性、アップグレードを私たちが管理している間、リラックスできます。 -- **透明な価格設定**: 
使用した分だけ支払うことができ、リソースの予約とスケーリングコントロールが利用できます。 -- **所有コストのトータルコスト**: 最適な価格/パフォーマンス比と低い管理オーバーヘッド。 -- **幅広いエコシステム**: お気に入りのデータコネクタ、ビジュアライゼーションツール、SQLおよび言語クライアントを持ち込むことができます。 - -### ClickHouse CloudはどのバージョンのClickHouseを使用していますか? {#what-version-of-clickhouse-does-clickhouse-cloud-use} - -ClickHouse Cloudは、サービスを継続的に新しいバージョンにアップグレードします。オープンソースでコアデータベースのバージョンを公開した後、クラウドステージング環境で追加の検証を行います。これは通常6~8週間かかり、その後本番環境に展開されます。展開はクラウドサービスプロバイダー、サービスタイプ、地域ごとに段階的に行われます。 - -私たちは、通常のリリーススケジュールの前に更新を受信するための「ファスト」リリースチャンネルを提供しています。詳細については、["ファストリリースチャンネル"](/manage/updates#fast-release-channel-early-upgrades)をご覧ください。 - -前のバージョンの機能に依存している場合は、場合によってはサービスの互換性設定を使用して以前の動作に戻すことができます。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/about-us/cloud.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/about-us/cloud.md.hash deleted file mode 100644 index 132a2ec0d94..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/about-us/cloud.md.hash +++ /dev/null @@ -1 +0,0 @@ -64a94b89cb11b49d diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/about-us/distinctive-features.md b/i18n/jp/docusaurus-plugin-content-docs/current/about-us/distinctive-features.md deleted file mode 100644 index 4492983bf8d..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/about-us/distinctive-features.md +++ /dev/null @@ -1,100 +0,0 @@ ---- -slug: '/about-us/distinctive-features' -sidebar_label: 'ClickHouseのユニークさ' -sidebar_position: 50 -description: '他のデータベース管理システムとは一線を画すClickHouseの特徴を理解する' -title: 'ClickHouseの特長' ---- - - - - -# ClickHouseの特長 - -## 真の列指向データベース管理システム {#true-column-oriented-database-management-system} - -真の列指向DBMSでは、値と一緒に余分なデータが保存されません。これは、定常長の値をサポートする必要があることを意味し、値の隣にその長さの「数」を保存しないようにします。例えば、10億のUInt8型の値は、圧縮されていない状態で約1GBを消費するべきであり、そうでないとCPUの使用に強く影響します。圧縮されていないデータのボリュームに依存しているため、データをコンパクトに(「ゴミ」なしで)保存することが重要です。 - -これは、異なるカラムの値を別々に保存できるが、HBase、Bigtable、Cassandra、Hypertableなど、他のシナリオの最適化により効果的に分析クエリを処理できないシステムとは対照的です。これらのシステムでは、毎秒約十万行のスループットが得られますが、毎秒数億行にはなりません。 - -最後に、ClickHouseはデータベース管理システムであり、単一のデータベースではありません。テーブルやデータベースを実行時に作成し、データをロードし、サーバーを再構成または再起動することなくクエリを実行できます。 - -## データ圧縮 {#data-compression} - -一部の列指向DBMSはデータ圧縮を使用しません。しかし、データ圧縮は優れたパフォーマンスを達成する上で重要な役割を果たします。 - -ディスクスペースとCPU消費の間で異なるトレードオフを持つ効率的な一般目的の圧縮コーデックに加えて、ClickHouseは特定の種類のデータ向けの[特化型コーデック](/sql-reference/statements/create/table.md#specialized-codecs)を提供しており、これによりClickHouseはニッチなデータベース、例えば時系列データベースに対抗し、優れたパフォーマンスを発揮します。 - -## ディスク上のデータストレージ {#disk-storage-of-data} - -主キーによって物理的にデータをソートすることで、特定の値または値の範囲に基づいて低レイテンシで、数十ミリ秒未満でデータを抽出できるようになります。一部の列指向DBMS、例えばSAP HANAやGoogle PowerDrillは、RAM内でのみ動作することができます。このアプローチは、リアルタイム分析のために必要なハードウェアの予算を超える要求を必要とします。 - -ClickHouseは通常のハードドライブで動作するように設計されており、これによりデータストレージあたりのGBのコストが低くなりますが、SSDや追加のRAMもあれば完全に活用されます。 - -## 複数コアでの並列処理 {#parallel-processing-on-multiple-cores} - -大規模なクエリは自然に並列化され、現在のサーバー上で利用可能なすべてのリソースが使用されます。 - -## 複数サーバーでの分散処理 {#distributed-processing-on-multiple-servers} - -上記の列指向DBMSのほとんどは、分散クエリ処理のサポートを持っていません。 - -ClickHouseでは、データは異なるシャードに存在できます。各シャードは、フォールトトレランスのために使用されるレプリカのグループである場合があります。すべてのシャードは、ユーザーに透過的にクエリを並列で実行するために使用されます。 - -## SQLサポート {#sql-support} - -ClickHouseは、ANSI SQL標準とほぼ互換性のある[SQL言語](/sql-reference/)をサポートしています。 - -サポートされているクエリには、[GROUP BY](../sql-reference/statements/select/group-by.md)、[ORDER BY](../sql-reference/statements/select/order-by.md)、[FROM](../sql-reference/statements/select/from.md)のサブクエリ、[JOIN](../sql-reference/statements/select/join.md)句、[IN](../sql-reference/operators/in.md)オペレーター、[ウィンドウ関数](../sql-reference/window-functions/index.md)、およびスカラーサブクエリが含まれます。 - 
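As a rough, editorial illustration of the query shapes listed above (GROUP BY, ORDER BY, a subquery feeding the IN operator, and a window function), the following sketch can be useful; the `hits` and `sessions` tables and all of their columns are hypothetical and exist only for this example, not part of the original document.

```sql
-- Minimal sketch; table and column names are hypothetical.
SELECT
    url,
    count() AS page_views,                                  -- aggregate per URL
    uniq(user_id) AS unique_visitors,                       -- approximate distinct count
    rank() OVER (ORDER BY count() DESC) AS popularity_rank  -- window function over the aggregated rows
FROM hits
WHERE user_id IN (SELECT user_id FROM sessions WHERE duration_ms > 60000)  -- IN fed by a subquery
GROUP BY url
ORDER BY page_views DESC
LIMIT 10;
```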
-依存サブクエリは執筆時点ではサポートされていませんが、将来的に利用可能になる可能性があります。 - -## ベクトル計算エンジン {#vector-engine} - -データはカラムによってだけでなく、ベクトル(カラムの部分)によって処理され、これにより高いCPU効率が達成されます。 - -## リアルタイムデータ挿入 {#real-time-data-updates} - -ClickHouseは主キーを持つテーブルをサポートしています。主キーの範囲に対してクエリを迅速に実行するために、データはマージツリーを使用してインクリメンタルにソートされます。これにより、データをテーブルに継続的に追加できます。新しいデータを取り込む際にロックはかかりません。 - -## 主インデックス {#primary-index} - -主キーによって物理的にデータがソートされることにより、特定の値や値の範囲に基づいて低レイテンシで、数十ミリ秒未満でデータを抽出することが可能になります。 - -## 副インデックス {#secondary-indexes} - -他のデータベース管理システムとは異なり、ClickHouseの副インデックスは特定の行や行の範囲を指しません。その代わり、データの一部がクエリのフィルタ条件に一致しないすべての行を事前に知ることができるため、それらを全く読み込まず、これにより[データスキッピングインデックス](../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-data_skipping-indexes)と呼ばれます。 - -## オンラインクエリに適している {#suitable-for-online-queries} - -ほとんどのOLAPデータベース管理システムは、サブ秒レイテンシでのオンラインクエリを目指していません。代替システムでは、数十秒または数分のレポート構築時間が許容されることがよくあります。時には、さらに多くの時間がかかり、システムがオフラインでレポートを準備しなければならないことがあります(事前にまたは「後で戻ってくるように」と応答する形で)。 - -ClickHouseでは、「低レイテンシ」とは、クエリが遅延なく処理され、ユーザーインターフェースページが読み込み中のその瞬間に応答を事前に準備しようとせずに行われることを意味します。言い換えれば、オンラインです。 - -## 近似計算のサポート {#support-for-approximated-calculations} - -ClickHouseは、パフォーマンスのために精度を交換するためのさまざまな方法を提供します: - -1. 異なる値、中央値、パーセンタイルの数の近似計算のための集約関数。 -2. データの部分([SAMPLE](../sql-reference/statements/select/sample.md))に基づいてクエリを実行し、近似結果を得る。この場合、ディスクから取得するデータ量が比例的に少なくなります。 -3. すべてのキーの代わりに、限られた数のランダムキーに対して集約を実行します。データ内のキー分布に関する特定の条件下では、これにより合理的に正確な結果が得られ、より少ないリソースを使用します。 - -## 適応型結合アルゴリズム {#adaptive-join-algorithm} - -ClickHouseは、複数のテーブルを[JOIN](../sql-reference/statements/select/join.md)する方法を適応的に選択し、ハッシュ結合アルゴリズムを優先し、大きなテーブルが1つ以上ある場合はマージ結合アルゴリズムにフォールバックします。 - -## データレプリケーションとデータ整合性のサポート {#data-replication-and-data-integrity-support} - -ClickHouseは非同期のマルチマスターレプリケーションを使用しています。すべての利用可能なレプリカに書き込まれた後、残りのレプリカはバックグラウンドでそのコピーを取得します。システムは異なるレプリカ間で同一のデータを維持します。ほとんどの障害後の回復は自動的に、または複雑なケースでは半自動的に行われます。 - -詳細については、[データレプリケーション](../engines/table-engines/mergetree-family/replication.md)のセクションを参照してください。 - -## ロールベースのアクセス制御 {#role-based-access-control} - -ClickHouseは、SQLクエリを使用してユーザーアカウント管理を実装し、ANSI SQL標準や一般的なリレーショナルデータベース管理システムで見られるような[ロールベースのアクセス制御の設定](/guides/sre/user-management/index.md)を可能にします。 - -## 欠点と見なされる可能性のある機能 {#clickhouse-features-that-can-be-considered-disadvantages} - -1. 完全なトランザクションがない。 -2. 高い速度と低レイテンシで既に挿入されたデータを変更または削除する機能が欠如している。データをクリーンアップまたは修正するためのバッチ削除や更新が利用可能であり、例えば、[GDPR](https://gdpr-info.eu)に準拠するためにそれが必要です。 -3. 
スパースインデックスにより、ClickHouseはキーによって単一の行を取得するポイントクエリに対してそれほど効率的ではありません。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/about-us/distinctive-features.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/about-us/distinctive-features.md.hash deleted file mode 100644 index ea1c31ddac1..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/about-us/distinctive-features.md.hash +++ /dev/null @@ -1 +0,0 @@ -f333560ccd89cca9 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/about-us/history.md b/i18n/jp/docusaurus-plugin-content-docs/current/about-us/history.md deleted file mode 100644 index 23c23e0496f..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/about-us/history.md +++ /dev/null @@ -1,59 +0,0 @@ ---- -slug: '/about-us/history' -sidebar_label: 'ClickHouseの歴史' -sidebar_position: 40 -description: 'ClickHouse開発の歴史' -keywords: -- 'history' -- 'development' -- 'Metrica' -title: 'ClickHouseの歴史' ---- - - - - -# ClickHouseの歴史 {#clickhouse-history} - -ClickHouseは最初、[Yandex.Metrica](https://metrica.yandex.com/)を支えるために開発されました。[世界で2番目に大きなウェブ解析プラットフォーム](http://w3techs.com/technologies/overview/traffic_analysis/all)であり、今でもそのコアコンポーネントとなっています。データベースには130兆以上のレコードがあり、毎日200億以上のイベントを処理するClickHouseは、非集約データから直接カスタムレポートをリアルタイムで生成することができます。この文書では、ClickHouseの初期開発の目標について簡単に説明します。 - -Yandex.Metricaは、ユーザーが定義した任意のセグメントに基づいて、ヒット数やセッションに基づいたカスタマイズレポートをリアルタイムで構築します。これを行うためには、ユニークユーザー数のような複雑な集約を構築する必要があることが多く、新しいデータはレポート作成のためにリアルタイムで到着します。 - -2014年4月の時点で、Yandex.Metricaは1日あたり約120億のイベント(ページビューとクリック)を追跡していました。これらのすべてのイベントは、カスタムレポートを作成するために保存する必要がありました。単一のクエリは、数百ミリ秒内に100万行をスキャンする必要がある場合もあれば、数秒で数億行をスキャンする必要がある場合もありました。 - -## Yandex.Metricaおよびその他のYandexサービスでの使用 {#usage-in-yandex-metrica-and-other-yandex-services} - -ClickHouseはYandex.Metricaで多くの目的に使用されています。主なタスクは、非集約データを使用してオンラインモードでレポートを生成することです。20.3兆行以上をデータベースに保存している374台のサーバークラスターを使用しています。圧縮データのボリュームは約2PBで、重複やレプリカを考慮していません。非圧縮データ(TSV形式)のボリュームは約17PBに達します。 - -ClickHouseは以下のプロセスにも重要な役割を果たしています。 - -- Yandex.Metricaからのセッションリプレイのためのデータ保存。 -- 中間データの処理。 -- 分析によるグローバルレポートの構築。 -- Yandex.Metricaエンジンのデバッグ用クエリの実行。 -- APIおよびユーザーインターフェースからのログの分析。 - -現在、他のYandexサービスや部門にも数十件のClickHouseインストールがあります:検索部門、eコマース、広告、ビジネス分析、モバイル開発、個人サービスなど。 - -## 集約データと非集約データ {#aggregated-and-non-aggregated-data} - -統計を効果的に計算するには、データを集約する必要があるという広く親しまれている意見があります。これはデータのボリュームを減らすためです。 - -しかし、データ集約には多くの制約が伴います: - -- 必要なレポートの事前定義されたリストが必要です。 -- ユーザーはカスタムレポートを作成できません。 -- 多くの異なるキーで集約する場合、データ量はほとんど減少しないため、集約は無意味です。 -- レポートの数が多い場合、集約のバリエーションが多すぎ(組合せ爆発)ます。 -- 高いカーディナリティのキー(URLなど)を集約する場合、データのボリュームはあまり減少しません(2倍未満)。 -- このため、集約によってデータのボリュームが縮小するのではなく、増加する可能性があります。 -- ユーザーは私たちが生成するすべてのレポートを表示しません。その計算の多くは無意味です。 -- 様々な集約によってデータの論理的整合性が損なわれる可能性があります。 - -何も集約せず、非集約データで作業する場合、計算量を減少させることができます。 - -しかし、集約を行うことで、大部分の作業がオフラインで行われ、比較的落ち着いて完了します。対照的にオンライン計算は、ユーザーが結果を待っているため、できるだけ早く計算する必要があります。 - -Yandex.Metricaには、Metrageというデータを集約するための専門システムがあり、大部分のレポートで使用されていました。2009年からは、非集約データ用の専門OLAPデータベースOLAPServerも使用されており、これは以前はレポートビルダーに使用されていました。OLAPServerは非集約データに対してはうまく機能しましたが、すべてのレポートに使用することを妨げる多くの制約がありました。これには、データ型へのサポートがない(数字のみ)ことや、リアルタイムでデータを逐次更新することができない(毎日のデータ書き換えにしかできなかった)ことが含まれています。OLAPServerはDBMSではなく、専門DBです。 - -ClickHouseの初期の目標は、OLAPServerの制約を取り除き、すべてのレポートに対して非集約データで作業する問題を解決することでしたが、年月が経つにつれて、さまざまな分析タスクに適した汎用データベース管理システムに成長しました。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/about-us/history.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/about-us/history.md.hash deleted file mode 100644 index 2b7686634a4..00000000000 --- 
a/i18n/jp/docusaurus-plugin-content-docs/current/about-us/history.md.hash +++ /dev/null @@ -1 +0,0 @@ -753e5b10b6147038 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/about-us/index.md b/i18n/jp/docusaurus-plugin-content-docs/current/about-us/index.md deleted file mode 100644 index b494f41a785..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/about-us/index.md +++ /dev/null @@ -1,21 +0,0 @@ ---- -slug: '/about' -title: 'ClickHouseについて' -description: 'ClickHouseに関するランディングページ' ---- - - - - -# About ClickHouse - -このセクションのドキュメントでは、ClickHouseに関する情報を見つけることができます。以下の目次を参照して、このセクションのページのリストをご覧ください。 - -| ページ | 説明 | -|------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| [What is ClickHouse](/about-clickhouse) | ClickHouseのコア機能、アーキテクチャ、使用例を紹介し、新しいユーザーのための簡潔な概要を提供します。 | -| [Adopters](/about-us/adopters) | ClickHouseを使用している企業とその成功事例を、公開ソースから集めたリストです。 | -| [Support](/about-us/support) | ClickHouse Cloud Support Servicesの紹介とその使命について。 | -| [Beta Features and Experimental](/beta-and-experimental-features) | ClickHouseが「ベータ」と「実験的」ラベルを使用して、公式にサポートされた機能と、開発スピードの違いからコミュニティの貢献による初期段階のサポートされていない機能を区別する方法について学びます。 | -| [Cloud Service](/about-us/cloud) | 完全に管理されたサービスであるClickHouse Cloudを発見しましょう。これにより、ユーザーはオープンソースのClickHouseデータベースを立ち上げることができ、迅速な価値提供、シームレスなスケーリング、およびサーバーレス操作のような利点が得られます。 | -| [ClickHouse History](/about-us/history) | ClickHouseの歴史についてさらに学びます。 | diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/about-us/index.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/about-us/index.md.hash deleted file mode 100644 index 4f5c81c5947..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/about-us/index.md.hash +++ /dev/null @@ -1 +0,0 @@ -b0fc519f7f2a8240 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/about-us/intro.mdx b/i18n/jp/docusaurus-plugin-content-docs/current/about-us/intro.mdx deleted file mode 100644 index 39fb9847aec..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/about-us/intro.mdx +++ /dev/null @@ -1,10 +0,0 @@ ---- -'slug': '/about-clickhouse' -'sidebar_label': 'What is ClickHouse?' -'title': 'What is ClickHouse?' 
-'description': 'Page describing what ClickHouse is' ---- - -import Content from '@site/i18n/jp/docusaurus-plugin-content-docs/current/intro.md'; - - diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/about-us/intro.mdx.hash b/i18n/jp/docusaurus-plugin-content-docs/current/about-us/intro.mdx.hash deleted file mode 100644 index b2069d85ea6..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/about-us/intro.mdx.hash +++ /dev/null @@ -1 +0,0 @@ -0674b032dcd00ad5 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/about-us/support.md b/i18n/jp/docusaurus-plugin-content-docs/current/about-us/support.md deleted file mode 100644 index 988813b2b0e..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/about-us/support.md +++ /dev/null @@ -1,25 +0,0 @@ ---- -slug: '/about-us/support' -sidebar_label: 'サポート' -title: 'ClickHouseクラウドサポートサービス' -sidebar_position: 30 -description: 'ClickHouseクラウドサポートサービスに関する情報' ---- - - - - -# ClickHouse Cloud サポートサービス - -ClickHouseは、当社のClickHouse Cloudユーザーおよび顧客向けにサポートサービスを提供しています。私たちの目標は、ClickHouse製品を代表するサポートサービスチームを構築することであり、比類のないパフォーマンス、使いやすさ、そして非常に迅速で高品質な結果を提供することです。詳細については、[ClickHouseサポートプログラム](https://clickhouse.com/support/program/)ページをご覧ください。 - -[Cloudコンソールにログイン](https://console.clickhouse.cloud/support)し、メニューオプションから**ヘルプ -> サポート**を選択して新しいサポートケースを開き、提出したケースのステータスを確認できます。 - -また、当社の[ステータスページ](https://status.clickhouse.com)に登録することで、プラットフォームに影響を与えるインシデントについて迅速に通知を受けることができます。 - -:::note -サポートインシデントに関するサービスレベル契約(SLA)は、サブスクリプション顧客のみが対象であることに注意してください。現在ClickHouse Cloudユーザーでない場合は、質問に答えるよう努めますが、代わりに以下のコミュニティリソースにアクセスすることをお勧めします: - -- [ClickHouseコミュニティSlackチャンネル](https://clickhouse.com/slack) -- [その他のコミュニティオプション](https://github.com/ClickHouse/ClickHouse/blob/master/README.md#useful-links) -::: diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/about-us/support.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/about-us/support.md.hash deleted file mode 100644 index b21c52b2971..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/about-us/support.md.hash +++ /dev/null @@ -1 +0,0 @@ -6647ef4883598877 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/architecture/cluster-deployment.md b/i18n/jp/docusaurus-plugin-content-docs/current/architecture/cluster-deployment.md deleted file mode 100644 index a7fa01ae4ab..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/architecture/cluster-deployment.md +++ /dev/null @@ -1,155 +0,0 @@ ---- -slug: '/architecture/cluster-deployment' -sidebar_label: 'クラスター展開' -sidebar_position: 100 -title: 'クラスターの展開' -description: 'このチュートリアルを通じて、簡単なClickHouseクラスターの設定方法を学ぶことができます。' ---- - - - -このチュートリアルでは、[ローカル ClickHouse サーバー](../getting-started/install/install.mdx)が既にセットアップされている前提です。 - -このチュートリアルを通じて、シンプルな ClickHouse クラスターのセットアップ方法を学びます。小規模ですが、フォールトトレラントでスケーラブルです。その後、サンプルデータセットの1つを使用してデータを埋め込み、いくつかのデモクエリを実行します。 - -## クラスター展開 {#cluster-deployment} - -この ClickHouse クラスターは均質なクラスターになります。手順は以下の通りです: - -1. クラスター内のすべてのマシンに ClickHouse サーバーをインストールします -2. 設定ファイル内でクラスターの設定を行います -3. 各インスタンスにローカルテーブルを作成します -4. 
[分散テーブル](../engines/table-engines/special/distributed.md)を作成します - -[分散テーブル](../engines/table-engines/special/distributed.md)は、ClickHouse クラスター内のローカルテーブルへの「ビュー」の一種です。分散テーブルからの SELECT クエリは、クラスター内のすべてのシャードのリソースを使用して実行されます。複数のクラスターに対して設定を指定し、異なるクラスターのビューを提供するために複数の分散テーブルを作成することができます。 - -以下は、1つのレプリカを持つ3つのシャードからなるクラスターの設定例です: - -```xml - - - - - example-perftest01j.clickhouse.com - 9000 - - - - - example-perftest02j.clickhouse.com - 9000 - - - - - example-perftest03j.clickhouse.com - 9000 - - - - -``` - -さらにデモを行うために、シングルノード展開チュートリアルで使用した`CREATE TABLE`クエリと同じクエリで、異なるテーブル名で新しいローカルテーブルを作成します: - -```sql -CREATE TABLE tutorial.hits_local (...) ENGINE = MergeTree() ... -``` - -分散テーブルを作成することで、クラスターのローカルテーブルへのビューを提供します: - -```sql -CREATE TABLE tutorial.hits_all AS tutorial.hits_local -ENGINE = Distributed(perftest_3shards_1replicas, tutorial, hits_local, rand()); -``` - -クラスター内のすべてのマシンに同様の分散テーブルを作成するのは一般的な手法です。これにより、クラスターの任意のマシン上で分散クエリを実行できます。また、特定の SELECT クエリのために[remote](../sql-reference/table-functions/remote.md)テーブル関数を使用して一時的な分散テーブルを作成する代替オプションもあります。 - -分散テーブルにデータを広めるために、[INSERT SELECT](../sql-reference/statements/insert-into.md)を実行しましょう。 - -```sql -INSERT INTO tutorial.hits_all SELECT * FROM tutorial.hits_v1; -``` - -予想通り、計算的に重いクエリは1台のサーバーの代わりに3台のサーバーを利用する場合、N倍速く実行されます。 - -この場合、3つのシャードを持つクラスターを使用しており、各シャードには単一のレプリカが含まれています。 - -本番環境での耐障害性を提供するために、各シャードには2〜3のレプリカを持たせることを推奨します。これらのレプリカは、複数のアベイラビリティゾーンまたはデータセンター(あるいは少なくともラック)に分散させるべきです。ClickHouseは無制限の数のレプリカをサポートしていることに注意してください。 - -以下は、3つのレプリカを持つ1つのシャードからなるクラスターの設定例です: - -```xml - - ... - - - - example-perftest01j.clickhouse.com - 9000 - - - example-perftest02j.clickhouse.com - 9000 - - - example-perftest03j.clickhouse.com - 9000 - - - - -``` - -ネイティブレプリケーションを有効にするには、[ZooKeeper](http://zookeeper.apache.org/)が必要です。ClickHouseはすべてのレプリカでデータの整合性を確保し、障害後に自動的に復元手順を実行します。ZooKeeper クラスターは、他のプロセス(ClickHouseを含む)が稼働していない専用サーバーに展開することを推奨します。 - -:::note 注 -ZooKeeperは厳密な要件ではありません:単純な場合には、アプリケーションコードからすべてのレプリカにデータを書き込むことでデータを複製することができます。このアプローチは**推奨されません**。なぜなら、この場合 ClickHouse はすべてのレプリカでデータの整合性を保証できず、したがってアプリケーションの責任となるからです。 -::: - -ZooKeeperの位置は、設定ファイル内で指定します: - -```xml - - - zoo01.clickhouse.com - 2181 - - - zoo02.clickhouse.com - 2181 - - - zoo03.clickhouse.com - 2181 - - -``` - -また、シャードとレプリカを識別するためのマクロを設定する必要があります。これらはテーブルの作成時に使用されます: - -```xml - - 01 - 01 - -``` - -レプリケーションテーブル作成時にレプリカが存在しない場合、新しい最初のレプリカがインスタンス化されます。すでにライブレプリカがある場合は、新しいレプリカが既存のものからデータをクローンします。すべてのレプリケーションテーブルを先に作成し、その後でデータを挿入することができます。また、いくつかのレプリカを作成し、他をデータ挿入中またはその後に追加するオプションもあります。 - -```sql -CREATE TABLE tutorial.hits_replica (...) -ENGINE = ReplicatedMergeTree( - '/clickhouse_perftest/tables/{shard}/hits', - '{replica}' -) -... 
-``` - -ここでは、[ReplicatedMergeTree](../engines/table-engines/mergetree-family/replication.md)テーブルエンジンを使用しています。パラメータには、シャードおよびレプリカ識別子を含むZooKeeperパスを指定します。 - -```sql -INSERT INTO tutorial.hits_replica SELECT * FROM tutorial.hits_local; -``` - -レプリケーションはマルチマスターモードで行われます。データは任意のレプリカにロードでき、システムは自動的に他のインスタンスと同期します。レプリケーションは非同期であるため、特定の時点で全てのレプリカが最近挿入されたデータを含まない場合があります。データインジェクションを行うためには、少なくとも1つのレプリカが稼働している必要があります。他のレプリカは、再びアクティブになった際にデータを同期し、整合性を修復します。このアプローチは、最近挿入されたデータの損失の可能性を低く保つことができます。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/architecture/cluster-deployment.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/architecture/cluster-deployment.md.hash deleted file mode 100644 index 521afde22d1..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/architecture/cluster-deployment.md.hash +++ /dev/null @@ -1 +0,0 @@ -e77d221d30def930 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/best-practices/_snippets/_async_inserts.md b/i18n/jp/docusaurus-plugin-content-docs/current/best-practices/_snippets/_async_inserts.md deleted file mode 100644 index 8d9fa79060d..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/best-practices/_snippets/_async_inserts.md +++ /dev/null @@ -1,67 +0,0 @@ ---- -{} ---- - -import Image from '@theme/IdealImage'; -import async_inserts from '@site/static/images/bestpractices/async_inserts.png'; - -Asynchronous inserts in ClickHouse provide a powerful alternative when client-side batching isn't feasible. This is especially valuable in observability workloads, where hundreds or thousands of agents send data continuously - logs, metrics, traces - often in small, real-time payloads. Buffering data client-side in these environments increases complexity, requiring a centralized queue to ensure sufficiently large batches can be sent. - -:::note -多くの小さなバッチを同期モードで送信することは推奨されません。これは多くのパーツが作成されることにつながります。これにより、クエリのパフォーマンスが低下し、["too many part"](/knowledgebase/exception-too-many-parts) エラーが発生します。 -::: - -Asynchronous inserts shift batching responsibility from the client to the server by writing incoming data to an in-memory buffer, then flushing it to storage based on configurable thresholds. This approach significantly reduces part creation overhead, lowers CPU usage, and ensures ingestion remains efficient - even under high concurrency. - -The core behavior is controlled via the [`async_insert`](/operations/settings/settings#async_insert) setting. - -Async inserts - -When enabled (1), inserts are buffered and only written to disk once one of the flush conditions is met: - -(1) the buffer reaches a specified size (async_insert_max_data_size) -(2) a time threshold elapses (async_insert_busy_timeout_ms) or -(3) a maximum number of insert queries accumulate (async_insert_max_query_number). - -This batching process is invisible to clients and helps ClickHouse efficiently merge insert traffic from multiple sources. However, until a flush occurs, the data cannot be queried. Importantly, there are multiple buffers per insert shape and settings combination, and in clusters, buffers are maintained per node - enabling fine-grained control across multi-tenant environments. Insert mechanics are otherwise identical to those described for [synchronous inserts](/best-practices/selecting-an-insert-strategy#synchronous-inserts-by-default). 
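As a minimal sketch of how these flush thresholds combine, the settings can also be applied per `INSERT` (the `logs` table, its columns, and the threshold values below are hypothetical illustrations, not recommendations):

```sql
-- Hypothetical table: logs(ts DateTime, message String).
-- The buffer is flushed as soon as ANY one of the three thresholds is reached.
INSERT INTO logs
SETTINGS
    async_insert = 1,                      -- enable server-side batching
    wait_for_async_insert = 1,             -- acknowledge only after the flush
    async_insert_max_data_size = 1000000,  -- flush at ~1 MB of buffered data
    async_insert_busy_timeout_ms = 1000,   -- or after 1 second
    async_insert_max_query_number = 450    -- or after this many buffered INSERTs
VALUES ('2024-01-01 00:00:00', 'example log line');
```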
- -### Choosing a Return Mode {#choosing-a-return-mode} - -The behavior of asynchronous inserts is further refined using the [`wait_for_async_insert`](/operations/settings/settings#wait_for_async_insert) setting. - -When set to 1 (the default), ClickHouse only acknowledges the insert after the data is successfully flushed to disk. This ensures strong durability guarantees and makes error handling straightforward: if something goes wrong during the flush, the error is returned to the client. This mode is recommended for most production scenarios, especially when insert failures must be tracked reliably. - -[Benchmarks](https://clickhouse.com/blog/asynchronous-data-inserts-in-clickhouse) show it scales well with concurrency - whether you're running 200 or 500 clients- thanks to adaptive inserts and stable part creation behavior. - -Setting `wait_for_async_insert = 0` enables "fire-and-forget" mode. Here, the server acknowledges the insert as soon as the data is buffered, without waiting for it to reach storage. - -This offers ultra-low-latency inserts and maximal throughput, ideal for high-velocity, low-criticality data. However, this comes with trade-offs: there's no guarantee the data will be persisted, errors may only surface during flush, and it's difficult to trace failed inserts. Use this mode only if your workload can tolerate data loss. - -[Benchmarks also demonstrate](https://clickhouse.com/blog/asynchronous-data-inserts-in-clickhouse) substantial part reduction and lower CPU usage when buffer flushes are infrequent (e.g. every 30 seconds), but the risk of silent failure remains. - -Our strong recommendation is to use `async_insert=1,wait_for_async_insert=1` if using asynchronous inserts. Using `wait_for_async_insert=0` is very risky because your INSERT client may not be aware if there are errors, and also can cause potential overload if your client continues to write quickly in a situation where the ClickHouse server needs to slow down the writes and create some backpressure in order to ensure reliability of the service. - -### Deduplication and reliability {#deduplication-and-reliability} - -By default, ClickHouse performs automatic deduplication for synchronous inserts, which makes retries safe in failure scenarios. However, this is disabled for asynchronous inserts unless explicitly enabled (this should not be enabled if you have dependent materialized views - [see issue](https://github.com/ClickHouse/ClickHouse/issues/66003)). - -In practice, if deduplication is turned on and the same insert is retried - due to, for instance, a timeout or network drop - ClickHouse can safely ignore the duplicate. This helps maintain idempotency and avoids double-writing data. Still, it's worth noting that insert validation and schema parsing happen only during buffer flush - so errors (like type mismatches) will only surface at that point. - -### Enabling asynchronous inserts {#enabling-asynchronous-inserts} - -Asynchronous inserts can be enabled for a particular user, or for a specific query: - -- Enabling asynchronous inserts at the user level. This example uses the user `default`, if you create a different user then substitute that username: - ```sql - ALTER USER default SETTINGS async_insert = 1 - ``` -- You can specify the asynchronous insert settings by using the SETTINGS clause of insert queries: - ```sql - INSERT INTO YourTable SETTINGS async_insert=1, wait_for_async_insert=1 VALUES (...) 
- ``` -- You can also specify asynchronous insert settings as connection parameters when using a ClickHouse programming language client. - - As an example, this is how you can do that within a JDBC connection string when you use the ClickHouse Java JDBC driver for connecting to ClickHouse Cloud: - ```bash - "jdbc:ch://HOST.clickhouse.cloud:8443/?user=default&password=PASSWORD&ssl=true&custom_http_params=async_insert=1,wait_for_async_insert=1" - ``` diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/best-practices/_snippets/_async_inserts.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/best-practices/_snippets/_async_inserts.md.hash deleted file mode 100644 index 09328f918dd..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/best-practices/_snippets/_async_inserts.md.hash +++ /dev/null @@ -1 +0,0 @@ -6573683eec17fe67 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/best-practices/_snippets/_avoid_mutations.md b/i18n/jp/docusaurus-plugin-content-docs/current/best-practices/_snippets/_avoid_mutations.md deleted file mode 100644 index 0a11610d575..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/best-practices/_snippets/_avoid_mutations.md +++ /dev/null @@ -1,19 +0,0 @@ ---- -{} ---- - - - -ClickHouseでは、**ミューテーション**はテーブル内の既存データを変更または削除する操作を指します - 通常は `ALTER TABLE ... DELETE` または `ALTER TABLE ... UPDATE` を使用します。これらのステートメントは標準SQL操作に似ているように見えるかもしれませんが、内部では根本的に異なります。 - -ClickHouseのミューテーションは、行を直接変更するのではなく、変更の影響を受ける全ての[データパーツ](/parts)を再書き込みする非同期のバックグラウンドプロセスです。このアプローチはClickHouseの列指向で不変のストレージモデルに必要ですが、I/Oやリソース消費が大きくなる可能性があります。 - -ミューテーションが発行されると、ClickHouseは新しい**ミューテーションパーツ**の作成をスケジュールし、元のパーツは新しいパーツの準備が整うまでそのまま残されます。新しいパーツの準備が整うと、ミューテーションパーツが元のものと原子的に置き換えられます。しかし、全体のパーツを書き換える操作であるため、わずかな変更(例えば単一行の更新)でも大規模な書き直しや過剰な書き込み増幅を引き起こすことがあります。 - -大規模なデータセットでは、これはディスクI/Oの大幅なスパイクを生じ、全体のクラスターのパフォーマンスを低下させる可能性があります。マージとは異なり、ミューテーションは一度提出されるとロールバックできず、明示的にキャンセルしない限りサーバーの再起動後も実行され続けます - [`KILL MUTATION`](/sql-reference/statements/kill#kill-mutation)を参照してください。 - -ミューテーションは**完全に順序付けられています**: ミューテーションが発行される前に挿入されたデータに適用され、新しいデータには影響を与えません。挿入をブロックすることはありませんが、他の進行中のクエリと重なる可能性があります。ミューテーション中に実行されるSELECTは、ミューテーションされた部分とミューテーションされていない部分の組み合わせを読み取ることがあり、実行中にデータの不整合なビューを引き起こすことがあります。ClickHouseは部分ごとにミューテーションを並行して実行するため、特に複雑なサブクエリ(例えば x IN (SELECT ...))が関与している場合、メモリやCPUの使用がさらに増大することがあります。 - -一般的に、特に大量のデータを扱うテーブルでは、**頻繁または大規模なミューテーションは避けてください**。代わりに、[ReplacingMergeTree](/guides/replacing-merge-tree) や [CollapsingMergeTree](/engines/table-engines/mergetree-family/collapsingmergetree) などの代替テーブルエンジンを使用し、クエリ時やマージ時にデータ修正をより効率的に処理できるようにしてください。ミューテーションが絶対に必要な場合は、system.mutationsテーブルを使用して注意深く監視し、プロセスがスタックしたり動作が不安定な場合には `KILL MUTATION` を使用してください。ミューテーションの誤用は、パフォーマンスの低下や過剰なストレージの消費、潜在的なサービスの不安定性を引き起こす可能性がありますので、慎重に、かつ控えめに適用してください。 - -データを削除するために、ユーザーは[軽量削除](/guides/developer/lightweight-delete)や[パーティション](/best-practices/choosing-a-partitioning-key)を介したデータの管理を考慮することもでき、これにより全体のパーツを[効率的にドロップ](/sql-reference/statements/alter/partition#drop-partitionpart)できます。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/best-practices/_snippets/_avoid_mutations.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/best-practices/_snippets/_avoid_mutations.md.hash deleted file mode 100644 index be97b05c41a..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/best-practices/_snippets/_avoid_nullable_columns.md 
b/i18n/jp/docusaurus-plugin-content-docs/current/best-practices/_snippets/_avoid_nullable_columns.md deleted file mode 100644 index e3c85a5c375..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/best-practices/_snippets/_avoid_nullable_columns.md +++ /dev/null @@ -1,34 +0,0 @@ ---- -{} ---- - - - -[`Nullable` カラム](/sql-reference/data-types/nullable/) (例: `Nullable(String)`) は `UInt8` 型の別のカラムを作成します。この追加のカラムは、ユーザーが Nullable カラムを操作するたびに処理される必要があります。これにより追加のストレージスペースが使用され、ほぼ常にパフォーマンスに悪影響を与えます。 - -`Nullable` カラムを避けるために、そのカラムにデフォルト値を設定することを検討してください。例えば、次の代わりに: - -```sql -CREATE TABLE default.sample -( - `x` Int8, - -- highlight-next-line - `y` Nullable(Int8) -) -ENGINE = MergeTree -ORDER BY x -``` -次のように使用します: - -```sql -CREATE TABLE default.sample2 -( - `x` Int8, - -- highlight-next-line - `y` Int8 DEFAULT 0 -) -ENGINE = MergeTree -ORDER BY x -``` - -あなたのユースケースを考慮すると、デフォルト値が不適切な場合もあります。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/best-practices/_snippets/_avoid_nullable_columns.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/best-practices/_snippets/_avoid_nullable_columns.md.hash deleted file mode 100644 index a580055b2bf..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/best-practices/_snippets/_avoid_nullable_columns.md.hash +++ /dev/null @@ -1 +0,0 @@ -f3e9e97e08f2ff41 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/best-practices/_snippets/_avoid_optimize_final.md b/i18n/jp/docusaurus-plugin-content-docs/current/best-practices/_snippets/_avoid_optimize_final.md deleted file mode 100644 index b1ca2a8dda6..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/best-practices/_snippets/_avoid_optimize_final.md +++ /dev/null @@ -1,47 +0,0 @@ ---- -{} ---- - -import Image from '@theme/IdealImage'; -import simple_merges from '@site/static/images/bestpractices/simple_merges.png'; - -ClickHouse テーブルは **MergeTree エンジン** を使用して、ディスク上に **不変のパーツ** としてデータを保存します。これは、データが挿入されるたびに作成されます。 - -各挿入は、インデックスやチェックサムなどのメタデータとともに、ソートされた圧縮カラムファイルを含む新しいパーツを作成します。パーツの構造と形成方法についての詳細な説明については、この [ガイド](/parts) をお勧めします。 - -時間が経つにつれて、バックグラウンドプロセスが小さなパーツを大きなパーツにマージして、断片化を減らし、クエリパフォーマンスを向上させます。 - -Simple merges - -次のコマンドを使用して、手動でこのマージをトリガーしたくなるかもしれませんが: - -```sql -OPTIMIZE TABLE FINAL; -``` - -**ほとんどの場合、この操作は避けるべきです**。なぜなら、これはリソース集約的な操作を開始し、クラスターのパフォーマンスに影響を与える可能性があるからです。 - -## なぜ避けるべきか? {#why-avoid} - -### 高コストである {#its-expensive} - -`OPTIMIZE FINAL` を実行すると、ClickHouse は **すべての** アクティブなパーツを **単一のパーツ** にマージすることを強制します。これは、すでに大きなマージが行われている場合でも行われます。これには以下が含まれます: - -1. **すべてのパーツの解凍** -2. **データのマージ** -3. **再圧縮** -4. 
**最終パーツをディスクやオブジェクトストレージに書き込む** - -これらのステップは **CPU と I/O 集約型** であり、大規模なデータセットが関与する場合、システムに大きな負担をかける可能性があります。 - -### 安全制限を無視する {#it-ignores-safety-limits} - -通常、ClickHouse は ~150 GB より大きいパーツのマージを避けます(これは [max_bytes_to_merge_at_max_space_in_pool](/operations/settings/merge-tree-settings#max_bytes_to_merge_at_max_space_in_pool) を介して設定可能です)。しかし、`OPTIMIZE FINAL` は **この保護機能を無視します**。これは以下を意味します: - -* **複数の 150 GB パーツ** を1つの巨大なパーツにマージしようとする可能性があります。 -* これにより **長いマージ時間**、**メモリプレッシャー**、さらには **メモリエラー** が発生する可能性があります。 -* これらの大きなパーツはマージが困難になる可能性があり、上記の理由によりそれらをさらにマージしようとする試みが失敗します。クエリの時間の正しい動作のためにマージが必要な場合、これは望ましくない結果をもたらす可能性があります。例えば、[ReplacingMergeTree 用の重複排除](/guides/developer/deduplication#using-replacingmergetree-for-upserts) により、クエリのパフォーマンスが低下することがあります。 - -## バックグラウンドマージに作業を任せる {#let-background-merges-do-the-work} - -ClickHouse はすでにストレージとクエリの効率を最適化するために、スマートなバックグラウンドマージを実行しています。これらは段階的で、リソースを考慮し、設定された閾値を尊重します。非常に特定のニーズがない限り(例:テーブルの凍結前にデータを確定する、またはエクスポートするなど)、**ClickHouse にマージを自動的に管理させる方が良いです**。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/best-practices/_snippets/_avoid_optimize_final.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/best-practices/_snippets/_avoid_optimize_final.md.hash deleted file mode 100644 index 28c1a7ccb2b..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/best-practices/_snippets/_avoid_optimize_final.md.hash +++ /dev/null @@ -1 +0,0 @@ -e39ef18c811b7519 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/best-practices/_snippets/_bulk_inserts.md b/i18n/jp/docusaurus-plugin-content-docs/current/best-practices/_snippets/_bulk_inserts.md deleted file mode 100644 index 9f7f0511084..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/best-practices/_snippets/_bulk_inserts.md +++ /dev/null @@ -1,29 +0,0 @@ ---- -{} ---- - - - -The above mechanics illustrate a constant overhead regardless of the insert size, making batch size the single most important optimization for ingest throughput. Batching inserts reduce the overhead as a proportion of total insert time and improves processing efficiency. - -We recommend inserting data in batches of at least 1,000 rows, and ideally between 10,000–100,000 rows. Fewer, larger inserts reduce the number of parts written, minimize merge load, and lower overall system resource usage. - -**For a synchronous insert strategy to be effective this client-side batching is required.** - -If you're unable to batch data client-side, ClickHouse supports asynchronous inserts that shift batching to the server ([see](/best-practices/selecting-an-insert-strategy#asynchronous-inserts)). - -:::tip -Regardless of the size of your inserts, we recommend keeping the number of insert queries around one insert query per second. The reason for that recommendation is that the created parts are merged to larger parts in the background (in order to optimize your data for read queries), and sending too many insert queries per second can lead to situations where the background merging can't keep up with the number of new parts. However, you can use a higher rate of insert queries per second when you use asynchronous inserts (see asynchronous inserts). 
-::: - -上記のメカニズムは、挿入サイズに関係なく一定のオーバーヘッドを示しており、バッチサイズがインジェストスループットの最も重要な最適化要素であることを示しています。バッチ挿入は、全体の挿入時間に対するオーバーヘッドを減少させ、処理効率を向上させます。 - -データは、少なくとも1,000行のバッチで挿入することを推奨し、理想的には10,000〜100,000行の間で行うべきです。少ない大きな挿入は、書き込まれるパーツの数を減少させ、マージ負荷を最小化し、全体的なシステムリソースの使用を低下させます。 - -**同期挿入戦略が効果的に機能するためには、このクライアント側のバッチ処理が必要です。** - -クライアント側でデータをバッチ処理できない場合、ClickHouseはバッチ処理をサーバーに移す非同期挿入をサポートしています([参照](/best-practices/selecting-an-insert-strategy#asynchronous-inserts))。 - -:::tip -挿入のサイズに関係なく、挿入クエリの数を約1秒あたり1つの挿入クエリに保つことを推奨します。この推奨の理由は、作成されたパーツがバックグラウンドでより大きなパーツにマージされるため(読み取りクエリ用にデータを最適化するため)、1秒あたりに挿入クエリを送信しすぎると、バックグラウンドのマージが新しいパーツの数に追いつけない状況が発生する可能性があるからです。ただし、非同期挿入を使用する場合は、1秒あたりの挿入クエリの頻度を高めることができます(非同期挿入を参照)。 -::: diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/best-practices/_snippets/_bulk_inserts.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/best-practices/_snippets/_bulk_inserts.md.hash deleted file mode 100644 index d6eb64272f9..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/best-practices/_snippets/_bulk_inserts.md.hash +++ /dev/null @@ -1 +0,0 @@ -aad7e65192820e16 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/best-practices/avoid_mutations.md b/i18n/jp/docusaurus-plugin-content-docs/current/best-practices/avoid_mutations.md deleted file mode 100644 index 12b8bc75687..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/best-practices/avoid_mutations.md +++ /dev/null @@ -1,11 +0,0 @@ ---- -slug: '/best-practices/avoid-mutations' -sidebar_position: 10 -sidebar_label: '変更を避ける' -title: '変更を避ける' -description: 'ClickHouse で変更を避ける理由について説明したページ' ---- - -import Content from '@site/i18n/jp/docusaurus-plugin-content-docs/current/best-practices/_snippets/_avoid_mutations.md'; - - diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/best-practices/avoid_mutations.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/best-practices/avoid_mutations.md.hash deleted file mode 100644 index 25f41e51420..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/best-practices/avoid_mutations.md.hash +++ /dev/null @@ -1 +0,0 @@ -86dca8ebdada218c diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/best-practices/avoid_optimize_final.md b/i18n/jp/docusaurus-plugin-content-docs/current/best-practices/avoid_optimize_final.md deleted file mode 100644 index 97458bf9650..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/best-practices/avoid_optimize_final.md +++ /dev/null @@ -1,11 +0,0 @@ ---- -slug: '/best-practices/avoid-optimize-final' -sidebar_position: 10 -sidebar_label: 'Optimize Finalを避ける' -title: 'Optimize Finalを避ける' -description: 'ClickHouse で Optimize Final を避ける理由を説明するページ' ---- - -import Content from '@site/i18n/jp/docusaurus-plugin-content-docs/current/best-practices/_snippets/_avoid_optimize_final.md'; - - diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/best-practices/avoid_optimize_final.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/best-practices/avoid_optimize_final.md.hash deleted file mode 100644 index eb6e708657a..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/best-practices/avoid_optimize_final.md.hash +++ /dev/null @@ -1 +0,0 @@ -659fa75fc37be43e diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/best-practices/choosing_a_primary_key.md b/i18n/jp/docusaurus-plugin-content-docs/current/best-practices/choosing_a_primary_key.md deleted file mode 100644 index dd3e0531668..00000000000 --- 
a/i18n/jp/docusaurus-plugin-content-docs/current/best-practices/choosing_a_primary_key.md +++ /dev/null @@ -1,174 +0,0 @@ ---- -slug: '/best-practices/choosing-a-primary-key' -sidebar_position: 10 -sidebar_label: '主キーの選択' -title: '主キーの選択' -description: 'ClickHouse で主キーを選択する方法について説明したページ' ---- - -import Image from '@theme/IdealImage'; -import create_primary_key from '@site/static/images/bestpractices/create_primary_key.gif'; -import primary_key from '@site/static/images/bestpractices/primary_key.gif'; - -> このページでは、「ordering key」という用語を「primary key」と同義で使用します。厳密には[ClickHouseではこれらは異なります](/engines/table-engines/mergetree-family/mergetree#choosing-a-primary-key-that-differs-from-the-sorting-key)が、本書の目的においては、読者はこれを同義として扱うことができ、ordering keyは`ORDER BY`で指定されたカラムを指します。 - -ClickHouseの主キーは、PostgresのようなOLTPデータベースでの類似の用語に慣れている人には[非常に異なります](/migrations/postgresql/data-modeling-techniques#primary-ordering-keys-in-clickhouse)。 - -ClickHouseで効果的な主キーを選択することは、クエリのパフォーマンスとストレージ効率にとって非常に重要です。ClickHouseはデータをパーツに分けて管理し、それぞれに独自のスパース主インデックスを持たせます。このインデックスはスキャンするデータの量を減少させることにより、クエリを大幅に高速化します。さらに、主キーはディスク上のデータの物理的な順序を決定するため、圧縮効率にも直接影響します。最適に順序付けられたデータはより効果的に圧縮され、I/Oを減らすことでさらなるパフォーマンス向上を図ります。 - -1. ordering keyを選択する際は、クエリフィルター(つまり`WHERE`句)で頻繁に使用されるカラムを優先します。特に、大量の行を除外するカラムが重要です。 -2. テーブル内の他のデータと高い相関があるカラムも有益で、連続的なストレージが圧縮率とメモリ効率を改善します、特に`GROUP BY`や`ORDER BY`操作中に。 - -
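候補カラムを検討する際は、事前に実際のカーディナリティを確認しておくと判断しやすくなります(後述の例で使用する `PostTypeId` のような低カーディナリティのカラムは、先頭に置く有力な候補になります)。以下は仮の `events` テーブルを想定した簡単なスケッチです(テーブル名とカラム名は説明用の仮定であり、特定の推奨ではありません)。

```sql
-- 仮の events テーブルに対して、ordering key 候補のユニーク値数を確認する例。
-- カーディナリティが低く、WHERE で頻繁に使われるカラムほど先頭に置く候補になります。
SELECT
    uniq(event_type)   AS event_type_cardinality,
    uniq(country_code) AS country_code_cardinality,
    uniq(user_id)      AS user_id_cardinality
FROM events;
```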
-ordering keyを選定する際に適用できる簡単なルールがあります。以下の項目は時に対立する可能性があるため、順番に考慮してください。**ユーザーはこのプロセスからいくつかのキーを特定でき、通常は4-5個で十分です**。 - -:::note 念のため -ordering keyはテーブル作成時に定義する必要があり、後から追加することはできません。追加のorderingは、データ挿入後(または前)にプロジェクションとして知られる機能を用いてテーブルに追加できます。この結果、データの重複が生じることに注意してください。詳細については[こちら](/sql-reference/statements/alter/projection)を参照してください。 -::: - -## 例 {#example} - -以下の`posts_unordered`テーブルを考察してください。これはStack Overflowの各ポストに対して1行を持ちます。 - -このテーブルには主キーがありません - `ORDER BY tuple()`で示されています。 - -```sql -CREATE TABLE posts_unordered -( - `Id` Int32, - `PostTypeId` Enum('Question' = 1, 'Answer' = 2, 'Wiki' = 3, 'TagWikiExcerpt' = 4, - 'TagWiki' = 5, 'ModeratorNomination' = 6, 'WikiPlaceholder' = 7, 'PrivilegeWiki' = 8), - `AcceptedAnswerId` UInt32, - `CreationDate` DateTime, - `Score` Int32, - `ViewCount` UInt32, - `Body` String, - `OwnerUserId` Int32, - `OwnerDisplayName` String, - `LastEditorUserId` Int32, - `LastEditorDisplayName` String, - `LastEditDate` DateTime, - `LastActivityDate` DateTime, - `Title` String, - `Tags` String, - `AnswerCount` UInt16, - `CommentCount` UInt8, - `FavoriteCount` UInt8, - `ContentLicense` LowCardinality(String), - `ParentId` String, - `CommunityOwnedDate` DateTime, - `ClosedDate` DateTime -) -ENGINE = MergeTree -ORDER BY tuple() -``` - -ユーザーが2024年以降に提出された質問の数を計算することを希望していると仮定しましょう。これは彼らの最も一般的なアクセスパターンを表しています。 - -```sql -SELECT count() -FROM stackoverflow.posts_unordered -WHERE (CreationDate >= '2024-01-01') AND (PostTypeId = 'Question') - -┌─count()─┐ -│ 192611 │ -└─────────┘ ---highlight-next-line -1 row in set. Elapsed: 0.055 sec. Processed 59.82 million rows, 361.34 MB (1.09 billion rows/s., 6.61 GB/s.) -``` - -このクエリによって読み取られた行数とバイト数に注意してください。主キーがないため、クエリはデータセット全体をスキャンする必要があります。 - -`EXPLAIN indexes=1`を使用すると、インデックスの不足によりフルテーブルスキャンであることが確認されています。 - -```sql -EXPLAIN indexes = 1 -SELECT count() -FROM stackoverflow.posts_unordered -WHERE (CreationDate >= '2024-01-01') AND (PostTypeId = 'Question') - -┌─explain───────────────────────────────────────────────────┐ -│ Expression ((Project names + Projection)) │ -│ Aggregating │ -│ Expression (Before GROUP BY) │ -│ Expression │ -│ ReadFromMergeTree (stackoverflow.posts_unordered) │ -└───────────────────────────────────────────────────────────┘ - -5 rows in set. Elapsed: 0.003 sec. -``` - -もし`posts_ordered`というテーブルが、同じデータを持ち、`ORDER BY`が`(PostTypeId, toDate(CreationDate))`として定義されていると仮定します。 - -```sql -CREATE TABLE posts_ordered -( - `Id` Int32, - `PostTypeId` Enum('Question' = 1, 'Answer' = 2, 'Wiki' = 3, 'TagWikiExcerpt' = 4, 'TagWiki' = 5, 'ModeratorNomination' = 6, - 'WikiPlaceholder' = 7, 'PrivilegeWiki' = 8), -... -) -ENGINE = MergeTree -ORDER BY (PostTypeId, toDate(CreationDate)) -``` - -`PostTypeId`は8のカーディナリティを持ち、我们のordering keyの最初のエントリとして論理的に選ばれるべきです。日付の粒度フィルタリングが十分であると認識されるため(それでもdatetimeフィルターには有利である)、`toDate(CreationDate)`を私たちのキーの第2コンポーネントとして使用します。これにより、日付が16ビットで表現できるため、インデックスが小さくなり、フィルター処理が速くなります。 - -以下のアニメーションは、Stack Overflowポストテーブルのために最適化されたスパース主インデックスがどのように作成されるかを示しています。個々の行をインデックス化するのではなく、行のブロックをターゲットにします: - - - -同じクエリがこのordering keyを持つテーブルで繰り返される場合: - -```sql -SELECT count() -FROM stackoverflow.posts_ordered -WHERE (CreationDate >= '2024-01-01') AND (PostTypeId = 'Question') - -┌─count()─┐ -│ 192611 │ -└─────────┘ ---highlight-next-line -1 row in set. Elapsed: 0.013 sec. Processed 196.53 thousand rows, 1.77 MB (14.64 million rows/s., 131.78 MB/s.) 
-``` - -このクエリは今やスパースインデックスを利用し、読み取られるデータ量を大幅に減少させ、実行時間を4倍に短縮します - 読み取られた行数とバイト数の減少に注目してください。 - -インデックスの使用は`EXPLAIN indexes=1`で確認できます。 - -```sql -EXPLAIN indexes = 1 -SELECT count() -FROM stackoverflow.posts_ordered -WHERE (CreationDate >= '2024-01-01') AND (PostTypeId = 'Question') - -┌─explain─────────────────────────────────────────────────────────────────────────────────────┐ -│ Expression ((Project names + Projection)) │ -│ Aggregating │ -│ Expression (Before GROUP BY) │ -│ Expression │ -│ ReadFromMergeTree (stackoverflow.posts_ordered) │ -│ Indexes: │ -│ PrimaryKey │ -│ Keys: │ -│ PostTypeId │ -│ toDate(CreationDate) │ -│ Condition: and((PostTypeId in [1, 1]), (toDate(CreationDate) in [19723, +Inf))) │ -│ Parts: 14/14 │ -│ Granules: 39/7578 │ -└─────────────────────────────────────────────────────────────────────────────────────────────┘ - -13 rows in set. Elapsed: 0.004 sec. -``` - -さらに、スパースインデックスが、私たちの例のクエリに対する一致が不可能なすべての行ブロックをどのようにプルーニングするかを可視化します: - - - -:::note -テーブル内のすべてのカラムは、指定されたordering keyの値に基づいてソートされます。キーそのものに含まれているかどうかに関係なく。たとえば、`CreationDate`をキーとして使用すると、他のすべてのカラムの値の順序は`CreationDate`カラムの値の順序に対応します。複数のordering keyを指定できます - これは`SELECT`クエリの`ORDER BY`句と同様の意味でソートされます。 -::: - -主キーを選択するための完全な高度なガイドは[こちら](/guides/best-practices/sparse-primary-indexes)にあります。 - -ordering keyが圧縮を改善し、ストレージをさらに最適化する方法についての深い洞察は、[ClickHouseの圧縮](/data-compression/compression-in-clickhouse)および[カラム圧縮コーデック](/data-compression/compression-in-clickhouse#choosing-the-right-column-compression-codec)に関する公式ガイドを探求してください。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/best-practices/choosing_a_primary_key.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/best-practices/choosing_a_primary_key.md.hash deleted file mode 100644 index 534d7f5ab9c..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/best-practices/choosing_a_primary_key.md.hash +++ /dev/null @@ -1 +0,0 @@ -1d4328817d00d6f9 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/best-practices/index.md b/i18n/jp/docusaurus-plugin-content-docs/current/best-practices/index.md deleted file mode 100644 index 365a5183693..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/best-practices/index.md +++ /dev/null @@ -1,38 +0,0 @@ ---- -slug: '/best-practices' -keywords: -- 'Cloud' -- 'Primary key' -- 'Ordering key' -- 'Materialized Views' -- 'Best Practices' -- 'Bulk Inserts' -- 'Asynchronous Inserts' -- 'Avoid Mutations' -- 'Avoid Nullable Columns' -- 'Avoid Optimize Final' -- 'Partitioning Key' -title: '概要' -hide_title: true -description: 'ClickHouseのベストプラクティスセクションのランディングページ' ---- - - - - -# Best Practices in ClickHouse {#best-practices-in-clickhouse} - -このセクションでは、ClickHouseを最大限に活用するために従うべきベストプラクティスを提供します。 - -| ページ | 説明 | -|----------------------------------------------------------------------|----------------------------------------------------------------------| -| [Choosing a Primary Key](/best-practices/choosing-a-primary-key) | ClickHouseで効果的な主キーを選択するためのガイダンス。 | -| [Select Data Types](/best-practices/select-data-types) | 適切なデータ型を選択するための推奨事項。 | -| [Use Materialized Views](/best-practices/use-materialized-views) | マテリアライズドビューを利用するタイミングと方法。 | -| [Minimize and Optimize JOINs](/best-practices/minimize-optimize-joins)| JOIN操作を最小限に抑え、最適化するためのベストプラクティス。 | -| [Choosing a Partitioning Key](/best-practices/choosing-a-partitioning-key) | パーティショニングキーを効果的に選択・適用する方法。 | -| [Selecting an Insert Strategy](/best-practices/selecting-an-insert-strategy) | ClickHouseにおける効率的なデータ挿入戦略。 | -| [Data Skipping 
Indices](/best-practices/use-data-skipping-indices-where-appropriate) | パフォーマンス向上のためにデータスキッピングインデックスを適用するタイミング。 | -| [Avoid Mutations](/best-practices/avoid-mutations) | ミューテーションを避ける理由と、それなしで設計する方法。 | -| [Avoid OPTIMIZE FINAL](/best-practices/avoid-optimize-final) | `OPTIMIZE FINAL`がコスト高になる理由とその回避方法。 | -| [Use JSON where appropriate](/best-practices/use-json-where-appropriate) | ClickHouseにおけるJSONカラム使用の考慮事項。 | diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/best-practices/index.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/best-practices/index.md.hash deleted file mode 100644 index 3939d9654b6..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/best-practices/index.md.hash +++ /dev/null @@ -1 +0,0 @@ -bd85620bf203cca9 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/best-practices/json_type.md b/i18n/jp/docusaurus-plugin-content-docs/current/best-practices/json_type.md deleted file mode 100644 index 6e59f1ad5b7..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/best-practices/json_type.md +++ /dev/null @@ -1,316 +0,0 @@ ---- -slug: '/best-practices/use-json-where-appropriate' -sidebar_position: 10 -sidebar_label: 'JSON の使用' -title: '適切な場面で JSON を使用する' -description: 'JSON の使用タイミングについて説明したページ' ---- - - - -ClickHouseは、半構造化データおよび動的データ用に設計されたネイティブJSONカラム型を提供しています。重要なことは、**これはデータ形式ではなく、カラム型であることを明確にすること**です。 JSONを文字列としてClickHouseに挿入したり、[JSONEachRow](/interfaces/formats/JSONEachRow)などのサポートされている形式を使用することができますが、JSONカラム型を使用することを意味するわけではありません。ユーザーは、自分のデータの構造が動的である場合にのみJSON型を使用すべきです。単にJSONを保存している場合には使用すべきではありません。 - -## JSON型を使用するタイミング {#when-to-use-the-json-type} - -データに次のような特徴がある場合、JSON型を使用してください: - -* **予測できないキー**があり、時間とともに変化する可能性がある。 -* **様々なタイプの値**を含む(例えば、パスには時々文字列が含まれ、時々数値が含まれる場合がある)。 -* 厳格な型付けが実現できない場合でも、スキーマの柔軟性が必要。 - -データの構造が既知で一貫している場合、JSON型の必要性はほとんどありません。たとえデータがJSON形式であっても、特に次のような場合は: - -* **既知のキーを持つ平坦な構造**:標準のカラム型(例:String)を使用してください。 -* **予測可能なネスト**:これらの構造にはTuple、Array、またはNested型を使用してください。 -* **様々なタイプを持つ予測可能な構造**:代わりにDynamicまたはVariant型を検討してください。 - -アプローチを組み合わせることも可能です。例えば、予測可能なトップレベルフィールドに静的カラムを使用し、ペイロードの動的セクションに対して単一のJSONカラムを使用することができます。 - -## JSONを使用するための考慮事項とヒント {#considerations-and-tips-for-using-json} - -JSON型は、パスをサブカラムにフラット化することによって効率的な列指向ストレージを実現します。しかし、柔軟性には責任が伴います。効果的に使用するためには: - -* **カラム定義においてパスタイプを指定**し、既知のサブカラムの型を指定して不必要な型推論を回避します。 [hints in the column definition](/sql-reference/data-types/newjson)を使用してください。 -* **必要ない場合はパスをスキップ**し、[SKIPやSKIP REGEXP](/sql-reference/data-types/newjson)を使用してストレージを削減し、パフォーマンスを向上させます。 -* あまりにも高く[`max_dynamic_paths`](/sql-reference/data-types/newjson#reaching-the-limit-of-dynamic-paths-inside-json)を設定しないようにしてください。大きな値はリソース消費を増加させ、効率を低下させます。目安としては10,000未満にしてください。 - -:::note 型ヒント -型ヒントは、不必要な型推論を回避する方法を提供するだけでなく、ストレージと処理の間接指向を完全に排除します。型ヒントのあるJSONパスは、従来のカラムと同様に常にストレージされ、[**識別子カラム**](https://clickhouse.com/blog/a-new-powerful-json-data-type-for-clickhouse#storage-extension-for-dynamically-changing-data)やクエリ時の動的解決の必要がありません。したがって、明確に定義された型ヒントを使用することで、ネストされたJSONフィールドは、最初からトップレベルフィールドとしてモデル化されていたかのように同じパフォーマンスと効率を実現します。その結果、ほとんど一貫しているがJSONの柔軟性の恩恵を受けるデータセットに対して、型ヒントはスキーマやインジェストパイプラインを再構築することなくパフォーマンスを維持する便利な方法を提供します。 -::: - -## 高度な機能 {#advanced-features} - -* JSONカラムは、他のカラムと同様に**主キーに使用できます**。サブカラムのためのコーデックは指定できません。 -* [`JSONAllPathsWithTypes()`や`JSONDynamicPaths()`](/sql-reference/data-types/newjson#introspection-functions)などの関数を介してイントロスペクションをサポートしています。 -* `.^`構文を使用してネストされたサブオブジェクトを読むことができます。 -* クエリ構文は標準SQLと異なる場合があり、ネストされたフィールドのために特別なキャスティングや演算子が必要になることがあります。 - -追加のガイダンスについては、[ClickHouse 
JSONドキュメント](/sql-reference/data-types/newjson)を参照するか、ブログ投稿[ClickHouseのための新しい強力なJSONデータ型](https://clickhouse.com/blog/a-new-powerful-json-data-type-for-clickhouse)を探ってください。 - -## 例 {#examples} - -次のJSONサンプルを考えてみましょう。これは[Python PyPIデータセット](https://clickpy.clickhouse.com/)からの行を表しています。 - -```json -{ - "date": "2022-11-15", - "country_code": "ES", - "project": "clickhouse-connect", - "type": "bdist_wheel", - "installer": "pip", - "python_minor": "3.9", - "system": "Linux", - "version": "0.3.0" -} -``` - -このスキーマが静的であり、型が明確に定義できると仮定しましょう。データがNDJSON形式(各行がJSON)であっても、そのようなスキーマに対してJSON型を使用する必要はありません。単に従来の型を使用してスキーマを定義します。 - -```sql -CREATE TABLE pypi ( - `date` Date, - `country_code` String, - `project` String, - `type` String, - `installer` String, - `python_minor` String, - `system` String, - `version` String -) -ENGINE = MergeTree -ORDER BY (project, date) -``` - -そして、JSON行を挿入します。 - -```sql -INSERT INTO pypi FORMAT JSONEachRow -{"date":"2022-11-15","country_code":"ES","project":"clickhouse-connect","type":"bdist_wheel","installer":"pip","python_minor":"3.9","system":"Linux","version":"0.3.0"} -``` - -[arXivデータセット](https://www.kaggle.com/datasets/Cornell-University/arxiv?resource=download)には250万件の学術論文が含まれています。このデータセット内の各行は、公開された学術論文を表しています。以下に例行を示します。 - -```json -{ - "id": "2101.11408", - "submitter": "Daniel Lemire", - "authors": "Daniel Lemire", - "title": "Number Parsing at a Gigabyte per Second", - "comments": "Software at https://github.com/fastfloat/fast_float and\n https://github.com/lemire/simple_fastfloat_benchmark/", - "journal-ref": "Software: Practice and Experience 51 (8), 2021", - "doi": "10.1002/spe.2984", - "report-no": null, - "categories": "cs.DS cs.MS", - "license": "http://creativecommons.org/licenses/by/4.0/", - "abstract": "With disks and networks providing gigabytes per second ....\n", - "versions": [ - { - "created": "Mon, 11 Jan 2021 20:31:27 GMT", - "version": "v1" - }, - { - "created": "Sat, 30 Jan 2021 23:57:29 GMT", - "version": "v2" - } - ], - "update_date": "2022-11-07", - "authors_parsed": [ - [ - "Lemire", - "Daniel", - "" - ] - ] -} -``` - -このJSONは複雑でネストされた構造を持っていますが、予測可能です。フィールドの数とタイプは変わりません。この例にはJSON型を使用することもできますが、[Tuples](/sql-reference/data-types/tuple)および[Nested](/sql-reference/data-types/nested-data-structures/nested)型を使用して構造を明示的に定義することもできます。 - -```sql -CREATE TABLE arxiv -( - `id` String, - `submitter` String, - `authors` String, - `title` String, - `comments` String, - `journal-ref` String, - `doi` String, - `report-no` String, - `categories` String, - `license` String, - `abstract` String, - `versions` Array(Tuple(created String, version String)), - `update_date` Date, - `authors_parsed` Array(Array(String)) -) -ENGINE = MergeTree -ORDER BY update_date -``` - -再度、データをJSONとして挿入できます。 - -```sql -INSERT INTO arxiv FORMAT JSONEachRow -{"id":"2101.11408","submitter":"Daniel Lemire","authors":"Daniel Lemire","title":"Number Parsing at a Gigabyte per Second","comments":"Software at https://github.com/fastfloat/fast_float and\n https://github.com/lemire/simple_fastfloat_benchmark/","journal-ref":"Software: Practice and Experience 51 (8), 2021","doi":"10.1002/spe.2984","report-no":null,"categories":"cs.DS cs.MS","license":"http://creativecommons.org/licenses/by/4.0/","abstract":"With disks and networks providing gigabytes per second ....\n","versions":[{"created":"Mon, 11 Jan 2021 20:31:27 GMT","version":"v1"},{"created":"Sat, 30 Jan 2021 23:57:29 GMT","version":"v2"}],"update_date":"2022-11-07","authors_parsed":[["Lemire","Daniel",""]]} -``` - 
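このように構造を明示的に定義しておくと、ネストされた要素も通常のカラムと同様にクエリできます。以下は `versions` 配列を `ARRAY JOIN` で展開して各バージョンの作成日時を取り出す簡単なスケッチです(クエリの内容は説明用の一例です)。

```sql
-- 各論文の versions 配列をバージョンごとに 1 行へ展開する例
SELECT
    id,
    ver.created AS version_created,
    ver.version AS version_label
FROM arxiv
ARRAY JOIN versions AS ver
WHERE update_date >= toDate('2022-01-01')
LIMIT 10;
```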
-例えば、`tags`という別のカラムが追加されたとします。これは単なる文字列のリストであれば`Array(String)`としてモデル化できますが、ユーザーが混合タイプの任意のタグ構造を追加できると仮定します(スコアが文字列または整数であることに注意してください)。修正したJSONドキュメント: - -```sql -{ - "id": "2101.11408", - "submitter": "Daniel Lemire", - "authors": "Daniel Lemire", - "title": "Number Parsing at a Gigabyte per Second", - "comments": "Software at https://github.com/fastfloat/fast_float and\n https://github.com/lemire/simple_fastfloat_benchmark/", - "journal-ref": "Software: Practice and Experience 51 (8), 2021", - "doi": "10.1002/spe.2984", - "report-no": null, - "categories": "cs.DS cs.MS", - "license": "http://creativecommons.org/licenses/by/4.0/", - "abstract": "With disks and networks providing gigabytes per second ....\n", - "versions": [ - { - "created": "Mon, 11 Jan 2021 20:31:27 GMT", - "version": "v1" - }, - { - "created": "Sat, 30 Jan 2021 23:57:29 GMT", - "version": "v2" - } - ], - "update_date": "2022-11-07", - "authors_parsed": [ - [ - "Lemire", - "Daniel", - "" - ] - ], - "tags": { - "tag_1": { - "name": "ClickHouse user", - "score": "A+", - "comment": "A good read, applicable to ClickHouse" - }, - "28_03_2025": { - "name": "professor X", - "score": 10, - "comment": "Didn't learn much", - "updates": [ - { - "name": "professor X", - "comment": "Wolverine found more interesting" - } - ] - } - } -} -``` - -この場合、arXivのドキュメントをすべてJSONとしてモデル化するか、単にJSONの`tags`カラムを追加することができます。以下に両方の例を提供します。 - -```sql -CREATE TABLE arxiv -( - `doc` JSON(update_date Date) -) -ENGINE = MergeTree -ORDER BY doc.update_date -``` - -:::note -JSON定義内で`update_date`カラムの型ヒントを提供します。これはオーダリング/主キーで使用するためです。これにより、ClickHouseはこのカラムがnullではないことを把握し、どの`update_date`サブカラムを使用すべきかを把握します(各タイプごとに複数が存在する場合があるため、そうでなければあいまいになります)。 -::: - -このテーブルに挿入し、次に[`JSONAllPathsWithTypes`](/sql-reference/functions/json-functions#jsonallpathswithtypes)関数と[`PrettyJSONEachRow`](/interfaces/formats/PrettyJSONEachRow)出力形式を使用して推論されたスキーマを確認できます。 - -```sql -INSERT INTO arxiv FORMAT JSONAsObject -{"id":"2101.11408","submitter":"Daniel Lemire","authors":"Daniel Lemire","title":"Number Parsing at a Gigabyte per Second","comments":"Software at https://github.com/fastfloat/fast_float and\n https://github.com/lemire/simple_fastfloat_benchmark/","journal-ref":"Software: Practice and Experience 51 (8), 2021","doi":"10.1002/spe.2984","report-no":null,"categories":"cs.DS cs.MS","license":"http://creativecommons.org/licenses/by/4.0/","abstract":"With disks and networks providing gigabytes per second ....\n","versions":[{"created":"Mon, 11 Jan 2021 20:31:27 GMT","version":"v1"},{"created":"Sat, 30 Jan 2021 23:57:29 GMT","version":"v2"}],"update_date":"2022-11-07","authors_parsed":[["Lemire","Daniel",""]],"tags":{"tag_1":{"name":"ClickHouse user","score":"A+","comment":"A good read, applicable to ClickHouse"},"28_03_2025":{"name":"professor X","score":10,"comment":"Didn't learn much","updates":[{"name":"professor X","comment":"Wolverine found more interesting"}]}}} -``` - -```sql -SELECT JSONAllPathsWithTypes(doc) -FROM arxiv -FORMAT PrettyJSONEachRow - -{ - "JSONAllPathsWithTypes(doc)": { - "abstract": "String", - "authors": "String", - "authors_parsed": "Array(Array(Nullable(String)))", - "categories": "String", - "comments": "String", - "doi": "String", - "id": "String", - "journal-ref": "String", - "license": "String", - "submitter": "String", - "tags.28_03_2025.comment": "String", - "tags.28_03_2025.name": "String", - "tags.28_03_2025.score": "Int64", - "tags.28_03_2025.updates": "Array(JSON(max_dynamic_types=16, max_dynamic_paths=256))", - "tags.tag_1.comment": "String", - 
"tags.tag_1.name": "String", - "tags.tag_1.score": "String", - "title": "String", - "update_date": "Date", - "versions": "Array(JSON(max_dynamic_types=16, max_dynamic_paths=256))" - } -} - -1行の結果。経過時間:0.003秒。 -``` - -あるいは、先ほどのスキーマを使用し、JSON `tags`カラムを持つモデル化を行うこともできます。これは一般的に好まれ、ClickHouseによる推論を最小限に抑えます: - -```sql -CREATE TABLE arxiv -( - `id` String, - `submitter` String, - `authors` String, - `title` String, - `comments` String, - `journal-ref` String, - `doi` String, - `report-no` String, - `categories` String, - `license` String, - `abstract` String, - `versions` Array(Tuple(created String, version String)), - `update_date` Date, - `authors_parsed` Array(Array(String)), - `tags` JSON() -) -ENGINE = MergeTree -ORDER BY update_date -``` - -```sql -INSERT INTO arxiv FORMAT JSONEachRow -{"id":"2101.11408","submitter":"Daniel Lemire","authors":"Daniel Lemire","title":"Number Parsing at a Gigabyte per Second","comments":"Software at https://github.com/fastfloat/fast_float and\n https://github.com/lemire/simple_fastfloat_benchmark/","journal-ref":"Software: Practice and Experience 51 (8), 2021","doi":"10.1002/spe.2984","report-no":null,"categories":"cs.DS cs.MS","license":"http://creativecommons.org/licenses/by/4.0/","abstract":"With disks and networks providing gigabytes per second ....\n","versions":[{"created":"Mon, 11 Jan 2021 20:31:27 GMT","version":"v1"},{"created":"Sat, 30 Jan 2021 23:57:29 GMT","version":"v2"}],"update_date":"2022-11-07","authors_parsed":[["Lemire","Daniel",""]],"tags":{"tag_1":{"name":"ClickHouse user","score":"A+","comment":"A good read, applicable to ClickHouse"},"28_03_2025":{"name":"professor X","score":10,"comment":"Didn't learn much","updates":[{"name":"professor X","comment":"Wolverine found more interesting"}]}}} -``` - -`tags`のサブカラムの型を推論することができます。 - -```sql -SELECT JSONAllPathsWithTypes(tags) -FROM arxiv -FORMAT PrettyJSONEachRow - -{ - "JSONAllPathsWithTypes(tags)": { - "28_03_2025.comment": "String", - "28_03_2025.name": "String", - "28_03_2025.score": "Int64", - "28_03_2025.updates": "Array(JSON(max_dynamic_types=16, max_dynamic_paths=256))", - "tag_1.comment": "String", - "tag_1.name": "String", - "tag_1.score": "String" - } -} - -1行の結果。経過時間:0.002秒。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/best-practices/json_type.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/best-practices/json_type.md.hash deleted file mode 100644 index d3b3f7a821d..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/best-practices/json_type.md.hash +++ /dev/null @@ -1 +0,0 @@ -bb0ab1729b167fe6 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/best-practices/minimize_optimize_joins.md b/i18n/jp/docusaurus-plugin-content-docs/current/best-practices/minimize_optimize_joins.md deleted file mode 100644 index 011ef5c55c8..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/best-practices/minimize_optimize_joins.md +++ /dev/null @@ -1,66 +0,0 @@ ---- -slug: '/best-practices/minimize-optimize-joins' -sidebar_position: 10 -sidebar_label: 'JOINの最小化と最適化' -title: 'JOINの最小化と最適化' -description: 'JOINに関するベストプラクティスを説明するページ' ---- - -import Image from '@theme/IdealImage'; -import joins from '@site/static/images/bestpractices/joins-speed-memory.png'; - -ClickHouseは、さまざまなJOINタイプとアルゴリズムをサポートしており、最近のリリースではJOINのパフォーマンスが大幅に向上しています。ただし、JOINは本質的に単一の非正規化テーブルからのクエリよりもコストが高くなります。非正規化は、クエリ時間から挿入または前処理時間への計算作業をシフトさせるため、実行時のレイテンシが大幅に低下することが多いです。リアルタイムまたはレイテンシ感受性の高い分析クエリの場合は、**非正規化が強く推奨されます**。 - -一般的に、以下のような場合に非正規化を行うべきです: - -- 
テーブルが頻繁に変更されない場合、またはバッチリフレッシュが許容される場合。 -- 関係が多対多ではないか、基数が過度に高くない場合。 -- クエリされるカラムの限定されたサブセットのみが必要な場合、つまり特定のカラムを非正規化から除外できる場合。 -- Flinkのような上流システムに処理をシフトできる能力がある場合、リアルタイムでの強化やフラット化が管理できます。 - -すべてのデータを非正規化する必要はありません - よくクエリされる属性に焦点を当ててください。また、[マテリアライズドビュー](/best-practices/use-materialized-views)を検討して、サブテーブル全体を複製するのではなく、逐次的に集計を計算することをお勧めします。スキーマの更新がまれであり、レイテンシが重要な場合、非正規化は最良のパフォーマンストレードオフを提供します。 - -ClickHouseでのデータの非正規化に関する完全なガイドは[こちら](/data-modeling/denormalization)を参照してください。 - -## JOINが必要な場合 {#when-joins-are-required} - -JOINが必要な場合は、**少なくともバージョン24.12、できれば最新バージョンを使用してください**。JOINのパフォーマンスは、新しいリリースごとに改善され続けています。ClickHouse 24.12以降、クエリプランナーは最適なパフォーマンスのために自動的に小さなテーブルをJOINの右側に配置します。このタスクは以前は手動で行う必要がありました。さらに、より侵攻的なフィルタープッシュダウンや複数のJOINの自動再配置が近日中に登場する予定です。 - -JOINのパフォーマンスを向上させるためのベストプラクティスを次の通りに実践してください: - -* **直交積を避ける**: 左側の値が右側の複数の値と一致する場合、JOINは複数の行を返します - いわゆる直交積です。右側のすべての一致が必要でなく、単一の一致だけが必要な場合は、`ANY` JOIN(例:`LEFT ANY JOIN`)を使用できます。これらは通常のJOINよりも速く、メモリを少なく使用します。 -* **JOINされるテーブルのサイズを削減する**: JOINのランタイムとメモリ消費は、左側と右側のテーブルのサイズに比例して増加します。JOINによって処理されるデータ量を減らすために、`WHERE`または`JOIN ON`句に追加のフィルタ条件を追加してください。ClickHouseはフィルタ条件をクエリプランのできるだけ深い位置にプッシュダウンします。フィルタが自動的にプッシュダウンされない場合(何らかの理由で)、JOINの一方をサブクエリとして再記述して強制的にプッシュダウンさせます。 -* **適切な場合は辞書経由の直接JOINを使用する**: ClickHouseの標準JOINは、2つのフェーズで実行されます。右側を反復してハッシュテーブルを構築するビルドフェーズの後、左側を反復してハッシュテーブルルックアップを通じて一致するJOINパートナーを見つけるプローブフェーズです。右側が[辞書](/dictionary)またはキーと値の特性を持つ別のテーブルエンジン(例:[EmbeddedRocksDB](/engines/table-engines/integrations/embedded-rocksdb)や[Joinテーブルエンジン](/engines/table-engines/special/join))である場合、ClickHouseは「直接」JOINアルゴリズムを使用でき、ハッシュテーブルを構築する必要がなくなり、クエリ処理を高速化します。これは`INNER`または`LEFT OUTER` JOINに対して機能し、リアルタイムの分析ワークロードに最適です。 -* **JOINのためにテーブルのソートを活用する**: ClickHouseの各テーブルは、テーブルの主キーのカラムによってソートされています。`full_sorting_merge`や`partial_merge`のようなソートマージJOINアルゴリズムを使用してテーブルのソートを利用できます。ハッシュテーブルに基づく標準のJOINアルゴリズム(以下の`parallel_hash`、`hash`、`grace_hash`を参照)とは異なり、ソートマージJOINアルゴリズムはまずソートを行い、次に両方のテーブルをマージします。クエリがそれぞれの主キーのカラムで両方のテーブルをJOINする場合、ソートステップが省略される最適化があります。 -* **ディスクスピルJOINを避ける**: JOINの中間状態(例:ハッシュテーブル)は、大きくなりすぎて主メモリに収まらなくなることがあります。この場合、ClickHouseはデフォルトでアウトオブメモリーエラーを返します。一部のJOINアルゴリズム(下記参照)、例えば[`grace_hash`](https://clickhouse.com/blog/clickhouse-fully-supports-joins-hash-joins-part2)、[`partial_merge`](https://clickhouse.com/blog/clickhouse-fully-supports-joins-full-sort-partial-merge-part3)、[`full_sorting_merge`](https://clickhouse.com/blog/clickhouse-fully-supports-joins-full-sort-partial-merge-part3)などは、中間状態をディスクにスピルしてクエリの実行を続けることができます。ただし、ディスクアクセスがJOIN処理を大幅に遅くする可能性があるため、これらのJOINアルゴリズムは慎重に使用すべきです。代わりに中間状態のサイズを減らすために他の方法でJOINクエリを最適化することをお勧めします。 -* **外部JOINにおけるデフォルト値を不一致マーカーとして使用する**: 左/右/完全外部JOINは、左/右/両方のテーブルからすべての値を含みます。他のテーブルで特定の値に対するJOINパートナーが見つからない場合、ClickHouseはJOINパートナーを特別なマーカーで置き換えます。SQL標準では、データベースがNULLをそのようなマーカーとして使用することが義務付けられています。ClickHouseでは、結果カラムをNullableでラップする必要があり、追加のメモリとパフォーマンスオーバーヘッドが発生します。代替案として、`join_use_nulls = 0`の設定を構成し、結果カラムのデータ型のデフォルト値をマーカーとして使用できます。 - -:::note 辞書の使用に注意 -ClickHouseでJOINに辞書を使用する際は、辞書が設計上、重複キーを許可しないことを理解することが重要です。データの読み込み中、重複キーは静かに重複削除され、特定のキーに対して最後に読み込まれた値のみが保持されます。この動作により、辞書は一対一または多対一の関係に理想的であり、最新または公的な値のみが必要です。しかし、一対多または多対多の関係(例:役者に役割を結合する場合、役者が複数の役割を持つ可能性がある)で辞書を使用すると、すべての一致する行のうち1つを除いて静かにデータが失われます。そのため、辞書は複数の一致を通じて完全な関係の忠実度を要求されるシナリオには適していません。 -::: - -## 適切なJOINアルゴリズムの選択 {#choosing-the-right-join-algorithm} - -ClickHouseは、スピードとメモリのトレードオフを行ういくつかのJOINアルゴリズムをサポートしています: - -* **パラレルハッシュJOIN(デフォルト)**: メモリに収まる小中規模の右側テーブルに対して高速です。 -* **直接JOIN**: 辞書(またはキーと値の特性を持つ他のテーブルエンジン)を使用する場合に理想的で、`INNER`または`LEFT ANY JOIN`のための最速の方法であり、ハッシュテーブルを構築する必要がありません。 -* 
**フルソートマージJOIN**: 両方のテーブルがJOINキーでソートされている場合に効率的です。 -* **パーシャルマージJOIN**: メモリを最小限に抑えますが、遅くなります - 大きなテーブルを限られたメモリで結合するのに最適です。 -* **グレースハッシュJOIN**: 柔軟でメモリチューン可能で、大規模データセットにおけるパフォーマンス特性の調整に適しています。 - - - -:::note -各アルゴリズムには、JOINタイプに対する異なるサポートがあります。各アルゴリズムのサポートされているJOINタイプの完全なリストは[こちら](/guides/joining-tables#choosing-a-join-algorithm)で確認できます。 -::: - -ClickHouseに最適なアルゴリズムを選ばせるには、`join_algorithm = 'auto'`(デフォルト)の設定を使用するか、ワークロードに応じて明示的に制御します。パフォーマンスまたはメモリオーバーヘッドを最適化するためにJOINアルゴリズムを選択する必要がある場合は、[こちらのガイド](/guides/joining-tables#choosing-a-join-algorithm)をお勧めします。 - -最適なパフォーマンスを得るためには: - -* 高パフォーマンスのワークロードではJOINを最小限に抑えます。 -* クエリごとに3~4つ以上のJOINを避けます。 -* 実データで異なるアルゴリズムをベンチマークします - パフォーマンスはJOINキーの分布とデータサイズに基づいて変動します。 - -JOIN最適化戦略、JOINアルゴリズム、およびそのチューニング方法については、[ClickHouseのドキュメント](/guides/joining-tables)およびこの[ブログシリーズ](https://clickhouse.com/blog/clickhouse-fully-supports-joins-part1)を参照してください。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/best-practices/minimize_optimize_joins.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/best-practices/minimize_optimize_joins.md.hash deleted file mode 100644 index 645d4e888c7..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/best-practices/minimize_optimize_joins.md.hash +++ /dev/null @@ -1 +0,0 @@ -29da1e2d9dc33211 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/best-practices/partitioning_keys.mdx b/i18n/jp/docusaurus-plugin-content-docs/current/best-practices/partitioning_keys.mdx deleted file mode 100644 index 1a6397c66c1..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/best-practices/partitioning_keys.mdx +++ /dev/null @@ -1,66 +0,0 @@ ---- -'slug': '/best-practices/choosing-a-partitioning-key' -'sidebar_position': 10 -'sidebar_label': 'パーティションキーの選択' -'title': 'パーティションキーの選択' -'description': 'パーティションキーの選択方法について説明したページ' ---- - -import Image from '@theme/IdealImage'; -import partitions from '@site/static/images/bestpractices/partitions.png'; -import merges_with_partitions from '@site/static/images/bestpractices/merges_with_partitions.png'; - -:::note A data management technique -パーティショニングは主にデータ管理技術であり、クエリ最適化ツールではありません。特定のワークロードでパフォーマンスを向上させることができる一方で、クエリを加速させるための最初の手段として使用すべきではありません。パーティショニングキーは慎重に選択し、その影響を明確に理解した上で、データライフサイクルのニーズや十分に理解されたアクセスパターンと一致する場合にのみ適用されるべきです。 -::: - -ClickHouseでは、パーティショニングは指定されたキーに基づいてデータを論理的なセグメントに整理します。これはテーブル作成時の `PARTITION BY` 句を使用して定義され、通常、時間間隔、カテゴリ、または他のビジネス関連次元によって行をグループ化するために使用されます。パーティショニング式の各ユニークな値は、ディスク上の独自の物理パーティションを形成し、ClickHouseはこれらの値ごとにデータを別々のパーツに保存します。パーティショニングはデータ管理を改善し、保持ポリシーを簡素化し、特定のクエリパターンに役立つことがあります。 - -例えば、次のパーティショニングキーとして `toStartOfMonth(date)` を持つUKの支払価格データセットテーブルを考えてみましょう。 - -```sql -CREATE TABLE uk.uk_price_paid_simple_partitioned -( - date Date, - town LowCardinality(String), - street LowCardinality(String), - price UInt32 -) -ENGINE = MergeTree -ORDER BY (town, street) -PARTITION BY toStartOfMonth(date) -``` - -テーブルに一連の行が挿入されるたびに、すべての挿入された行を含む1つのデータパートを作成する代わりに([ここ](/parts)で説明されているように)、ClickHouseは挿入された行のユニークなパーティションキー値ごとに新しいデータパートを1つ作成します。 - - - -ClickHouseサーバは、上記の挿入の4行から構成される例の行を、そのパーティションキー値 `toStartOfMonth(date)` によって最初に分割します。その後、特定された各パーティションについて、行は[通常通り](/parts)にいくつかの順次プロセス(① ソート、② カラムへの分割、③ 圧縮、④ ディスクへの書き込み)を実行して処理されます。 - -パーティショニングについての詳細な説明については、[このガイド](/partitions)をお勧めします。 - -パーティショニングが有効な場合、ClickHouseはパーティション内のデータパーツのみを[マージ](/merges)しますが、パーティション間ではマージしません。これを上記の例のテーブルに適用すると、次のようになります。 - - - -## パーティショニングの適用 {#applications-of-partitioning} - 
-パーティショニングは、特に監視および分析のユースケースにおいて、ClickHouseの大規模データセットを管理するための強力なツールです。これは、時間やビジネスロジックに沿った全体のパーティションを1回のメタデータ操作で削除、移動、またはアーカイブできることで、効率的なデータライフサイクル操作を実現します。これは、行レベルの削除やコピー操作よりも大幅に速く、リソースを消費しません。パーティショニングは、TTLや層状ストレージなどのClickHouseの機能とクリーンに統合され、カスタムオーケストレーションなしで保持ポリシーやホット/コールドストレージ戦略を実装することができます。たとえば、最近のデータは速いSSDストレージに保持され、古いパーティションは自動的に安価なオブジェクトストレージに移動されます。 - -パーティショニングは、特定のワークロードでクエリパフォーマンスを向上させることがありますが、応答時間に悪影響を及ぼすこともあります。 - -パーティショニングキーが主キーに含まれておらず、そのキーで絞り込みを行う場合、ユーザーはパーティショニングによってクエリパフォーマンスが向上するのを感じることがあるかもしれません。例については、[こちら](/partitions#query-optimization)を参照してください。 - -逆に、クエリがパーティションをまたいで行なわれる必要がある場合、合計パーツ数の増加によりパフォーマンスが悪化することがあります。このため、ユーザーはパーティショニングをクエリ最適化技術として検討する前に、自身のアクセスパターンを理解しておくべきです。 - -要約すると、ユーザーは主にパーティショニングをデータ管理技術として考えるべきです。データ管理の例については、監視ユースケースガイドの["データの管理"](/observability/managing-data)およびコアコンセプト - テーブルパーティションの["テーブルパーティションは何に使用されるか?"](/partitions#data-management)を参照してください。 - -## 低カーディナリティのパーティショニングキーを選択する {#choose-a-low-cardinality-partitioning-key} - -重要なのは、パーツの数が多いとクエリパフォーマンスに悪影響を及ぼすことです。したがって、ClickHouseは、[総数](/operations/settings/merge-tree-settings#max_parts_in_total)または[パーティションごとの数](/operations/settings/merge-tree-settings#parts_to_throw_insert)が指定された制限を超えると、[「パーツが多すぎる」](/knowledgebase/exception-too-many-parts)エラーで応答します。 - -パーティショニングキーに適切な**カーディナリティ**を選択することは重要です。特異なパーティション値の数が多い高カーディナリティのパーティショニングキーは、データパーツの proliferate を引き起こす可能性があります。ClickHouse はパーティション間でパーツをマージしないため、パーティションが多すぎると、マージされていないパーツが多すぎる結果となり、「パーツが多すぎる」エラーが発生します。[マージは重要です](/merges) ストレージの断片化を減少させ、クエリ速度を最適化するために必要ですが、高カーディナリティのパーティションでは、マージの可能性が失われます。 - -対照的に、**低カーディナリティのパーティショニングキー**(100〜1,000の異なる値未満)は通常最適です。これにより、効率的なパートのマージが可能になり、メタデータのオーバーヘッドを低く保ち、ストレージ内での過剰なオブジェクト作成を回避できます。さらに、ClickHouseはパーティションカラムに自動的にMinMaxインデックスを構築するため、これらのカラムでフィルタリングするクエリの速度が大幅に向上します。たとえば、テーブルが `toStartOfMonth(date)` によってパーティショニングされている場合、月でフィルタリングすることで、エンジンは無関係なパーティションとそのパーツを完全にスキップすることができます。 - -パーティショニングは、いくつかのクエリパターンでパフォーマンスを改善することができますが、主にデータ管理機能です。多くの場合、すべてのパーティションをまたぐクエリは、データの断片化が増し、スキャンされるパーツが増えるため、非パーティショニングテーブルを使用するよりも遅くなる場合があります。パーティショニングは賢く使い、選択したキーが低カーディナリティであり、データライフサイクルポリシー(例えば、TTLによる保持)と整合していることを常に確認してください。パーティショニングが必要かどうかわからない場合は、まずはそれなしで始め、観察されたアクセスパターンに基づいて後で最適化を行うことをお勧めします。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/best-practices/partitioning_keys.mdx.hash b/i18n/jp/docusaurus-plugin-content-docs/current/best-practices/partitioning_keys.mdx.hash deleted file mode 100644 index 74d12dceffe..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/best-practices/partitioning_keys.mdx.hash +++ /dev/null @@ -1 +0,0 @@ -38d17b2c7d2bcbd5 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/best-practices/select_data_type.md b/i18n/jp/docusaurus-plugin-content-docs/current/best-practices/select_data_type.md deleted file mode 100644 index de4b8f9bf1f..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/best-practices/select_data_type.md +++ /dev/null @@ -1,138 +0,0 @@ ---- -slug: '/best-practices/select-data-types' -sidebar_position: 10 -sidebar_label: 'データ型を選択' -title: 'データ型を選択' -description: 'ClickHouse でデータ型を選択する方法を説明したページ' ---- - -import NullableColumns from '@site/i18n/jp/docusaurus-plugin-content-docs/current/best-practices/_snippets/_avoid_nullable_columns.md'; - -One of the core reasons for ClickHouse's query performance is its efficient data compression. Less data on disk results in faster queries and inserts by minimizing I/O overhead. 
ClickHouse's column-oriented architecture naturally arranges similar data adjacently, enabling compression algorithms and codecs to reduce data size dramatically. To maximize these compression benefits, it's essential to carefully choose appropriate data types. - -Compression efficiency in ClickHouse depends mainly on three factors: the ordering key, data types, and codecs, all defined through the table schema. Choosing optimal data types yields immediate improvements in both storage and query performance. - -Some straightforward guidelines can significantly enhance the schema: - -* **厳密な型を使用する:** 常にカラムに正しいデータ型を選択してください。数値および日付フィールドには、一般的な文字列型ではなく、適切な数値および日付型を使用する必要があります。これにより、フィルタリングや集計に対する正しい意味が確保されます。 - -* **Nullableカラムを避ける:** Nullableカラムは、null値を追跡するための別のカラムを維持することによる追加のオーバーヘッドを引き起こします。空とnullの状態を区別するために明示的に必要ない限り、Nullableを使用しないでください。それ以外の場合、デフォルト値やゼロ相当の値で通常は十分です。この型を必要に応じて避けるべき理由については、[Nullableカラムを避ける](/best-practices/select-data-types#avoid-nullable-columns)を参照してください。 - -* **数値精度を最小限に抑える:** 予想されるデータ範囲をまだ満たす最小のビット幅を持つ数値型を選択してください。たとえば、負の値が不要で、範囲が0〜65535に収まる場合は、[Int32の代わりにUInt16を選択する](/sql-reference/data-types/int-uint)ことをお勧めします。 - -* **日付および時間精度を最適化する:** クエリの要件を満たす最も粗い日付または日時型を選択してください。日付のみのフィールドにはDateまたはDate32を使用し、ミリ秒以上の細かい精度が不可欠でない限り、DateTime64ではなくDateTimeを使用してください。 - -* **LowCardinalityおよび特殊型を活用する:** 約10,000未満のユニーク値のカラムには、辞書エンコーディングを用いてストレージを大幅に削減するためにLowCardinality型を使用してください。同様に、カラム値が厳密に固定長の文字列である場合のみFixedStringを使用し、有限の値のセットを持つカラムにはEnum型を好んで使用して、効率的なストレージと組み込みのデータ検証を可能にします。 - -* **データ検証用のEnums:** Enum型は、列挙型を効率的にエンコードするために使用できます。Enumsは、保存する必要のあるユニーク値の数に応じて8ビットまたは16ビットとなります。挿入時の関連する検証が必要な場合(未宣言の値は拒否されます)や、Enum値の自然な順序を利用したクエリを実行したい場合には、これを使用することを検討してください。例として、ユーザーの反応を含むフィードバックカラムEnum(':(' = 1, ':|' = 2, ':)' = 3)を想像してください。 - -## 例 {#example} - -ClickHouseは、型の最適化を簡素化するための組み込みツールを提供しています。たとえば、スキーマ推論は最初の型を自動的に特定できます。Parquet形式で公開されているStack Overflowデータセットを考慮してください。[`DESCRIBE`](/sql-reference/statements/describe-table)コマンドを使用して簡単なスキーマ推論を実行すると、初期の最適化されていないスキーマが提供されます。 - -:::note -デフォルトでは、ClickHouseはこれを同等のNullable型にマッピングします。これは、スキーマが行のサンプルのみに基づいて推論されるため、望ましい動作です。 -::: - -```sql -DESCRIBE TABLE s3('https://datasets-documentation.s3.eu-west-3.amazonaws.com/stackoverflow/parquet/posts/*.parquet') -SETTINGS describe_compact_output = 1 - -┌─name───────────────────────┬─type──────────────────────────────┐ -│ Id │ Nullable(Int64) │ -│ PostTypeId │ Nullable(Int64) │ -│ AcceptedAnswerId │ Nullable(Int64) │ -│ CreationDate │ Nullable(DateTime64(3, 'UTC')) │ -│ Score │ Nullable(Int64) │ -│ ViewCount │ Nullable(Int64) │ -│ Body │ Nullable(String) │ -│ OwnerUserId │ Nullable(Int64) │ -│ OwnerDisplayName │ Nullable(String) │ -│ LastEditorUserId │ Nullable(Int64) │ -│ LastEditorDisplayName │ Nullable(String) │ -│ LastEditDate │ Nullable(DateTime64(3, 'UTC')) │ -│ LastActivityDate │ Nullable(DateTime64(3, 'UTC')) │ -│ Title │ Nullable(String) │ -│ Tags │ Nullable(String) │ -│ AnswerCount │ Nullable(Int64) │ -│ CommentCount │ Nullable(Int64) │ -│ FavoriteCount │ Nullable(Int64) │ -│ ContentLicense │ Nullable(String) │ -│ ParentId │ Nullable(String) │ -│ CommunityOwnedDate │ Nullable(DateTime64(3, 'UTC')) │ -│ ClosedDate │ Nullable(DateTime64(3, 'UTC')) │ -└────────────────────────────┴───────────────────────────────────┘ - -22 rows in set. Elapsed: 0.130 sec. 
-``` - -:::note -以下に、stackoverflow/parquet/postsフォルダー内のすべてのファイルを読み込むためにグロブパターン*.parquetを使用しています。 -::: - -初期のシンプルなルールをpostsテーブルに適用することで、各カラムに最適な型を特定できます: - -| Column | Is Numeric | Min, Max | Unique Values | Nulls | Comment | Optimized Type | -|------------------------|------------|------------------------------------------------------------------------|----------------|--------|----------------------------------------------------------------------------------------------|------------------------------------------| -| `PostTypeId` | Yes | 1, 8 | 8 | No | | `Enum('Question' = 1, 'Answer' = 2, 'Wiki' = 3, 'TagWikiExcerpt' = 4, 'TagWiki' = 5, 'ModeratorNomination' = 6, 'WikiPlaceholder' = 7, 'PrivilegeWiki' = 8)` | -| `AcceptedAnswerId` | Yes | 0, 78285170 | 12282094 | Yes | Nullを0の値と区別する | UInt32 | -| `CreationDate` | No | 2008-07-31 21:42:52.667000000, 2024-03-31 23:59:17.697000000 | - | No | ミリ秒単位の精度は不要、DateTimeを使用 | DateTime | -| `Score` | Yes | -217, 34970 | 3236 | No | | Int32 | -| `ViewCount` | Yes | 2, 13962748 | 170867 | No | | UInt32 | -| `Body` | No | - | - | No | | String | -| `OwnerUserId` | Yes | -1, 4056915 | 6256237 | Yes | | Int32 | -| `OwnerDisplayName` | No | - | 181251 | Yes | Nullは空文字列と見なす | String | -| `LastEditorUserId` | Yes | -1, 9999993 | 1104694 | Yes | 0は使われていない値でNullに使用可能 | Int32 | -| `LastEditorDisplayName` | No | - | 70952 | Yes | Nullは空文字列として見なす。LowCardinalityを試したが利益なし | String | -| `LastEditDate` | No | 2008-08-01 13:24:35.051000000, 2024-04-06 21:01:22.697000000 | - | No | ミリ秒単位の精度は不要、DateTimeを使用 | DateTime | -| `LastActivityDate` | No | 2008-08-01 12:19:17.417000000, 2024-04-06 21:01:22.697000000 | - | No | ミリ秒単位の精度は不要、DateTimeを使用 | DateTime | -| `Title` | No | - | - | No | Nullは空文字列として見なす | String | -| `Tags` | No | - | - | No | Nullは空文字列として見なす | String | -| `AnswerCount` | Yes | 0, 518 | 216 | No | Nullと0は同一扱い | UInt16 | -| `CommentCount` | Yes | 0, 135 | 100 | No | Nullと0は同一扱い | UInt8 | -| `FavoriteCount` | Yes | 0, 225 | 6 | Yes | Nullと0は同一扱い | UInt8 | -| `ContentLicense` | No | - | 3 | No | LowCardinalityがFixedStringよりも優れています | LowCardinality(String) | -| `ParentId` | No | - | 20696028 | Yes | Nullは空文字列として見なす | String | -| `CommunityOwnedDate` | No | 2008-08-12 04:59:35.017000000, 2024-04-01 05:36:41.380000000 | - | Yes | Nullの場合はデフォルト1970-01-01を考慮。ミリ秒単位の精度は不要、DateTimeを使用 | DateTime | -| `ClosedDate` | No | 2008-09-04 20:56:44, 2024-04-06 18:49:25.393000000 | - | Yes | Nullの場合はデフォルト1970-01-01を考慮。ミリ秒単位の精度は不要、DateTimeを使用 | DateTime | - -:::note tip -カラムの型を特定するには、その数値範囲とユニーク値の数を理解することが必要です。すべてのカラムの範囲および異なる値の数を見つけるには、ユーザーはシンプルなクエリ`SELECT * APPLY min, * APPLY max, * APPLY uniq FROM table FORMAT Vertical`を使用できます。これをデータの少ないサブセットに対して実行することをお勧めします。これは高コストです。 -::: - -これにより、次のような最適化されたスキーマが得られます(型に関して): - -```sql -CREATE TABLE posts -( - Id Int32, - PostTypeId Enum('Question' = 1, 'Answer' = 2, 'Wiki' = 3, 'TagWikiExcerpt' = 4, 'TagWiki' = 5, - 'ModeratorNomination' = 6, 'WikiPlaceholder' = 7, 'PrivilegeWiki' = 8), - AcceptedAnswerId UInt32, - CreationDate DateTime, - Score Int32, - ViewCount UInt32, - Body String, - OwnerUserId Int32, - OwnerDisplayName String, - LastEditorUserId Int32, - LastEditorDisplayName String, - LastEditDate DateTime, - LastActivityDate DateTime, - Title String, - Tags String, - AnswerCount UInt16, - CommentCount UInt8, - FavoriteCount UInt8, - ContentLicense LowCardinality(String), - ParentId String, - CommunityOwnedDate DateTime, - ClosedDate DateTime -) -ENGINE = MergeTree -ORDER BY tuple() -``` - -## Nullableカラムを避ける 
{#avoid-nullable-columns} - - diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/best-practices/select_data_type.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/best-practices/select_data_type.md.hash deleted file mode 100644 index e7aaabd7289..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/best-practices/select_data_type.md.hash +++ /dev/null @@ -1 +0,0 @@ -4c9fc344505bb133 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/best-practices/selecting_an_insert_strategy.md b/i18n/jp/docusaurus-plugin-content-docs/current/best-practices/selecting_an_insert_strategy.md deleted file mode 100644 index b0da77d4d8f..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/best-practices/selecting_an_insert_strategy.md +++ /dev/null @@ -1,145 +0,0 @@ ---- -slug: '/best-practices/selecting-an-insert-strategy' -sidebar_position: 10 -sidebar_label: 'インサートストラテジーの選択' -title: 'インサートストラテジーの選択' -description: 'ClickHouse でインサートストラテジーを選択する方法について説明したページ' ---- - -import Image from '@theme/IdealImage'; -import insert_process from '@site/static/images/bestpractices/insert_process.png'; -import async_inserts from '@site/static/images/bestpractices/async_inserts.png'; -import AsyncInserts from '@site/i18n/jp/docusaurus-plugin-content-docs/current/best-practices/_snippets/_async_inserts.md'; -import BulkInserts from '@site/i18n/jp/docusaurus-plugin-content-docs/current/best-practices/_snippets/_bulk_inserts.md'; - -効率的なデータ取り込みは、高性能のClickHouse展開の基盤を形成します。適切な挿入戦略を選択することで、スループット、コスト、信頼性に大きな影響を与えることができます。このセクションでは、ワークロードに最適な決定を下すためのベストプラクティス、トレードオフ、および設定オプションについて概説します。 - -:::note -以下は、クライアントを介してClickHouseにデータをプッシュすることを想定しています。例えば、[s3](/sql-reference/table-functions/s3)や[gcs](/sql-reference/table-functions/gcs)などの組み込みテーブル関数を使用してClickHouseにデータをプルしている場合は、私たちのガイド["S3挿入および読み取りパフォーマンスの最適化"](/integrations/s3/performance)をお勧めします。 -::: - -## デフォルトで同期挿入 {#synchronous-inserts-by-default} - -デフォルトでは、ClickHouseへの挿入は同期的です。各挿入クエリは即座にディスク上にストレージパーツを作成し、メタデータやインデックスを含みます。 - -:::note クライアント側でデータをバッチ処理できる場合は、同期挿入を使用してください。そうでない場合は、以下の[非同期挿入](#asynchronous-inserts)を参照してください。 -::: - -以下にClickHouseのMergeTree挿入メカニクスを簡単に説明します。 - - - -#### クライアント側のステップ {#client-side-steps} - -最適なパフォーマンスを得るためには、データを① [バッチ処理](https://clickhouse.com/blog/asynchronous-data-inserts-in-clickhouse#data-needs-to-be-batched-for-optimal-performance)し、バッチサイズを**最初の決定**とします。 - -ClickHouseは挿入されたデータをディスクに、テーブルの主キー列によって[順序付けて](/guides/best-practices/sparse-primary-indexes#data-is-stored-on-disk-ordered-by-primary-key-columns)格納します。**2番目の決定**は、サーバーへの送信前にデータを②事前にソートするかどうかです。バッチが主キー列によって事前にソートされた状態で到着した場合、ClickHouseは⑨ソートステップを[スキップ](https://github.com/ClickHouse/ClickHouse/blob/94ce8e95404e991521a5608cd9d636ff7269743d/src/Storages/MergeTree/MergeTreeDataWriter.cpp#L595)でき、取り込みが迅速になります。 - -取り込むデータに事前定義された形式がない場合、**主要な決定**は形式を選択することです。ClickHouseは[70以上の形式](/interfaces/formats)でデータの挿入をサポートしています。ただし、ClickHouseのコマンドラインクライアントまたはプログラミング言語クライアントを使用する場合、この選択はしばしば自動的に処理されます。必要に応じて、この自動選択を明示的にオーバーライドすることも可能です。 - -次の**主要な決定**は、④データをClickHouseサーバーに送信する前に圧縮するかどうかです。圧縮は転送サイズを減少させ、ネットワークの効率を向上させ、特に大規模なデータセットにおいて、より迅速なデータ転送と帯域幅使用量の低下をもたらします。 - -データは⑤ClickHouseのネットワークインターフェースに転送されます—[ネイティブ](/interfaces/tcp)または[HTTP](/interfaces/http)インターフェースのいずれか(この投稿で後ほど[比較](https://clickhouse.com/blog/clickhouse-input-format-matchup-which-is-fastest-most-efficient#clickhouse-client-defaults)します)。 - -#### サーバー側のステップ {#server-side-steps} - -データを⑥受信した後、ClickHouseは圧縮が使用されている場合は⑦それを解凍し、次に元の送信形式から⑧解析します。 - 
-そのフォーマットデータの値とターゲットテーブルの[DDL](/sql-reference/statements/create/table)ステートメントを使用して、ClickHouseは⑨メモリ内の[ブロック](/development/architecture#block)をMergeTree形式で構築し、もしそれらが事前にソートされていない場合は⑩[主キー列で](/parts#what-are-table-parts-in-clickhouse)行をソートし、⑪[sparse primary index](/guides/best-practices/sparse-primary-indexes)を作成し、⑫[列ごとの圧縮](/parts#what-are-table-parts-in-clickhouse)を適用し、⑬データを新しい⑭[データパーツ](/parts)としてディスクに書き込みます。 - -### 同期の場合はバッチ挿入 {#batch-inserts-if-synchronous} - - - -### 冪等性のあるリトライを確保 {#ensure-idempotent-retries} - -同期挿入は**冪等性**があります。MergeTreeエンジンを使用すると、ClickHouseはデフォルトで挿入を重複排除します。これにより、ネットワーク中断によってクライアントが応答を受け取れなかったなど、不明瞭な障害ケースに対して保護されます。 - -* 挿入が成功したが、ネットワーク中断によりクライアントが確認を受け取れなかった。 -* サーバー側で挿入が失敗し、タイムアウトした。 - -どちらのケースでも、**挿入をリトライするのは安全です** - バッチ内容と順序が同じである限り。したがって、クライアントが一貫してリトライし、データを変更または順序を変更しないことが重要です。 - -### 正しい挿入ターゲットを選択 {#choose-the-right-insert-target} - -シャードクラスターの場合、2つのオプションがあります: - -* **MergeTree**または**ReplicatedMergeTree**テーブルに直接挿入します。クライアントがシャード間で負荷分散を行える場合、これは最も効率的なオプションです。`internal_replication = true`により、ClickHouseはレプリケーションを透明に処理します。 -* [Distributed table](/engines/table-engines/special/distributed)に挿入します。これにより、クライアントは任意のノードにデータを送信し、ClickHouseがそれを正しいシャードに転送します。これは単純ですが、追加の転送ステップによりややパフォーマンスが低下します。`internal_replication = true`は引き続き推奨されます。 - -**ClickHouse Cloudでは、すべてのノードが同一の単一シャードに対して読み書きします。挿入はノード間で自動的にバランスされます。ユーザーは単に公開されたエンドポイントに挿入を送信することができます。** - -### 正しい形式を選択 {#choose-the-right-format} - -効率的なデータ取り込みにおいて、適切な入力形式を選択することが重要です。70以上のサポートされている形式があるため、最もパフォーマンスの高いオプションを選ぶことは、挿入速度、CPUおよびメモリ使用量、全体的なシステム効率に大きな影響を及ぼします。 - -柔軟性はデータエンジニアリングやファイルベースのインポートに役立ちますが、**アプリケーションはパフォーマンス志向の形式を優先すべきです**: - -* **ネイティブ形式**(推奨):最も効率的。列指向で、サーバー側で必要な解析が最小限です。デフォルトでGoおよびPythonクライアントで使用されます。 -* **RowBinary**:効率的な行ベースの形式で、カラム指向への変換がクライアント側で難しい場合に最適です。Javaクライアントで使用されます。 -* **JSONEachRow**:使いやすいが解析コストが高いです。低ボリュームのユースケースや迅速な統合に適しています。 - -### 圧縮を使用 {#use-compression} - -圧縮は、ネットワークのオーバーヘッドを削減し、挿入を加速し、ClickHouseにおけるストレージコストを低下させる上で重要な役割を果たします。効果的に使用することで、データ形式やスキーマを変更することなく、取り込みパフォーマンスを向上させます。 - -挿入データを圧縮すると、ネットワーク経由で送信されるペイロードのサイズが減少し、帯域幅使用量が最小化され、伝送が加速されます。 - -挿入においては、ネイティブ形式で使用すると特に効果的です。この形式はClickHouseの内部の列指向ストレージモデルにすでにマッチしています。この設定では、サーバーは迅速にデータを解凍し、最小限の変換で直接データを保存できます。 - -#### スピードにはLZ4を、圧縮率にはZSTDを使用 {#use-lz4-for-speed-zstd-for-compression-ratio} - -ClickHouseはデータ転送中にいくつかの圧縮コーデックをサポートしています。一般的なオプションは2つあります: - -* **LZ4**:高速で軽量。CPUオーバーヘッドが最小限で、データサイズを大幅に削減します。高スループットの挿入に最適で、ほとんどのClickHouseクライアントでデフォルトになっています。 -* **ZSTD**:より高い圧縮率を持ちますが、よりCPU集約的です。ネットワーク転送コストが高い場合(地域間やクラウドプロバイダーのシナリオなど)に役立ちますが、クライアント側の計算およびサーバー側の解凍時間をわずかに増加させます。 - -ベストプラクティス:帯域幅が制約されている場合やデータ流出コストがかかる場合を除き、LZ4を使用してください。その場合はZSTDを検討してください。 - -:::note -[FastFormatsベンチマーク](https://clickhouse.com/blog/clickhouse-input-format-matchup-which-is-fastest-most-efficient)からのテストでは、LZ4圧縮されたネイティブ挿入がデータサイズを50%以上削減し、5.6 GiBのデータセットに対して取り込み時間を150秒から131秒に短縮しました。ZSTDに切り替えた場合、同じデータセットは1.69 GiBに圧縮されましたが、サーバー側の処理時間はわずかに増加しました。 -::: - -#### 圧縮はリソース使用量を削減 {#compression-reduces-resource-usage} - -圧縮はネットワークトラフィックを削減するだけでなく、サーバー上でのCPUおよびメモリの効率も向上させます。圧縮されたデータを使用すると、ClickHouseは少ないバイト数を受け取り、大きな入力の解析に費やす時間も減少します。この利点は、特に可観測性シナリオなど、複数の同時クライアントからの取り込み時に重要です。 - -LZ4では圧縮によるCPUおよびメモリへの影響は控えめで、ZSTDでは中程度です。負荷がかかっている場合でも、サーバー側の効率はデータ量の減少により改善されます。 - -**圧縮とバッチ処理、効率的な入力形式(ネイティブのような)を組み合わせることで、最良の取り込みパフォーマンスが得られます。** - -ネイティブインターフェース(例:[clickhouse-client](/interfaces/cli))を使用している場合、デフォルトでLZ4圧縮が有効になっています。必要に応じて設定からZSTDに切り替えることができます。 - -[HTTPインターフェース](/interfaces/http)を使用する場合、Content-Encodingヘッダーを使用して圧縮を適用します(例:Content-Encoding: lz4)。全てのペイロードは送信前に圧縮される必要があります。 - -### 
低コストの場合は事前ソートしてください {#pre-sort-if-low-cost} - -挿入の前に主キーでデータを事前にソートすると、特に大規模なバッチにおいて、ClickHouseでの取り込み効率が向上します。 - -データが事前にソートされた状態で到着すると、ClickHouseはパート作成中に内部ソートステップをスキップまたは簡略化でき、CPU使用量を削減し、挿入プロセスを加速します。事前ソートは、似たような値がまとめられるため、圧縮効率も向上させます。これによりLZ4やZSTDなどのコーデックがより良い圧縮率を達成できます。特に、大規模なバッチ挿入および圧縮と組み合わせると、処理オーバーヘッドと転送データ量の両方を削減するのに役立ちます。 - -**ただし、事前ソートはオプションの最適化であり、必須ではありません。** ClickHouseは並列処理を利用してデータを非常に効率的にソートし、多くの場合、サーバー側のソートはクライアント側の事前ソートよりも速いか、便利です。 - -**データがほぼ順序付けられている、またはクライアント側のリソース(CPU、メモリ)が十分で未使用である場合のみ、事前ソートを推奨します。** 遅延に敏感な高スループットのユースケース(可観測性など)では、データが順不同または多数のエージェントから到着するため、事前ソートをスキップし、ClickHouseの内蔵されたパフォーマンスに依存する方がしばしば良いです。 - -## 非同期挿入 {#asynchronous-inserts} - - - -## インターフェースを選択 - HTTPまたはネイティブ {#choose-an-interface} - -### ネイティブ {#choose-an-interface-native} - -ClickHouseはデータ取り込みのために、**ネイティブインターフェース**と**HTTPインターフェース**の2つの主なインターフェースを提供しています - それぞれパフォーマンスと柔軟性の間でトレードオフがあります。ネイティブインターフェースは、[clickhouse-client](/interfaces/cli)やGo、C++などの一部の言語クライアントによって使用され、パフォーマンスのために特別に設計されています。常にClickHouseの非常に効率的なネイティブ形式でデータを送信し、LZ4またはZSTDによるブロック単位の圧縮をサポートし、解析や形式変換などの作業をクライアントにオフロードしてサーバー側の処理を最小限に抑えます。 - -このインターフェースは、MATERIALIZEDおよびDEFAULT列の値のクライアント側の計算を可能にし、サーバーがこれらのステップを完全にスキップできるようにします。これにより、高スループットの取り込みシナリオに最適です。 - -### HTTP {#choose-an-interface-http} - -多くの従来のデータベースとは異なり、ClickHouseはHTTPインターフェースもサポートしています。**これに対して、互換性と柔軟性を優先します。** データは[任意のサポートされた形式](/integrations/data-formats)で送信でき、JSON、CSV、Parquetなどを含み、Python、Java、JavaScript、RustなどのほとんどのClickHouseクライアントで広くサポートされています。 - -これは、トラフィックをロードバランサーで容易に切り替えることができるため、ClickHouseのネイティブプロトコルよりも好まれることがよくあります。ネイティブプロトコルでは、少しだけオーバーヘッドが低い場合、挿入性能に小さな差異が生じると期待しています。 - -ただし、クライアント側の最適化、例えばマテリアライズされた値の計算やネイティブ形式への自動変換を行うことはできません。HTTP挿入は標準のHTTPヘッダーを使用して圧縮を行うことができますが(例:`Content-Encoding: lz4`)、圧縮は個々のデータブロックではなく全ペイロードに適用されます。このインターフェースは、プロトコルのシンプルさ、負荷分散、または広範な形式互換性が生のパフォーマンスよりも重要とされる環境で好まれることがよくあります。 - -これらのインターフェースの詳細な説明については、[こちら](/interfaces/overview)をご覧ください。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/best-practices/selecting_an_insert_strategy.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/best-practices/selecting_an_insert_strategy.md.hash deleted file mode 100644 index 95202b9cfe6..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/best-practices/selecting_an_insert_strategy.md.hash +++ /dev/null @@ -1 +0,0 @@ -9911b1f71ef2abe7 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/best-practices/sizing-and-hardware-recommendations.md b/i18n/jp/docusaurus-plugin-content-docs/current/best-practices/sizing-and-hardware-recommendations.md deleted file mode 100644 index cea4c046f5b..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/best-practices/sizing-and-hardware-recommendations.md +++ /dev/null @@ -1,237 +0,0 @@ ---- -slug: '/guides/sizing-and-hardware-recommendations' -sidebar_label: 'サイジングおよびハードウェアの推奨事項' -sidebar_position: 4 -title: 'サイジングおよびハードウェアの推奨事項' -description: 'このガイドでは、オープンソースユーザー向けのハードウェア、コンピュート、メモリおよびディスク構成に関する一般的な推奨事項について説明しています。' ---- - - - - -# ハードウェアのサイズ指定と推奨事項 - -このガイドでは、オープンソースユーザー向けのハードウェア、計算、メモリ、ディスク構成に関する一般的な推奨事項を説明します。セットアップを簡素化したい場合は、[ClickHouse Cloud](https://clickhouse.com/cloud)を使用することをお勧めします。これにより、ワークロードに応じて自動的にスケールし、インフラ管理に関するコストを最小限に抑えることができます。 - -ClickHouseクラスターの構成は、アプリケーションの使用ケースやワークロードパターンに大きく依存します。アーキテクチャを計画する際には、以下の要因を考慮する必要があります。 - -- 同時実行性(リクエスト数/秒) -- スループット(処理された行数/秒) -- データ量 -- データ保持ポリシー -- ハードウェアコスト -- メンテナンスコスト - -## ディスク {#disk} - -ClickHouseで使用するディスクの種類は、データ量、レイテンシ、またはスループットの要件に依存します。 - -### パフォーマンスの最適化 {#optimizing-for-performance} - 
-パフォーマンスを最大化するために、[AWSのプロビジョニングIOPS SSDボリューム](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/provisioned-iops.html)またはクラウドプロバイダーの同等の提供物を直接接続することをお勧めします。これにより、IOが最適化されます。 - -### ストレージコストの最適化 {#optimizing-for-storage-costs} - -コストを抑えるために、[一般目的のSSD EBSボリューム](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/general-purpose.html)を使用できます。 - -SDDとHDDを使用した[ホット/ウォーム/コールドアーキテクチャ](/guides/developer/ttl#implementing-a-hotwarmcold-architecture)を利用した段階的ストレージを実装することもできます。あるいは、[AWS S3](https://aws.amazon.com/s3/)をストレージとして使用し、計算とストレージを分離することも可能です。計算とストレージの分離に関するガイドは[こちら](/guides/separation-storage-compute)をご覧ください。計算とストレージの分離は、ClickHouse Cloudでデフォルトで利用可能です。 - -## CPU {#cpu} - -### どのCPUを使用すべきか? {#which-cpu-should-i-use} - -使用するCPUの種類は、使用パターンに依存します。ただし、一般に、同時実行クエリが多く、より多くのデータを処理するアプリケーションや、計算集約型のユーザー定義関数(UDF)を使用する場合は、より多くのCPUコアが必要になります。 - -**低レイテンシまたは顧客向けアプリケーション** - -顧客向けワークロードのために、レイテンシ要件が数ミリ秒の場合は、AWSのEC2 [i3ライント](https://aws.amazon.com/ec2/instance-types/i3/)または[i4iライント](https://aws.amazon.com/ec2/instance-types/i4i/)を推奨します。または、クラウドプロバイダーの同等の提供物を選択してください。 - -**高同時実行アプリケーション** - -同時実行性を最適化する必要があるワークロード(100クエリ/秒以上)の場合は、AWSの[計算最適化Cシリーズ](https://aws.amazon.com/ec2/instance-types/#Compute_Optimized)を推奨します。あるいは、クラウドプロバイダーの同等の提供物もご利用いただけます。 - -**データウェアハウジングのユースケース** - -データウェアハウジングのワークロードやアドホック分析クエリには、AWSの[Rタイプシリーズ](https://aws.amazon.com/ec2/instance-types/#Memory_Optimized)を推奨します。または、クラウドプロバイダーの同等の提供物を使用してください。これらはメモリ最適化されています。 - ---- - -### CPU使用率はどのくらいにすべきか? {#what-should-cpu-utilization-be} - -ClickHouseに対する標準的なCPU使用率の目標はありません。[iostat](https://linux.die.net/man/1/iostat)などのツールを使用して平均CPU使用率を測定し、予期しないトラフィックスパイクを管理するためにサーバーのサイズを調整してください。ただし、アナリティクスやデータウェアハウジングのユースケースでアドホッククエリを行っている場合、CPU使用率は10〜20%を目指すべきです。 - -### どれくらいのCPUコアを使用すべきか? {#how-many-cpu-cores-should-i-use} - -使用するCPUの数は、ワークロードによって異なります。ただし、以下のCPUタイプに基づくメモリとCPUコアの比率を推奨します。 - -- **[Mタイプ](https://aws.amazon.com/ec2/instance-types/)(一般目的のユースケース):** メモリとCPUコアの比率は4:1 -- **[Rタイプ](https://aws.amazon.com/ec2/instance-types/#Memory_Optimized)(データウェアハウジングのユースケース):** メモリとCPUコアの比率は8:1 -- **[Cタイプ](https://aws.amazon.com/ec2/instance-types/#Compute_Optimized)(計算最適化のユースケース):** メモリとCPUコアの比率は2:1 - -例えば、MタイプのCPUを使用する場合、25CPUコアごとに100GBのメモリをプロビジョニングすることを推奨します。アプリケーションに適したメモリ量を特定するには、メモリの使用状況をプロファイリングする必要があります。メモリに関する問題のデバッグに関する[このガイド](/guides/developer/debugging-memory-issues)を読むか、ClickHouseを監視するために[組み込みの可観測性ダッシュボード](/operations/monitoring)を使用してください。 - -## メモリ {#memory} - -CPUの選択と同様に、ストレージ比率に対するメモリ、CPUに対するメモリの比率は使用ケースに依存します。 - -必要なRAMの容量は通常、以下の要因に依存します。 -- クエリの複雑さ。 -- クエリで処理されるデータの量。 - -一般に、メモリが多いほど、クエリの実行は速くなります。 -コストに敏感な使用ケースの場合、メモリの少ない構成でも動作します([`max_bytes_before_external_group_by`](/operations/settings/settings#max_bytes_before_external_group_by)および[`max_bytes_before_external_sort`](/operations/settings/settings#max_bytes_before_external_sort)を有効にする設定が可能で、ディスクにデータをスピルさせることができます)が、これによりクエリのパフォーマンスに大きな影響を与える可能性があることに注意してください。 - -### メモリとストレージの比率はどのくらいにすべきか? 
{#what-should-the-memory-to-storage-ratio-be} - -データ量が少ない場合は、メモリとストレージの比率が1:1でも許容されますが、合計メモリは8GB以上であるべきです。 - -データの保持期間が長いまたはデータ量が多いユースケースの場合、1:100から1:130のメモリとストレージの比率を推奨します。たとえば、10TBのデータを保存する場合は、レプリカごとに100GBのRAMを推奨します。 - -顧客向けのワークロードのように頻繁にアクセスされるユースケースの場合、1:30から1:50のメモリとストレージの比率で、より多くのメモリを使用することを推奨します。 - -## レプリカ {#replicas} - -シャードごとに少なくとも3つのレプリカ(または[Amazon EBS](https://aws.amazon.com/ebs/)を含む2つのレプリカ)を持つことを推奨します。さらに、レプリカを追加して水平にスケーリングする前に、まずすべてのレプリカを垂直にスケールアップすることをお勧めします。 - -ClickHouseは自動的にシャーディングを行わず、データセットの再シャーディングにはかなりの計算リソースが必要です。したがって、将来データを再シャーディングする必要がないように、通常は利用可能な最大のサーバーを使用することを推奨します。 - -[ClickHouse Cloud](https://clickhouse.com/cloud)を使用すると自動的にスケールし、使用ケースに応じてレプリカの数を簡単に制御できます。 - -## 大規模ワークロードの例示的な構成 {#example-configurations-for-large-workloads} - -ClickHouseの構成は、特定のアプリケーションの要件によって大きく異なります。コストとパフォーマンスの最適化についてのサポートが必要な場合は、[Salesにお問い合わせ](https://clickhouse.com/company/contact?loc=docs-sizing-and-hardware-recommendations)ください。 - -ガイダンスを提供するために(推奨事項ではありません)、以下はプロダクションでのClickHouseユーザーの例示的な構成です。 - -### Fortune 500 B2B SaaS {#fortune-500-b2b-saas}

| 項目 | 値 |
|------|----|
| **ストレージ** | |
| 月間新データ量 | 30TB |
| 合計ストレージ(圧縮後) | 540TB |
| データ保持 | 18ヶ月 |
| ノードごとのディスク | 25TB |
| **CPU** | |
| 同時実行性 | 200+ 同時クエリ |
| レプリカの数(HAペアを含む) | 44 |
| ノードごとのvCPU | 62 |
| 合計vCPU | 2700 |
| **メモリ** | |
| 合計RAM | 11TB |
| レプリカごとのRAM | 256GB |
| RAMとvCPUの比率 | 4:1 |
| RAMとディスクの比率 | 1:50 |
- -### Fortune 500 Telecom Operatorのログユースケース {#fortune-500-telecom-operator-for-a-logging-use-case} - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
| 項目 | 値 |
|------|----|
| **ストレージ** | |
| 月間ログデータ量 | 4860TB |
| 合計ストレージ(圧縮後) | 608TB |
| データ保持 | 30日 |
| ノードごとのディスク | 13TB |
| **CPU** | |
| レプリカの数(HAペアを含む) | 38 |
| ノードごとのvCPU | 42 |
| 合計vCPU | 1600 |
| **メモリ** | |
| 合計RAM | 10TB |
| レプリカごとのRAM | 256GB |
| RAMとvCPUの比率 | 6:1 |
| RAMとディスクの比率 | 1:60 |
- -## さらなる読書 {#further-reading} - -以下は、オープンソースのClickHouseを使用している企業によるアーキテクチャに関する公開されたブログ投稿です。 - -- [Cloudflare](https://blog.cloudflare.com/http-analytics-for-6m-requests-per-second-using-clickhouse/?utm_source=linkedin&utm_medium=social&utm_campaign=blog) -- [eBay](https://innovation.ebayinc.com/tech/engineering/ou-online-analytical-processing/) -- [GitLab](https://handbook.gitlab.com/handbook/engineering/development/ops/monitor/observability/#clickhouse-datastore) -- [Lyft](https://eng.lyft.com/druid-deprecation-and-clickhouse-adoption-at-lyft-120af37651fd) -- [MessageBird](https://clickhouse.com/blog/how-messagebird-uses-clickhouse-to-monitor-the-delivery-of-billions-of-messages) -- [Microsoft](https://clickhouse.com/blog/self-service-data-analytics-for-microsofts-biggest-web-properties) -- [Uber](https://www.uber.com/en-ES/blog/logging/) -- [Zomato](https://blog.zomato.com/building-a-cost-effective-logging-platform-using-clickhouse-for-petabyte-scale) diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/best-practices/sizing-and-hardware-recommendations.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/best-practices/sizing-and-hardware-recommendations.md.hash deleted file mode 100644 index 05dd1f593e3..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/best-practices/sizing-and-hardware-recommendations.md.hash +++ /dev/null @@ -1 +0,0 @@ -31948f0993eea545 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/best-practices/use_materialized_views.md b/i18n/jp/docusaurus-plugin-content-docs/current/best-practices/use_materialized_views.md deleted file mode 100644 index a676f8c6145..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/best-practices/use_materialized_views.md +++ /dev/null @@ -1,79 +0,0 @@ ---- -slug: '/best-practices/use-materialized-views' -sidebar_position: 10 -sidebar_label: 'Materialized View' -title: 'Materialized View' -description: 'Page describing Materialized Views' ---- - -import Image from '@theme/IdealImage'; -import incremental_materialized_view from '@site/static/images/bestpractices/incremental_materialized_view.gif'; -import refreshable_materialized_view from '@site/static/images/bestpractices/refreshable_materialized_view.gif'; - -ClickHouseは2種類のマテリアライズドビューをサポートしています: [**インクリメンタル**](/materialized-view/incremental-materialized-view) と [**リフレッシュ可能**](/materialized-view/refreshable-materialized-view)。どちらも結果を事前に計算して保存することでクエリを加速するように設計されていますが、基本となるクエリがどのように、またはいつ実行されるか、どのワークロードに適しているか、データの新鮮さがどのように扱われるかにおいて重要な違いがあります。 - -**ユーザーは、前のベストプラクティス [タイプに関する](/best-practices/select-data-types) と [主キーの最適化](/best-practices/choosing-a-primary-key) が実施されたと仮定して、加速する必要のある特定のクエリパターンについてマテリアライズドビューを検討する必要があります。** - -**インクリメンタルマテリアライズドビュー**はリアルタイムで更新されます。新しいデータがソーステーブルに挿入されると、ClickHouseは自動的にマテリアライズドビューのクエリを新しいデータブロックに適用し、結果を別のターゲットテーブルに書き込みます。時間が経つにつれて、ClickHouseはこれらの部分的な結果をマージして完全で最新のビューを生成します。このアプローチは、計算コストを挿入時間にシフトし、新しいデータのみを処理するため非常に効率的です。その結果、ターゲットテーブルに対する`SELECT`クエリは高速で軽量です。インクリメンタルビューはすべての集約関数をサポートし、挿入されるデータセットの小さく最近のサブセットに対して各クエリが操作を行うため、ペタバイトのデータにもスケールします。 - -マテリアライズドビュー - -**リフレッシュ可能なマテリアライズドビュー**は、対照的に、スケジュールで更新されます。これらのビューは定期的にフルクエリを再実行し、ターゲットテーブルの結果を上書きします。これは、Postgresのような従来のOLTPデータベースのマテリアライズドビューに似ています。 - -リフレッシュ可能なマテリアライズドビューの図 - -インクリメンタルとリフレッシュ可能なマテリアライズドビューの選択は、クエリの性質、データの変更頻度、ビューの更新が挿入されるたびにすべての行を反映する必要があるか、定期的なリフレッシュが許容されるかどうかに大きく依存します。これらのトレードオフを理解することが、ClickHouseでパフォーマンスが高くスケーラブルなマテリアライズドビューを設計するための鍵となります。 - -## インクリメンタルマテリアライズドビューの使用時期 {#when-to-use-incremental-materialized-views} - 
-インクリメンタルマテリアライズドビューは一般的に好まれます。これは、ソーステーブルに新しいデータが受け取られるたびにリアルタイムで自動的に更新されるからです。すべての集約関数をサポートしており、単一テーブルに対する集約に特に効果的です。挿入時に結果をインクリメンタルに計算することにより、クエリは大幅に小さなデータサブセットに対して実行され、これによりペタバイトのデータにもストレスなくスケールします。ほとんどの場合、全体的なクラスターのパフォーマンスに対して顕著な影響はありません。 - -インクリメンタルマテリアライズドビューは次の場合に使用すべきです: - -- 挿入のたびに更新されるリアルタイムのクエリ結果が必要な場合。 -- 大量のデータを頻繁に集約またはフィルタリングしている場合。 -- クエリが単一テーブルに対する簡単な変換または集約を含む場合。 - -インクリメンタルマテリアライズドビューの例については、[こちら](/materialized-view/incremental-materialized-view)を参照してください。 - -## リフレッシュ可能なマテリアライズドビューの使用時期 {#when-to-use-refreshable-materialized-views} - -リフレッシュ可能なマテリアライズドビューは、クエリをインクリメンタルではなく定期的に実行し、クエリ結果セットを高速に取得するために保存します。 - -これらは、クエリのパフォーマンスが重要で(例えば、サブミリ秒の待機時間)、わずかに古い結果が許容される場合に最も便利です。クエリが完全に再実行されるため、リフレッシュ可能なビューは、比較的高速に計算できるクエリや、頻繁には計算できないクエリ(例えば、毎時)、キャッシングされた「トップN」結果やルックアップテーブルなどに最も適しています。 - -実行頻度は、システムに過度の負荷がかからないように慎重に調整する必要があります。リソースを消費する非常に複雑なクエリは、慎重にスケジュールする必要があります - これらは、キャッシュに影響を与えたり、CPUとメモリを消費したりすることによって、全体のクラスターのパフォーマンスを悪化させる可能性があります。クエリは、リフレッシュ間隔に対して相対的に迅速に実行され、クラスターに負荷をかけないようにする必要があります。例えば、クエリ自体が計算に少なくとも10秒かかる場合、10秒ごとにビューを更新するようにスケジュールしないでください。 - -## 概要 {#summary} - -要約すると、リフレッシュ可能なマテリアライズドビューを使用するのは次の場合です: - -- 即時に利用できるキャッシュされたクエリ結果が必要であり、新鮮さのわずかな遅延が許容される場合。 -- クエリ結果セットのトップNが必要な場合。 -- 結果セットのサイズが時間とともに無制限に増加することがない場合。これにより、ターゲットビューのパフォーマンスが低下します。 -- 複数のテーブルを含む複雑な結合や非正規化を行い、ソーステーブルが変更されるたびに更新が必要な場合。 -- バッチワークフロー、非正規化タスク、DBT DAGsに似たビュー依存関係を構築している場合。 - -リフレッシュ可能なマテリアライズドビューの例については、[こちら](/materialized-view/refreshable-materialized-view)を参照してください。 - -### APPENDとREPLACEモード {#append-vs-replace-mode} - -リフレッシュ可能なマテリアライズドビューは、ターゲットテーブルにデータを書き込むための2つのモードをサポートします: `APPEND`と`REPLACE`。これらのモードは、ビューがリフレッシュされたときにクエリの結果がどのように書き込まれるかを定義します。 - -`REPLACE`はデフォルトの動作です。ビューがリフレッシュされるたびに、ターゲットテーブルの以前の内容は最新のクエリ結果で完全に上書きされます。これは、ビューが常に最新の状態を反映する必要がある場合に適しています。例えば、結果セットをキャッシュする場合などです。 - -対照的に、`APPEND`は、ターゲットテーブルの内容を置き換えるのではなく、新しい行をテーブルの末尾に追加することを許可します。これにより、定期的なスナップショットをキャプチャするなどの追加のユースケースが可能になります。`APPEND`は、各リフレッシュが異なる時点を示す場合や、結果の履歴蓄積が望ましい場合に特に便利です。 - -`APPEND`モードを選択する場合: - -- 過去のリフレッシュの履歴を保持したい場合。 -- 定期的なスナップショットやレポートを構築している場合。 -- 時間の経過とともにリフレッシュされた結果を段階的に収集する必要がある場合。 - -`REPLACE`モードを選択する場合: - -- 最も最近の結果のみが必要な場合。 -- 古いデータを完全に破棄する必要がある場合。 -- ビューが現在の状態またはルックアップを表している場合。 - -ユーザーは、[メダリオンアーキテクチャ](https://clickhouse.com/blog/building-a-medallion-architecture-for-bluesky-json-data-with-clickhouse)を構築する際に`APPEND`機能を適用することができます。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/best-practices/use_materialized_views.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/best-practices/use_materialized_views.md.hash deleted file mode 100644 index 643bf8083a6..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/best-practices/use_materialized_views.md.hash +++ /dev/null @@ -1 +0,0 @@ -8928d726ef9bcf0f diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/best-practices/using_data_skipping_indices.md b/i18n/jp/docusaurus-plugin-content-docs/current/best-practices/using_data_skipping_indices.md deleted file mode 100644 index 0023f0562a0..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/best-practices/using_data_skipping_indices.md +++ /dev/null @@ -1,250 +0,0 @@ ---- -slug: '/best-practices/use-data-skipping-indices-where-appropriate' -sidebar_position: 10 -sidebar_label: 'データスキッピングインデックス' -title: '適切な場所でデータスキッピングインデックスを使用する' -description: 'データスキッピングインデックスの使用方法とタイミングについて説明するページ' ---- - -import Image from '@theme/IdealImage'; -import building_skipping_indices from '@site/static/images/bestpractices/building_skipping_indices.gif'; -import using_skipping_indices from 
'@site/static/images/bestpractices/using_skipping_indices.gif'; - -データスキッピングインデックスは、前のベストプラクティスが守られている場合に考慮されるべきです。つまり、型が最適化され、良好な主キーが選択され、マテリアライズドビューが活用されている必要があります。 - -これらのインデックスは、どのように機能するかを理解した上で注意深く使用されると、クエリパフォーマンスを加速するために使用できます。 - -ClickHouseは、クエリ実行中にスキャンされるデータ量を劇的に減少させることができる**データスキッピングインデックス**という強力なメカニズムを提供します。特に、特定のフィルタ条件に対して主キーが役立たない場合に有効です。従来のデータベースが行ベースの二次インデックス(Bツリーなど)に依存しているのに対し、ClickHouseは列指向であり、そのような構造をサポートする形で行の位置を保存しません。代わりにスキップインデックスを使用し、クエリのフィルタ条件と一致しないことが保証されているデータブロックの読み込みを回避します。 - -スキップインデックスは、データブロックに関するメタデータ(最小値/最大値、値のセット、またはブルームフィルタ表現など)を保存し、クエリ実行中にこのメタデータを使用してどのデータブロックを完全にスキップできるかを判断します。これらは[MergeTreeファミリー](/engines/table-engines/mergetree-family/mergetree)のテーブルエンジンにのみ適用され、式、インデックスタイプ、名前、およびインデックスされた各ブロックのサイズを定義する粒度を使用して定義されます。これらのインデックスは、テーブルデータとともに保存され、クエリフィルタがインデックス式に一致するときに参照されます。 - -データスキッピングインデックスには、さまざまなクエリとデータ分布に適したいくつかのタイプがあります: - -* **minmax**: 各ブロックごとの式の最小値と最大値を追跡します。緩やかにソートされたデータに対する範囲クエリに最適です。 -* **set(N)**: 各ブロックごとに指定されたサイズNまでの値のセットを追跡します。ブロックごとの低いカーディナリティのカラムに効果的です。 -* **bloom_filter**: 値がブロックに存在するかどうかを確率的に判断し、セットメンバーシップのための高速近似フィルタリングを可能にします。「干し草の中の針」を探すクエリを最適化するために効果的です。 -* **tokenbf_v1 / ngrambf_v1**: 文字列内のトークンや文字列シーケンスを検索するために設計された特化型ブルームフィルタのバリアント - ログデータやテキスト検索のユースケースに特に役立ちます。 - -強力である一方で、スキップインデックスは注意して使用する必要があります。意味のある数のデータブロックを排除する場合にのみベネフィットを提供し、クエリやデータ構造が合致しない場合はオーバーヘッドを引き起こす可能性があります。ブロックに一致する値が1つでも存在する場合、そのブロック全体はまだ読み込まれる必要があります。 - -**効果的なスキップインデックスの使用は、インデックスされたカラムとテーブルの主キーとの強い相関関係、または類似の値をグループ化する形でのデータ挿入に依存することが多いです。** - -一般的に、データスキッピングインデックスは、適切な主キー設計と型最適化を確認した後に適用するのが最適です。特に役立つのは: - -* 全体的なカーディナリティが高いが、ブロック内のカーディナリティが低いカラム。 -* 検索において重要な稀な値(例:エラーコード、特定のID)。 -* 非主キーのカラムでのフィルタリングがローカライズされた分布で発生する場合。 - -常に: - -1. 実データで現実的なクエリを使用してスキップインデックスをテストします。異なるインデックスタイプと粒度の値を試してください。 -2. send_logs_level='trace' や `EXPLAIN indexes=1` などのツールを使用して、インデックスの効果を評価します。 -3. インデックスのサイズと、それが粒度によってどのように影響を受けるかを常に評価します。粒度サイズを減少させることは、しばしばパフォーマンスを向上させ、より多くのグラニュールがフィルタリングされ、スキャンされる必要が生じます。ただし、インデックスサイズが粒度の低下に伴って増加する場合、パフォーマンスが低下することもあります。さまざまな粒度データポイントに対するパフォーマンスとインデックスサイズを測定します。これは特にブルームフィルタインデックスに関連しています。 - -
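たとえば、上記の項目2で挙げたツールは次のように組み合わせて使えます。これは最小限のスケッチであり、`my_table` と `value` は説明用の仮の名前です(実データを使った完全な例は、このページ後半の「例」セクションを参照してください):

```sql
-- スキップインデックスの効果を確認するための最小限のスケッチ(テーブル名・カラム名は仮)
SET send_logs_level = 'trace';   -- インデックスでスキップされたグラニュールの情報をログで確認する

EXPLAIN indexes = 1
SELECT count()
FROM my_table
WHERE value > 100;               -- スキップインデックスを定義したカラムでフィルタする想定
```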

-**適切に使用される場合、スキップインデックスは大幅なパフォーマンスブーストを提供しますが、盲目的に使用すると不必要なコストを加える可能性があります。** - -データスキッピングインデックスに関する詳細なガイドについては、[こちら](/sql-reference/statements/alter/skipping-index)を参照してください。 - -## 例 {#example} - -次の最適化されたテーブルを考慮してください。これは、各投稿に対して行があるStack Overflowのデータを含んでいます。 - -```sql -CREATE TABLE stackoverflow.posts -( - `Id` Int32 CODEC(Delta(4), ZSTD(1)), - `PostTypeId` Enum8('Question' = 1, 'Answer' = 2, 'Wiki' = 3, 'TagWikiExcerpt' = 4, 'TagWiki' = 5, 'ModeratorNomination' = 6, 'WikiPlaceholder' = 7, 'PrivilegeWiki' = 8), - `AcceptedAnswerId` UInt32, - `CreationDate` DateTime64(3, 'UTC'), - `Score` Int32, - `ViewCount` UInt32 CODEC(Delta(4), ZSTD(1)), - `Body` String, - `OwnerUserId` Int32, - `OwnerDisplayName` String, - `LastEditorUserId` Int32, - `LastEditorDisplayName` String, - `LastEditDate` DateTime64(3, 'UTC') CODEC(Delta(8), ZSTD(1)), - `LastActivityDate` DateTime64(3, 'UTC'), - `Title` String, - `Tags` String, - `AnswerCount` UInt16 CODEC(Delta(2), ZSTD(1)), - `CommentCount` UInt8, - `FavoriteCount` UInt8, - `ContentLicense` LowCardinality(String), - `ParentId` String, - `CommunityOwnedDate` DateTime64(3, 'UTC'), - `ClosedDate` DateTime64(3, 'UTC') -) -ENGINE = MergeTree -PARTITION BY toYear(CreationDate) -ORDER BY (PostTypeId, toDate(CreationDate)) -``` - -このテーブルは、投稿の種類と日付でフィルタリングおよび集約するクエリに最適化されています。たとえば、2009年以降に公開された、ビュー数が10,000,000を超える投稿の数をカウントしたいとしましょう。 - -```sql -SELECT count() -FROM stackoverflow.posts -WHERE (CreationDate > '2009-01-01') AND (ViewCount > 10000000) - -┌─count()─┐ -│ 5 │ -└─────────┘ - -1行がセットされました。経過時間: 0.720秒。59.55百万行、230.23 MBが処理されました (82.66百万行/秒, 319.56 MB/秒)。 -``` - -このクエリは、主インデックスを使用して一部の行(およびグラニュール)を除外することができます。しかし、上記の応答および次の`EXPLAIN indexes=1`で示されているように、大多数の行はまだ読み込む必要があります。 - -```sql -EXPLAIN indexes = 1 -SELECT count() -FROM stackoverflow.posts -WHERE (CreationDate > '2009-01-01') AND (ViewCount > 10000000) -LIMIT 1 - -┌─explain──────────────────────────────────────────────────────────┐ -│ Expression ((Project names + Projection)) │ -│ Limit (preliminary LIMIT (without OFFSET)) │ -│ Aggregating │ -│ Expression (Before GROUP BY) │ -│ Expression │ -│ ReadFromMergeTree (stackoverflow.posts) │ -│ Indexes: │ -│ MinMax │ -│ Keys: │ -│ CreationDate │ -│ Condition: (CreationDate in ('1230768000', +Inf)) │ -│ Parts: 123/128 │ -│ Granules: 8513/8545 │ -│ Partition │ -│ Keys: │ -│ toYear(CreationDate) │ -│ Condition: (toYear(CreationDate) in [2009, +Inf)) │ -│ Parts: 123/123 │ -│ Granules: 8513/8513 │ -│ PrimaryKey │ -│ Keys: │ -│ toDate(CreationDate) │ -│ Condition: (toDate(CreationDate) in [14245, +Inf)) │ -│ Parts: 123/123 │ -│ Granules: 8513/8513 │ -└──────────────────────────────────────────────────────────────────┘ - -25行がセットされました。経過時間: 0.070秒。 -``` - -簡単な分析により、`ViewCount`が`CreationDate`(主キー)と相関していることが示されています。予想通り、投稿が存在する時間が長くなるほど、より多くの閲覧が得られます。 - -```sql -SELECT toDate(CreationDate) as day, avg(ViewCount) as view_count FROM stackoverflow.posts WHERE day > '2009-01-01' GROUP BY day -``` - -したがって、これはデータスキッピングインデックスの論理的な選択になります。数値型であるため、min_maxインデックスが適していると言えます。次の`ALTER TABLE`コマンドを使用してインデックスを追加します - 最初に追加し、その後「マテリアライズ」します。 - -```sql -ALTER TABLE stackoverflow.posts - (ADD INDEX view_count_idx ViewCount TYPE minmax GRANULARITY 1); - -ALTER TABLE stackoverflow.posts MATERIALIZE INDEX view_count_idx; -``` - -このインデックスは、初期のテーブル作成時に追加することもできます。DDLの一部として定義されたスキーマは次の通りです: - -```sql -CREATE TABLE stackoverflow.posts -( - `Id` Int32 CODEC(Delta(4), ZSTD(1)), - `PostTypeId` Enum8('Question' = 1, 'Answer' = 2, 'Wiki' = 3, 'TagWikiExcerpt' = 4, 'TagWiki' = 5, 
'ModeratorNomination' = 6, 'WikiPlaceholder' = 7, 'PrivilegeWiki' = 8), - `AcceptedAnswerId` UInt32, - `CreationDate` DateTime64(3, 'UTC'), - `Score` Int32, - `ViewCount` UInt32 CODEC(Delta(4), ZSTD(1)), - `Body` String, - `OwnerUserId` Int32, - `OwnerDisplayName` String, - `LastEditorUserId` Int32, - `LastEditorDisplayName` String, - `LastEditDate` DateTime64(3, 'UTC') CODEC(Delta(8), ZSTD(1)), - `LastActivityDate` DateTime64(3, 'UTC'), - `Title` String, - `Tags` String, - `AnswerCount` UInt16 CODEC(Delta(2), ZSTD(1)), - `CommentCount` UInt8, - `FavoriteCount` UInt8, - `ContentLicense` LowCardinality(String), - `ParentId` String, - `CommunityOwnedDate` DateTime64(3, 'UTC'), - `ClosedDate` DateTime64(3, 'UTC'), - INDEX view_count_idx ViewCount TYPE minmax GRANULARITY 1 --インデックスここ -) -ENGINE = MergeTree -PARTITION BY toYear(CreationDate) -ORDER BY (PostTypeId, toDate(CreationDate)) -``` - -次のアニメーションは、最小値と最大値の`ViewCount`値をテーブル内の各行(グラニュール)のブロックに対して追跡するために、私たちのminmaxスキッピングインデックスがどのように構築されるかを示しています。 - -Building skipping indices - -以前のクエリを繰り返すと、パフォーマンスが大幅に改善されたことがわかります。スキャンされた行数が大幅に削減されたことに注意してください: - -```sql -SELECT count() -FROM stackoverflow.posts -WHERE (CreationDate > '2009-01-01') AND (ViewCount > 10000000) - -┌─count()─┐ -│ 5 │ -└─────────┘ - -1行がセットされました。経過時間: 0.012秒。39.11千行、321.39 KBが処理されました (3.40百万行/秒, 27.93 MB/秒)。 -``` - -`EXPLAIN indexes=1`はインデックスを使用していることを確認しています。 - -```sql -EXPLAIN indexes = 1 -SELECT count() -FROM stackoverflow.posts -WHERE (CreationDate > '2009-01-01') AND (ViewCount > 10000000) - -┌─explain────────────────────────────────────────────────────────────┐ -│ Expression ((Project names + Projection)) │ -│ Aggregating │ -│ Expression (Before GROUP BY) │ -│ Expression │ -│ ReadFromMergeTree (stackoverflow.posts) │ -│ Indexes: │ -│ MinMax │ -│ Keys: │ -│ CreationDate │ -│ Condition: (CreationDate in ('1230768000', +Inf)) │ -│ Parts: 123/128 │ -│ Granules: 8513/8545 │ -│ Partition │ -│ Keys: │ -│ toYear(CreationDate) │ -│ Condition: (toYear(CreationDate) in [2009, +Inf)) │ -│ Parts: 123/123 │ -│ Granules: 8513/8513 │ -│ PrimaryKey │ -│ Keys: │ -│ toDate(CreationDate) │ -│ Condition: (toDate(CreationDate) in [14245, +Inf)) │ -│ Parts: 123/123 │ -│ Granules: 8513/8513 │ -│ Skip │ -│ Name: view_count_idx │ -│ Description: minmax GRANULARITY 1 │ -│ Parts: 5/123 │ -│ Granules: 23/8513 │ -└────────────────────────────────────────────────────────────────────┘ - -29行がセットされました。経過時間: 0.211秒。 -``` - -また、minmaxスキッピングインデックスが、例のクエリ内の`ViewCount` > 10,000,000の条件に対して一致を持たないすべての行ブロックをいかに剪定するかを示すアニメーションも示します: - -Using skipping indices diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/best-practices/using_data_skipping_indices.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/best-practices/using_data_skipping_indices.md.hash deleted file mode 100644 index a7ddba09195..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/best-practices/using_data_skipping_indices.md.hash +++ /dev/null @@ -1 +0,0 @@ -76024d2d3300abfa diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/chdb/getting-started.md b/i18n/jp/docusaurus-plugin-content-docs/current/chdb/getting-started.md deleted file mode 100644 index ecd54089f9d..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/chdb/getting-started.md +++ /dev/null @@ -1,389 +0,0 @@ ---- -title: 'Getting started with chDB' -sidebar_label: 'Getting started' -slug: '/chdb/getting-started' -description: 'chDBはClickHouseを搭載したインプロセスSQL OLAPエンジンです' -keywords: -- 'chdb' -- 'embedded' -- 'clickhouse-lite' -- 'in-process' 
-- 'in process' ---- - - - - - -# chDBの使い方 - -このガイドでは、chDBのPythonバリアントを使用して設定を行います。 -まず、S3に保存されたJSONファイルをクエリしてから、そのJSONファイルに基づいてchDBにテーブルを作成し、データに対していくつかのクエリを実行します。 -また、クエリがApache ArrowやPandaを含むさまざまな形式でデータを返す方法を見て、最後にPandas DataFramesをクエリする方法を学びます。 - -## セットアップ {#setup} - -まず、仮想環境を作成しましょう: - -```bash -python -m venv .venv -source .venv/bin/activate -``` - -次に、chDBをインストールします。 -バージョン2.0.3以上であることを確認してください: - -```bash -pip install "chdb>=2.0.2" -``` - -次に、[ipython](https://ipython.org/)をインストールします: - -```bash -pip install ipython -``` - -このガイドの残りのコマンドを実行するために`ipython`を使用する予定です。次のコマンドを実行して起動できます: - -```bash -ipython -``` - -このガイドではPandasとApache Arrowも使用するので、これらのライブラリもインストールしましょう: - -```bash -pip install pandas pyarrow -``` - -## S3内のJSONファイルをクエリする {#querying-a-json-file-in-s3} - -次に、S3バケットに保存されているJSONファイルをどのようにクエリするかを見ていきます。 -[YouTubeの嫌いなデータセット](/getting-started/example-datasets/youtube-dislikes)には、2021年までのYouTube動画の嫌いの数が40億行以上含まれています。 -そのデータセットからのJSONファイルの1つを使用します。 - -chdbをインポートします: - -```python -import chdb -``` - -次に、JSONファイルの構造を記述するクエリを次のように書くことができます: - -```python -chdb.query( - """ - DESCRIBE s3( - 's3://clickhouse-public-datasets/youtube/original/files/' || - 'youtubedislikes_20211127161229_18654868.1637897329_vid.json.zst', - 'JSONLines' - ) - SETTINGS describe_compact_output=1 - """ -) -``` - -```text -"id","Nullable(String)" -"fetch_date","Nullable(String)" -"upload_date","Nullable(String)" -"title","Nullable(String)" -"uploader_id","Nullable(String)" -"uploader","Nullable(String)" -"uploader_sub_count","Nullable(Int64)" -"is_age_limit","Nullable(Bool)" -"view_count","Nullable(Int64)" -"like_count","Nullable(Int64)" -"dislike_count","Nullable(Int64)" -"is_crawlable","Nullable(Bool)" -"is_live_content","Nullable(Bool)" -"has_subtitles","Nullable(Bool)" -"is_ads_enabled","Nullable(Bool)" -"is_comments_enabled","Nullable(Bool)" -"description","Nullable(String)" -"rich_metadata","Array(Tuple( - call Nullable(String), - content Nullable(String), - subtitle Nullable(String), - title Nullable(String), - url Nullable(String)))" -"super_titles","Array(Tuple( - text Nullable(String), - url Nullable(String)))" -"uploader_badges","Nullable(String)" -"video_badges","Nullable(String)" -``` - -そのファイル内の行数をカウントすることもできます: - -```python -chdb.query( - """ - SELECT count() - FROM s3( - 's3://clickhouse-public-datasets/youtube/original/files/' || - 'youtubedislikes_20211127161229_18654868.1637897329_vid.json.zst', - 'JSONLines' - )""" -) -``` - -```text -336432 -``` - -このファイルにはちょうど300,000件を超えるレコードが含まれています。 - -chdbはまだクエリパラメータを渡すことをサポートしていませんが、パスを抽出してf-Stringを介して渡すことができます。 - -```python -path = 's3://clickhouse-public-datasets/youtube/original/files/youtubedislikes_20211127161229_18654868.1637897329_vid.json.zst' -``` - -```python -chdb.query( - f""" - SELECT count() - FROM s3('{path}','JSONLines') - """ -) -``` - -:::warning -プログラム内で定義された変数を使うのは問題ありませんが、ユーザー提供の入力に対しては行わないでください。そうしないと、クエリがSQLインジェクション攻撃を受ける可能性があります。 -::: - -## 出力形式の設定 {#configuring-the-output-format} - -デフォルトの出力形式は`CSV`ですが、`output_format`パラメーターを介して変更できます。 -chDBはClickHouseのデータ形式をサポートしており、[独自の形式もいくつか用意しています](/chdb/reference/data-formats.md)。その中には、Pandas DataFrameを返す`DataFrame`という形式もあります: - -```python -result = chdb.query( - f""" - SELECT is_ads_enabled, count() - FROM s3('{path}','JSONLines') - GROUP BY ALL - """, - output_format="DataFrame" -) - -print(type(result)) -print(result) -``` - -```text - - is_ads_enabled count() -0 False 301125 -1 True 35307 -``` - -また、Apache Arrowテーブルを得たい場合は次のようにします: - -```python -result = chdb.query( - f""" - SELECT 
is_live_content, count() - FROM s3('{path}','JSONLines') - GROUP BY ALL - """, - output_format="ArrowTable" -) - -print(type(result)) -print(result) -``` - -```text - -pyarrow.Table -is_live_content: bool -count(): uint64 not null ----- -is_live_content: [[false,true]] -count(): [[315746,20686]] -``` - -## JSONファイルからテーブルを作成する {#creating-a-table-from-json-file} - -次に、chDBにテーブルを作成する方法を見ていきましょう。 -このために異なるAPIを使用する必要があるので、まずそれをインポートします: - -```python -from chdb import session as chs -``` - -次に、セッションを初期化します。 -セッションをディスクに保持する場合は、ディレクトリ名を提供する必要があります。 -空白のままにすると、データベースはメモリ内に留まることになり、Pythonプロセスが終了すると失われます。 - -```python -sess = chs.Session("gettingStarted.chdb") -``` - -次に、データベースを作成します: - -```python -sess.query("CREATE DATABASE IF NOT EXISTS youtube") -``` - -次に、`CREATE...EMPTY AS`技法を使用して、JSONファイルからのスキーマに基づいて`dislikes`テーブルを作成します。 -すべてのカラムタイプを`Nullable`にしないために、[`schema_inference_make_columns_nullable`](/operations/settings/formats/#schema_inference_make_columns_nullable)設定を使用します。 - -```python -sess.query(f""" - CREATE TABLE youtube.dislikes - ORDER BY fetch_date - EMPTY AS - SELECT * - FROM s3('{path}','JSONLines') - SETTINGS schema_inference_make_columns_nullable=0 - """ -) -``` - -次に、`DESCRIBE`句を使用してスキーマを確認します: - -```python -sess.query(f""" - DESCRIBE youtube.dislikes - SETTINGS describe_compact_output=1 - """ -) -``` - -```text -"id","String" -"fetch_date","String" -"upload_date","String" -"title","String" -"uploader_id","String" -"uploader","String" -"uploader_sub_count","Int64" -"is_age_limit","Bool" -"view_count","Int64" -"like_count","Int64" -"dislike_count","Int64" -"is_crawlable","Bool" -"is_live_content","Bool" -"has_subtitles","Bool" -"is_ads_enabled","Bool" -"is_comments_enabled","Bool" -"description","String" -"rich_metadata","Array(Tuple( - call String, - content String, - subtitle String, - title String, - url String))" -"super_titles","Array(Tuple( - text String, - url String))" -"uploader_badges","String" -"video_badges","String" -``` - -次に、そのテーブルにデータを挿入します: - -```python -sess.query(f""" - INSERT INTO youtube.dislikes - SELECT * - FROM s3('{path}','JSONLines') - SETTINGS schema_inference_make_columns_nullable=0 - """ -) -``` - -これらの手順を1回で実行するために、`CREATE...AS`技法を使うこともできます。 -その技法を使用して別のテーブルを作成しましょう: - -```python -sess.query(f""" - CREATE TABLE youtube.dislikes2 - ORDER BY fetch_date - AS - SELECT * - FROM s3('{path}','JSONLines') - SETTINGS schema_inference_make_columns_nullable=0 - """ -) -``` - -## テーブルをクエリする {#querying-a-table} - -最後に、テーブルをクエリしてみましょう: - -```sql -df = sess.query(""" - SELECT uploader, sum(view_count) AS viewCount, sum(like_count) AS likeCount, sum(dislike_count) AS dislikeCount - FROM youtube.dislikes - GROUP BY ALL - ORDER BY viewCount DESC - LIMIT 10 - """, - "DataFrame" -) -df -``` - -```text - uploader viewCount likeCount dislikeCount -0 Jeremih 139066569 812602 37842 -1 TheKillersMusic 109313116 529361 11931 -2 LetsGoMartin- Canciones Infantiles 104747788 236615 141467 -3 Xiaoying Cuisine 54458335 1031525 37049 -4 Adri 47404537 279033 36583 -5 Diana and Roma IND 43829341 182334 148740 -6 ChuChuTV Tamil 39244854 244614 213772 -7 Cheez-It 35342270 108 27 -8 Anime Uz 33375618 1270673 60013 -9 RC Cars OFF Road 31952962 101503 49489 -``` - -データフレームに「いいね」と「嫌い」の比率を計算するために追加のカラムを加えるとします。 -次のように書くことができます: - -```python -df["likeDislikeRatio"] = df["likeCount"] / df["dislikeCount"] -``` - -## Pandas DataFrameをクエリする {#querying-a-pandas-dataframe} - -その後、chDBからそのDataFrameをクエリできます: - -```python -chdb.query( - """ - SELECT uploader, likeDislikeRatio - FROM Python(df) - """, - 
output_format="DataFrame" -) -``` - -```text - uploader likeDislikeRatio -0 Jeremih 21.473548 -1 TheKillersMusic 44.368536 -2 LetsGoMartin- Canciones Infantiles 1.672581 -3 Xiaoying Cuisine 27.842182 -4 Adri 7.627395 -5 Diana and Roma IND 1.225857 -6 ChuChuTV Tamil 1.144275 -7 Cheez-It 4.000000 -8 Anime Uz 21.173296 -9 RC Cars OFF Road 2.051021 -``` - -Pandas DataFramesのクエリに関しては、[Pandasをクエリする開発者ガイド](guides/querying-pandas.md)でさらに詳しく読むことができます。 - -## 次のステップ {#next-steps} - -このガイドがchDBの概要を把握するのに役立ったことを願っています。 -使用方法の詳細については、以下の開発者ガイドを参照してください: - -* [Pandas DataFramesをクエリする](guides/querying-pandas.md) -* [Apache Arrowをクエリする](guides/querying-apache-arrow.md) -* [JupySQLでchDBを使用する](guides/jupysql.md) -* [既存のclickhouse-localデータベースでchDBを使用する](guides/clickhouse-local.md) diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/chdb/getting-started.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/chdb/getting-started.md.hash deleted file mode 100644 index 6c65b46e479..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/chdb/getting-started.md.hash +++ /dev/null @@ -1 +0,0 @@ -0b7a488f98a713a2 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/chdb/guides/clickhouse-local.md b/i18n/jp/docusaurus-plugin-content-docs/current/chdb/guides/clickhouse-local.md deleted file mode 100644 index ab72e3e0caf..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/chdb/guides/clickhouse-local.md +++ /dev/null @@ -1,136 +0,0 @@ ---- -title: 'Using a clickhouse-local database' -sidebar_label: 'Using clickhouse-local database' -slug: '/chdb/guides/clickhouse-local' -description: 'Learn how to use a clickhouse-local database with chDB' -keywords: -- 'chdb' -- 'clickhouse-local' ---- - - - -[clickhouse-local](/operations/utilities/clickhouse-local) は、埋め込みバージョンの ClickHouse を持つ CLI です。 -これにより、ユーザーはサーバーをインストールすることなく ClickHouse の機能を利用できます。 -このガイドでは、chDB から clickhouse-local データベースを使用する方法を学びます。 - -## セットアップ {#setup} - -まず、仮想環境を作成しましょう: - -```bash -python -m venv .venv -source .venv/bin/activate -``` - -次に、chDB をインストールします。 -バージョン 2.0.2 以上を確認してください: - -```bash -pip install "chdb>=2.0.2" -``` - -次に、[ipython](https://ipython.org/) をインストールします: - -```bash -pip install ipython -``` - -このガイドの残りのコマンドを実行するために `ipython` を使用します。 -`ipython` は以下のコマンドで起動できます: - -```bash -ipython -``` - -## clickhouse-local のインストール {#installing-clickhouse-local} - -clickhouse-local のダウンロードとインストールは、[ClickHouse のダウンロードとインストール](/install) と同じです。 -以下のコマンドを実行することでこれを行います: - -```bash -curl https://clickhouse.com/ | sh -``` - -データをディレクトリに永続化するために clickhouse-local を起動するには、`--path` を指定する必要があります: - -```bash -./clickhouse -m --path demo.chdb -``` - -## clickhouse-local へのデータの取り込み {#ingesting-data-into-clickhouse-local} - -デフォルトのデータベースはメモリ内のデータのみを保存しますので、取り込むデータがディスクに永続化されるように、名前付きデータベースを作成する必要があります。 - -```sql -CREATE DATABASE foo; -``` - -テーブルを作成し、いくつかのランダムな数字を挿入しましょう: - -```sql -CREATE TABLE foo.randomNumbers -ORDER BY number AS -SELECT rand() AS number -FROM numbers(10_000_000); -``` - -どのデータがあるかを確認するためのクエリを書きます: - -```sql -SELECT quantilesExact(0, 0.5, 0.75, 0.99)(number) AS quants -FROM foo.randomNumbers - -┌─quants────────────────────────────────┐ -│ [69,2147776478,3221525118,4252096960] │ -└───────────────────────────────────────┘ -``` - -これが完了したら、CLI から `exit;` して出てください。 -このディレクトリ上でロックを保持できるプロセスは一つだけなので、これを行わないと chDB からデータベースに接続しようとしたときに以下のエラーが発生します: - -```text -ChdbError: Code: 76. DB::Exception: Cannot lock file demo.chdb/status. Another server instance in same directory is already running. 
(CANNOT_OPEN_FILE) -``` - -## clickhouse-local データベースへの接続 {#connecting-to-a-clickhouse-local-database} - -`ipython` シェルに戻り、chDB から `session` モジュールをインポートします: - -```python -from chdb import session as chs -``` - -`demo.chdb` を指すセッションを初期化します: - -```python -sess = chs.Session("demo.chdb") -``` - -次に、数字の分位数を返すクエリを実行します: - -```python -sess.query(""" -SELECT quantilesExact(0, 0.5, 0.75, 0.99)(number) AS quants -FROM foo.randomNumbers -""", "Vertical") - -Row 1: -────── -quants: [0,9976599,2147776478,4209286886] -``` - -また、chDB からこのデータベースにデータを挿入することもできます: - -```python -sess.query(""" -INSERT INTO foo.randomNumbers -SELECT rand() AS number FROM numbers(10_000_000) -""") - -Row 1: -────── -quants: [0,9976599,2147776478,4209286886] -``` - -その後、chDB または clickhouse-local から分位数のクエリを再実行できます。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/chdb/guides/clickhouse-local.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/chdb/guides/clickhouse-local.md.hash deleted file mode 100644 index c97fc9bc1c0..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/chdb/guides/clickhouse-local.md.hash +++ /dev/null @@ -1 +0,0 @@ -70254ba9c82343dc diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/chdb/guides/index.md b/i18n/jp/docusaurus-plugin-content-docs/current/chdb/guides/index.md deleted file mode 100644 index 258a4463df0..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/chdb/guides/index.md +++ /dev/null @@ -1,27 +0,0 @@ ---- -title: 'chDB ガイド' -slug: '/chdb/guides' -description: 'chDB ガイドのインデックスページ' -keywords: -- 'chdb' -- 'guides' ---- - - - -以下の chDB 開発者ガイドをご覧ください: - - - -| ページ | 説明 | -|-----|-----| -| [Parquet ファイルをクエリする方法](/chdb/guides/querying-parquet) | chDB を使用して Parquet ファイルをクエリする方法を学びます。 | -| [S3 バケット内のデータをクエリする方法](/chdb/guides/querying-s3) | chDB を使用して S3 バケット内のデータをクエリする方法を学びます。 | -| [clickhouse-local データベースの使用法](/chdb/guides/clickhouse-local) | chDB で clickhouse-local データベースを使用する方法を学びます。 | -| [chDB を使用して Pandas DataFrames をクエリする方法](/chdb/guides/pandas) | chDB を使用して Pandas DataFrames をクエリする方法を学びます。 | -| [JupySQL と chDB](/chdb/guides/jupysql) | Bun 用に chDB をインストールする方法 | -| [リモート ClickHouse サーバーをクエリする方法](/chdb/guides/query-remote-clickhouse) | このガイドでは、chDB からリモート ClickHouse サーバーをクエリする方法を学びます。 | -| [chDB を使用して Apache Arrow をクエリする方法](/chdb/guides/apache-arrow) | このガイドでは、chDB を使用して Apache Arrow テーブルをクエリする方法を学びます。 | diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/chdb/guides/index.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/chdb/guides/index.md.hash deleted file mode 100644 index b507cde2646..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/chdb/guides/index.md.hash +++ /dev/null @@ -1 +0,0 @@ -e27c68e90d9b656a diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/chdb/guides/jupysql.md b/i18n/jp/docusaurus-plugin-content-docs/current/chdb/guides/jupysql.md deleted file mode 100644 index 656eeec4d60..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/chdb/guides/jupysql.md +++ /dev/null @@ -1,412 +0,0 @@ ---- -title: 'JupySQL and chDB' -sidebar_label: 'JupySQL' -slug: '/chdb/guides/jupysql' -description: 'How to install chDB for Bun' -keywords: -- 'chdb' -- 'JupySQL' ---- - -import Image from '@theme/IdealImage'; -import PlayersPerRank from '@site/static/images/chdb/guides/players_per_rank.png'; - -[JupySQL](https://jupysql.ploomber.io/en/latest/quick-start.html) は、Jupyter ノートブックや IPython シェルで SQL を実行するための Python ライブラリです。このガイドでは、chDB と JupySQL を使用してデータをクエリする方法を学びます。 - -

- -
- -## セットアップ {#setup} - -まず、仮想環境を作成しましょう: - -```bash -python -m venv .venv -source .venv/bin/activate -``` - -その後、JupySQL、IPython、Jupyter Lab をインストールします: - -```bash -pip install jupysql ipython jupyterlab -``` - -IPython では JupySQL を使用でき、次のコマンドを実行して起動できます: - -```bash -ipython -``` - -または、Jupyter Lab を次のコマンドで起動できます: - -```bash -jupyter lab -``` - -:::note -Jupyter Lab を使用している場合は、ガイドの残りの部分をフォローする前にノートブックを作成する必要があります。 -::: - -## データセットのダウンロード {#downloading-a-dataset} - -[Jeff Sackmann の tennis_atp](https://github.com/JeffSackmann/tennis_atp) データセットの1つを使用します。このデータセットは、選手とそのランキングに関するメタデータが含まれています。まず、ランキングファイルをダウンロードします: - -```python -from urllib.request import urlretrieve -``` - -```python -files = ['00s', '10s', '20s', '70s', '80s', '90s', 'current'] -base = "https://raw.githubusercontent.com/JeffSackmann/tennis_atp/master" -for file in files: - _ = urlretrieve( - f"{base}/atp_rankings_{file}.csv", - f"atp_rankings_{file}.csv", - ) -``` - -## chDB と JupySQL の設定 {#configuring-chdb-and-jupysql} - -次に、chDB の `dbapi` モジュールをインポートします: - -```python -from chdb import dbapi -``` - -そして、chDB 接続を作成します。永続化するデータは `atp.chdb` ディレクトリに保存されます: - -```python -conn = dbapi.connect(path="atp.chdb") -``` - -次に、`sql` マジックを読み込み、chDB への接続を作成します: - -```python -%load_ext sql -%sql conn --alias chdb -``` - -クエリの結果が切り捨てられないように、表示制限を設定します: - -```python -%config SqlMagic.displaylimit = None -``` - -## CSV ファイル内のデータをクエリする {#querying-data-in-csv-files} - -`atp_rankings` プレフィックスのついた複数のファイルをダウンロードしました。`DESCRIBE` 句を使用してスキーマを理解しましょう: - -```python -%%sql -DESCRIBE file('atp_rankings*.csv') -SETTINGS describe_compact_output=1, - schema_inference_make_columns_nullable=0 -``` - -```text -+--------------+-------+ -| name | type | -+--------------+-------+ -| ranking_date | Int64 | -| rank | Int64 | -| player | Int64 | -| points | Int64 | -+--------------+-------+ -``` - -これらのファイルに対して直接 `SELECT` クエリを書いて、データがどのようなものか見てみましょう: - -```python -%sql SELECT * FROM file('atp_rankings*.csv') LIMIT 1 -``` - -```text -+--------------+------+--------+--------+ -| ranking_date | rank | player | points | -+--------------+------+--------+--------+ -| 20000110 | 1 | 101736 | 4135 | -+--------------+------+--------+--------+ -``` - -データの形式は少し変わっています。日付をきれいにして、`REPLACE` 句を使用してクリーンアップした `ranking_date` を返します: - -```python -%%sql -SELECT * REPLACE ( - toDate(parseDateTime32BestEffort(toString(ranking_date))) AS ranking_date -) -FROM file('atp_rankings*.csv') -LIMIT 10 -SETTINGS schema_inference_make_columns_nullable=0 -``` - -```text -+--------------+------+--------+--------+ -| ranking_date | rank | player | points | -+--------------+------+--------+--------+ -| 2000-01-10 | 1 | 101736 | 4135 | -| 2000-01-10 | 2 | 102338 | 2915 | -| 2000-01-10 | 3 | 101948 | 2419 | -| 2000-01-10 | 4 | 103017 | 2184 | -| 2000-01-10 | 5 | 102856 | 2169 | -| 2000-01-10 | 6 | 102358 | 2107 | -| 2000-01-10 | 7 | 102839 | 1966 | -| 2000-01-10 | 8 | 101774 | 1929 | -| 2000-01-10 | 9 | 102701 | 1846 | -| 2000-01-10 | 10 | 101990 | 1739 | -+--------------+------+--------+--------+ -``` - -## chDB に CSV ファイルをインポートする {#importing-csv-files-into-chdb} - -次に、これらの CSV ファイルからデータをテーブルに格納します。デフォルトのデータベースはディスク上にデータを永続化しないため、まず別のデータベースを作成する必要があります: - -```python -%sql CREATE DATABASE atp -``` - -そして、CSV ファイルのデータの構造に基づいて `rankings` という名前のテーブルを作成します: - -```python -%%sql -CREATE TABLE atp.rankings -ENGINE=MergeTree -ORDER BY ranking_date AS -SELECT * REPLACE ( - toDate(parseDateTime32BestEffort(toString(ranking_date))) AS ranking_date -) -FROM file('atp_rankings*.csv') -SETTINGS 
schema_inference_make_columns_nullable=0 -``` - -テーブル内のデータを簡単にチェックします: - -```python -%sql SELECT * FROM atp.rankings LIMIT 10 -``` - -```text -+--------------+------+--------+--------+ -| ranking_date | rank | player | points | -+--------------+------+--------+--------+ -| 2000-01-10 | 1 | 101736 | 4135 | -| 2000-01-10 | 2 | 102338 | 2915 | -| 2000-01-10 | 3 | 101948 | 2419 | -| 2000-01-10 | 4 | 103017 | 2184 | -| 2000-01-10 | 5 | 102856 | 2169 | -| 2000-01-10 | 6 | 102358 | 2107 | -| 2000-01-10 | 7 | 102839 | 1966 | -| 2000-01-10 | 8 | 101774 | 1929 | -| 2000-01-10 | 9 | 102701 | 1846 | -| 2000-01-10 | 10 | 101990 | 1739 | -+--------------+------+--------+--------+ -``` - -良さそうです - 出力は予想通り、CSV ファイルを直接クエリしたときと同じです。 - -選手のメタデータについても同じプロセスを実行します。今回はデータが1つの CSV ファイルにすべて入っているので、そのファイルをダウンロードしましょう: - - -```python -_ = urlretrieve( - f"{base}/atp_players.csv", - "atp_players.csv", -) -``` - -その後、CSV ファイルの内容に基づいて `players` という名前のテーブルを作成します。`dob` フィールドもクリーンアップして、`Date32` 型にします。 - -> ClickHouse では、`Date` 型は 1970 年以降の日付のみをサポートしています。`dob` 列には 1970 年以前の日付が含まれているため、`Date32` 型を代わりに使用します。 - -```python -%%sql -CREATE TABLE atp.players -Engine=MergeTree -ORDER BY player_id AS -SELECT * REPLACE ( - makeDate32( - toInt32OrNull(substring(toString(dob), 1, 4)), - toInt32OrNull(substring(toString(dob), 5, 2)), - toInt32OrNull(substring(toString(dob), 7, 2)) - )::Nullable(Date32) AS dob -) -FROM file('atp_players.csv') -SETTINGS schema_inference_make_columns_nullable=0 -``` - -これが実行されると、取り込んだデータを確認できます: - - -```python -%sql SELECT * FROM atp.players LIMIT 10 -``` - -```text -+-----------+------------+-----------+------+------------+-----+--------+-------------+ -| player_id | name_first | name_last | hand | dob | ioc | height | wikidata_id | -+-----------+------------+-----------+------+------------+-----+--------+-------------+ -| 100001 | Gardnar | Mulloy | R | 1913-11-22 | USA | 185 | Q54544 | -| 100002 | Pancho | Segura | R | 1921-06-20 | ECU | 168 | Q54581 | -| 100003 | Frank | Sedgman | R | 1927-10-02 | AUS | 180 | Q962049 | -| 100004 | Giuseppe | Merlo | R | 1927-10-11 | ITA | 0 | Q1258752 | -| 100005 | Richard | Gonzalez | R | 1928-05-09 | USA | 188 | Q53554 | -| 100006 | Grant | Golden | R | 1929-08-21 | USA | 175 | Q3115390 | -| 100007 | Abe | Segal | L | 1930-10-23 | RSA | 0 | Q1258527 | -| 100008 | Kurt | Nielsen | R | 1930-11-19 | DEN | 0 | Q552261 | -| 100009 | Istvan | Gulyas | R | 1931-10-14 | HUN | 0 | Q51066 | -| 100010 | Luis | Ayala | R | 1932-09-18 | CHI | 170 | Q1275397 | -+-----------+------------+-----------+------+------------+-----+--------+-------------+ -``` - -## chDB をクエリする {#querying-chdb} - -データの取り込みが完了し、次は楽しい部分 - データをクエリします! 
- -テニス選手は、参加するトーナメントでのパフォーマンスに基づいてポイントを受け取ります。各選手のポイントは、52 週間のローリング期間にわたって集計されます。各選手が獲得した最大ポイントと、その時のランキングを見つけるクエリを書きます: - -```python -%%sql -SELECT name_first, name_last, - max(points) as maxPoints, - argMax(rank, points) as rank, - argMax(ranking_date, points) as date -FROM atp.players -JOIN atp.rankings ON rankings.player = players.player_id -GROUP BY ALL -ORDER BY maxPoints DESC -LIMIT 10 -``` - -```text -+------------+-----------+-----------+------+------------+ -| name_first | name_last | maxPoints | rank | date | -+------------+-----------+-----------+------+------------+ -| Novak | Djokovic | 16950 | 1 | 2016-06-06 | -| Rafael | Nadal | 15390 | 1 | 2009-04-20 | -| Andy | Murray | 12685 | 1 | 2016-11-21 | -| Roger | Federer | 12315 | 1 | 2012-10-29 | -| Daniil | Medvedev | 10780 | 2 | 2021-09-13 | -| Carlos | Alcaraz | 9815 | 1 | 2023-08-21 | -| Dominic | Thiem | 9125 | 3 | 2021-01-18 | -| Jannik | Sinner | 8860 | 2 | 2024-05-06 | -| Stefanos | Tsitsipas | 8350 | 3 | 2021-09-20 | -| Alexander | Zverev | 8240 | 4 | 2021-08-23 | -+------------+-----------+-----------+------+------------+ -``` - -このリストにある選手のうち、ポイントが1位でなくても多くのポイントを累積している選手がいるのは非常に興味深いです。 - -## クエリを保存する {#saving-queries} - -`--save` パラメータを使用して同じ行にクエリを保存できます。`--no-execute` パラメータは、クエリの実行をスキップすることを意味します。 - -```python -%%sql --save best_points --no-execute -SELECT name_first, name_last, - max(points) as maxPoints, - argMax(rank, points) as rank, - argMax(ranking_date, points) as date -FROM atp.players -JOIN atp.rankings ON rankings.player = players.player_id -GROUP BY ALL -ORDER BY maxPoints DESC -``` - -保存されたクエリを実行すると、実行前に共通テーブル式(CTE)に変換されます。次のクエリでは、選手がランキング1位の時に達成した最大ポイントを計算します: - -```python -%sql select * FROM best_points WHERE rank=1 -``` - -```text -+-------------+-----------+-----------+------+------------+ -| name_first | name_last | maxPoints | rank | date | -+-------------+-----------+-----------+------+------------+ -| Novak | Djokovic | 16950 | 1 | 2016-06-06 | -| Rafael | Nadal | 15390 | 1 | 2009-04-20 | -| Andy | Murray | 12685 | 1 | 2016-11-21 | -| Roger | Federer | 12315 | 1 | 2012-10-29 | -| Carlos | Alcaraz | 9815 | 1 | 2023-08-21 | -| Pete | Sampras | 5792 | 1 | 1997-08-11 | -| Andre | Agassi | 5652 | 1 | 1995-08-21 | -| Lleyton | Hewitt | 5205 | 1 | 2002-08-12 | -| Gustavo | Kuerten | 4750 | 1 | 2001-09-10 | -| Juan Carlos | Ferrero | 4570 | 1 | 2003-10-20 | -| Stefan | Edberg | 3997 | 1 | 1991-02-25 | -| Jim | Courier | 3973 | 1 | 1993-08-23 | -| Ivan | Lendl | 3420 | 1 | 1990-02-26 | -| Ilie | Nastase | 0 | 1 | 1973-08-27 | -+-------------+-----------+-----------+------+------------+ -``` - -## パラメータを使ったクエリ {#querying-with-parameters} - -クエリ内でパラメータを使用することもできます。パラメータは通常の変数です: - -```python -rank = 10 -``` - -そして、`{{variable}}` 構文をクエリ内で使用できます。次のクエリは、選手が最初にトップ 10 にランキングされてから最後にランキングがあるまでの日数が最も少ない選手を見つけます: - -```python -%%sql -SELECT name_first, name_last, - MIN(ranking_date) AS earliest_date, - MAX(ranking_date) AS most_recent_date, - most_recent_date - earliest_date AS days, - 1 + (days/7) AS weeks -FROM atp.rankings -JOIN atp.players ON players.player_id = rankings.player -WHERE rank <= {{rank}} -GROUP BY ALL -ORDER BY days -LIMIT 10 -``` - -```text -+------------+-----------+---------------+------------------+------+-------+ -| name_first | name_last | earliest_date | most_recent_date | days | weeks | -+------------+-----------+---------------+------------------+------+-------+ -| Alex | Metreveli | 1974-06-03 | 1974-06-03 | 0 | 1 | -| Mikael | Pernfors | 1986-09-22 | 1986-09-22 | 0 | 1 | -| Felix | 
Mantilla | 1998-06-08 | 1998-06-08 | 0 | 1 | -| Wojtek | Fibak | 1977-07-25 | 1977-07-25 | 0 | 1 | -| Thierry | Tulasne | 1986-08-04 | 1986-08-04 | 0 | 1 | -| Lucas | Pouille | 2018-03-19 | 2018-03-19 | 0 | 1 | -| John | Alexander | 1975-12-15 | 1975-12-15 | 0 | 1 | -| Nicolas | Massu | 2004-09-13 | 2004-09-20 | 7 | 2 | -| Arnaud | Clement | 2001-04-02 | 2001-04-09 | 7 | 2 | -| Ernests | Gulbis | 2014-06-09 | 2014-06-23 | 14 | 3 | -+------------+-----------+---------------+------------------+------+-------+ -``` - -## ヒストグラムのプロット {#plotting-histograms} - -JupySQL には限られたチャート機能もあります。ボックスプロットやヒストグラムを作成できます。 - -ヒストグラムを作成しますが、まずは各プレイヤーが達成したトップ100のランキングを計算するクエリを書いて(保存します)、これを使ってヒストグラムを作成します: - -```python -%%sql --save players_per_rank --no-execute -select distinct player, rank -FROM atp.rankings -WHERE rank <= 100 -``` - -次に、以下のコードを実行してヒストグラムを作成できます: - -```python -from sql.ggplot import ggplot, geom_histogram, aes - -plot = ( - ggplot( - table="players_per_rank", - with_="players_per_rank", - mapping=aes(x="rank", fill="#69f0ae", color="#fff"), - ) + geom_histogram(bins=100) -) -``` - -ATP データセットにおけるプレイヤーランクのヒストグラム diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/chdb/guides/jupysql.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/chdb/guides/jupysql.md.hash deleted file mode 100644 index 5f97fe001e0..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/chdb/guides/jupysql.md.hash +++ /dev/null @@ -1 +0,0 @@ -a6d7a009fd2b2644 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/chdb/guides/query-remote-clickhouse.md b/i18n/jp/docusaurus-plugin-content-docs/current/chdb/guides/query-remote-clickhouse.md deleted file mode 100644 index 5eb6c55d840..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/chdb/guides/query-remote-clickhouse.md +++ /dev/null @@ -1,192 +0,0 @@ ---- -title: 'リモートClickHouseサーバーのクエリ方法' -sidebar_label: 'リモートClickHouseのクエリ' -slug: '/chdb/guides/query-remote-clickhouse' -description: 'このガイドでは、chDBからリモートClickHouseサーバーにクエリする方法について学びます。' -keywords: -- 'chdb' -- 'clickhouse' ---- - - - -In this guide, we're going to learn how to query a remote ClickHouse server from chDB. 
- -## Setup {#setup} - -まず、仮想環境を作成します。 - -```bash -python -m venv .venv -source .venv/bin/activate -``` - -次に、chDBをインストールします。 -バージョン2.0.2以上であることを確認してください: - -```bash -pip install "chdb>=2.0.2" -``` - -次に、pandasとipythonをインストールします: - -```bash -pip install pandas ipython -``` - -このガイドの残りの部分でコマンドを実行するために、`ipython`を使用します。これを起動するには、次のコマンドを実行します: - -```bash -ipython -``` - -コードをPythonスクリプトやお気に入りのノートブックで使用することもできます。 - -## An intro to ClickPy {#an-intro-to-clickpy} - -私たちがクエリを実行するリモートClickHouseサーバーは[ClickPy](https://clickpy.clickhouse.com)です。 -ClickPyはPyPIパッケージのすべてのダウンロードを追跡し、UIを介してパッケージの統計を探索できます。 -基礎データベースは`play`ユーザーを使用してクエリが可能です。 - -ClickPyの詳細については、[GitHubリポジトリ](https://github.com/ClickHouse/clickpy)を参照してください。 - -## Querying the ClickPy ClickHouse service {#querying-the-clickpy-clickhouse-service} - -まずchDBをインポートします: - -```python -import chdb -``` - -`remoteSecure`関数を使ってClickPyにクエリを実行します。 -この関数は、ホスト名、テーブル名、ユーザー名を最低限必要とします。 - -次のクエリを記述して、[`openai`パッケージ](https://clickpy.clickhouse.com/dashboard/openai)の1日あたりのダウンロード数をPandas DataFrameとして返します: - -```python -query = """ -SELECT - toStartOfDay(date)::Date32 AS x, - sum(count) AS y -FROM remoteSecure( - 'clickpy-clickhouse.clickhouse.com', - 'pypi.pypi_downloads_per_day', - 'play' -) -WHERE project = 'openai' -GROUP BY x -ORDER BY x ASC -""" - -openai_df = chdb.query(query, "DataFrame") -openai_df.sort_values(by=["x"], ascending=False).head(n=10) -``` - -```text - x y -2392 2024-10-02 1793502 -2391 2024-10-01 1924901 -2390 2024-09-30 1749045 -2389 2024-09-29 1177131 -2388 2024-09-28 1157323 -2387 2024-09-27 1688094 -2386 2024-09-26 1862712 -2385 2024-09-25 2032923 -2384 2024-09-24 1901965 -2383 2024-09-23 1777554 -``` - -次に、[`scikit-learn`](https://clickpy.clickhouse.com/dashboard/scikit-learn)のダウンロード数を返すために同じことを行います: - -```python -query = """ -SELECT - toStartOfDay(date)::Date32 AS x, - sum(count) AS y -FROM remoteSecure( - 'clickpy-clickhouse.clickhouse.com', - 'pypi.pypi_downloads_per_day', - 'play' -) -WHERE project = 'scikit-learn' -GROUP BY x -ORDER BY x ASC -""" - -sklearn_df = chdb.query(query, "DataFrame") -sklearn_df.sort_values(by=["x"], ascending=False).head(n=10) -``` - -```text - x y -2392 2024-10-02 1793502 -2391 2024-10-01 1924901 -2390 2024-09-30 1749045 -2389 2024-09-29 1177131 -2388 2024-09-28 1157323 -2387 2024-09-27 1688094 -2386 2024-09-26 1862712 -2385 2024-09-25 2032923 -2384 2024-09-24 1901965 -2383 2024-09-23 1777554 -``` - -## Merging Pandas DataFrames {#merging-pandas-dataframes} - -現在、2つのDataFrameができたので、日付(`x`列)に基づいてマージできます: - -```python -df = openai_df.merge( - sklearn_df, - on="x", - suffixes=("_openai", "_sklearn") -) -df.head(n=5) -``` - -```text - x y_openai y_sklearn -0 2018-02-26 83 33971 -1 2018-02-27 31 25211 -2 2018-02-28 8 26023 -3 2018-03-01 8 20912 -4 2018-03-02 5 23842 -``` - -次に、Open AIのダウンロード数と`scikit-learn`のダウンロード数の比率を計算します: - -```python -df['ratio'] = df['y_openai'] / df['y_sklearn'] -df.head(n=5) -``` - -```text - x y_openai y_sklearn ratio -0 2018-02-26 83 33971 0.002443 -1 2018-02-27 31 25211 0.001230 -2 2018-02-28 8 26023 0.000307 -3 2018-03-01 8 20912 0.000383 -4 2018-03-02 5 23842 0.000210 -``` - -## Querying Pandas DataFrames {#querying-pandas-dataframes} - -次に、最高と最低の比率の日付を見つけたいとしましょう。 -chDBに戻ってそれらの値を計算できます: - -```python -chdb.query(""" -SELECT max(ratio) AS bestRatio, - argMax(x, ratio) AS bestDate, - min(ratio) AS worstRatio, - argMin(x, ratio) AS worstDate -FROM Python(df) -""", "DataFrame") -``` - -```text - bestRatio bestDate worstRatio worstDate -0 0.693855 2024-09-19 0.000003 2020-02-09 -``` - 
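なお、マージ済みの `df` に対しては ClickHouse のウィンドウ関数もそのまま使えます。以下は元のガイドには含まれていない補足スケッチで、上で作成した `df`(`x`、`ratio` 列を持つ)と chDB 2.x の `Python()` テーブル関数が利用できることを前提としています:

```python
import chdb

# 補足スケッチ(前提: 上で作成した df が存在すること)。
# ratio 列を 7 行の移動平均で平滑化して、日々のばらつきをならします。
smoothed = chdb.query("""
SELECT x,
       avg(ratio) OVER (ORDER BY x ROWS BETWEEN 6 PRECEDING AND CURRENT ROW) AS ratio_7d
FROM Python(df)
ORDER BY x
""", "DataFrame")

print(smoothed.tail())
```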
-Pandas DataFramesのクエリについてさらに学ぶには、[Pandas DataFrames開発者ガイド](querying-pandas.md)を参照してください。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/chdb/guides/query-remote-clickhouse.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/chdb/guides/query-remote-clickhouse.md.hash deleted file mode 100644 index 1fe1e5218e9..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/chdb/guides/query-remote-clickhouse.md.hash +++ /dev/null @@ -1 +0,0 @@ -ab345efcc1f61943 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/chdb/guides/querying-apache-arrow.md b/i18n/jp/docusaurus-plugin-content-docs/current/chdb/guides/querying-apache-arrow.md deleted file mode 100644 index 9db1ca36a11..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/chdb/guides/querying-apache-arrow.md +++ /dev/null @@ -1,177 +0,0 @@ ---- -title: 'Apache ArrowをchDBでクエリする方法' -sidebar_label: 'Apache Arrowのクエリ' -slug: '/chdb/guides/apache-arrow' -description: 'このガイドでは、Apache ArrowのテーブルをchDBでクエリする方法について学びます' -keywords: -- 'chdb' -- 'Apache Arrow' ---- - - - -[Apache Arrow](https://arrow.apache.org/) はデータコミュニティで人気のある標準化された列指向メモリ形式です。 -このガイドでは、`Python` テーブル関数を使用して Apache Arrow をクエリする方法を学びます。 - -## セットアップ {#setup} - -まず最初に、仮想環境を作成しましょう: - -```bash -python -m venv .venv -source .venv/bin/activate -``` - -次に、chDB をインストールします。 -バージョン 2.0.2 以上であることを確認してください: - -```bash -pip install "chdb>=2.0.2" -``` - -次に PyArrow、pandas、および ipython をインストールします: - -```bash -pip install pyarrow pandas ipython -``` - -このガイドの残りのコマンドを実行するために `ipython` を使用します。次のコマンドで起動できます: - -```bash -ipython -``` - -Python スクリプトやお好みのノートブックでもこのコードを使用できます。 - -## ファイルから Apache Arrow テーブルを作成する {#creating-an-apache-arrow-table-from-a-file} - -まず、[Ooklaデータセット](https://github.com/teamookla/ookla-open-data) の Parquet ファイルの1つを、[AWS CLIツール](https://aws.amazon.com/cli/) を使用してダウンロードします: - -```bash -aws s3 cp \ - --no-sign \ - s3://ookla-open-data/parquet/performance/type=mobile/year=2023/quarter=2/2023-04-01_performance_mobile_tiles.parquet . 
-``` - -:::note -もっと多くのファイルをダウンロードしたい場合は、`aws s3 ls` を使用してすべてのファイルのリストを取得し、上記のコマンドを更新してください。 -::: - -次に、`pyarrow` パッケージから Parquet モジュールをインポートします: - -```python -import pyarrow.parquet as pq -``` - -次に、Parquet ファイルを Apache Arrow テーブルに読み込みます: - -```python -arrow_table = pq.read_table("./2023-04-01_performance_mobile_tiles.parquet") -``` - -スキーマは以下のように表示されます: - -```python -arrow_table.schema -``` - -```text -quadkey: string -tile: string -tile_x: double -tile_y: double -avg_d_kbps: int64 -avg_u_kbps: int64 -avg_lat_ms: int64 -avg_lat_down_ms: int32 -avg_lat_up_ms: int32 -tests: int64 -devices: int64 -``` - -`shape` 属性を呼び出すことで行数と列数を取得できます: - -```python -arrow_table.shape -``` - -```text -(3864546, 11) -``` - -## Apache Arrow をクエリする {#querying-apache-arrow} - -さあ、chDB から Arrow テーブルをクエリしましょう。 -まず、chDB をインポートします: - -```python -import chdb -``` - -次に、テーブルを説明します: - -```python -chdb.query(""" -DESCRIBE Python(arrow_table) -SETTINGS describe_compact_output=1 -""", "DataFrame") -``` - -```text - name type -0 quadkey String -1 tile String -2 tile_x Float64 -3 tile_y Float64 -4 avg_d_kbps Int64 -5 avg_u_kbps Int64 -6 avg_lat_ms Int64 -7 avg_lat_down_ms Int32 -8 avg_lat_up_ms Int32 -9 tests Int64 -10 devices Int64 -``` - -行数をカウントすることもできます: - -```python -chdb.query("SELECT count() FROM Python(arrow_table)", "DataFrame") -``` - -```text - count() -0 3864546 -``` - -次に、少し面白いことをしてみましょう。 -以下のクエリは `quadkey` および `tile.*` カラムを除外し、残りのすべてのカラムの平均値と最大値を計算します: - -```python -chdb.query(""" -WITH numericColumns AS ( - SELECT * EXCEPT ('tile.*') EXCEPT(quadkey) - FROM Python(arrow_table) -) -SELECT * APPLY(max), * APPLY(avg) APPLY(x -> round(x, 2)) -FROM numericColumns -""", "Vertical") -``` - -```text -Row 1: -────── -max(avg_d_kbps): 4155282 -max(avg_u_kbps): 1036628 -max(avg_lat_ms): 2911 -max(avg_lat_down_ms): 2146959360 -max(avg_lat_up_ms): 2146959360 -max(tests): 111266 -max(devices): 1226 -round(avg(avg_d_kbps), 2): 84393.52 -round(avg(avg_u_kbps), 2): 15540.4 -round(avg(avg_lat_ms), 2): 41.25 -round(avg(avg_lat_down_ms), 2): 554355225.76 -round(avg(avg_lat_up_ms), 2): 552843178.3 -round(avg(tests), 2): 6.31 -round(avg(devices), 2): 2.88 -``` diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/chdb/guides/querying-apache-arrow.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/chdb/guides/querying-apache-arrow.md.hash deleted file mode 100644 index c567d915e37..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/chdb/guides/querying-apache-arrow.md.hash +++ /dev/null @@ -1 +0,0 @@ -55698e72afc0928a diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/chdb/guides/querying-pandas.md b/i18n/jp/docusaurus-plugin-content-docs/current/chdb/guides/querying-pandas.md deleted file mode 100644 index 5cfc44d372c..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/chdb/guides/querying-pandas.md +++ /dev/null @@ -1,403 +0,0 @@ ---- -title: 'How to query Pandas DataFrames with chDB' -sidebar_label: 'Querying Pandas' -slug: '/chdb/guides/pandas' -description: 'Learn how to query Pandas DataFrames with chDB' -keywords: -- 'chdb' -- 'pandas' ---- - - - -[Pandas](https://pandas.pydata.org/) は、Python におけるデータ操作と分析のための人気のあるライブラリです。 -chDB のバージョン 2 では、Pandas DataFrame のクエリ性能を向上させ、`Python` テーブル関数を導入しました。 -このガイドでは、`Python` テーブル関数を使用して Pandas にクエリを実行する方法を学びます。 - -## セットアップ {#setup} - -まず、仮想環境を作成しましょう: - -```bash -python -m venv .venv -source .venv/bin/activate -``` - -次に、chDB をインストールします。 -バージョン 2.0.2 以上を持っていることを確認してください: - -```bash -pip install "chdb>=2.0.2" -``` - -次に、Pandas 
といくつかの他のライブラリをインストールします: - -```bash -pip install pandas requests ipython -``` - -これからのガイドのコマンドを実行するために `ipython` を使用します。以下のコマンドで起動できます: - -```bash -ipython -``` - -Python スクリプトやあなたのお気に入りのノートブックでもコードを使用できます。 - -## URL から Pandas DataFrame を作成する {#creating-a-pandas-dataframe-from-a-url} - -[StatsBomb GitHub リポジトリ](https://github.com/statsbomb/open-data/tree/master?tab=readme-ov-file) からデータをクエリします。 -まず、requests と pandas をインポートします: - -```python -import requests -import pandas as pd -``` - -次に、1 つの試合の JSON ファイルを DataFrame に読み込みます: - -```python -response = requests.get( - "https://raw.githubusercontent.com/statsbomb/open-data/master/data/matches/223/282.json" -) -matches_df = pd.json_normalize(response.json(), sep='_') -``` - -どのデータを扱うのか見てみましょう: - -```python -matches_df.iloc[0] -``` - -```text -match_id 3943077 -match_date 2024-07-15 -kick_off 04:15:00.000 -home_score 1 -away_score 0 -match_status available -match_status_360 unscheduled -last_updated 2024-07-15T15:50:08.671355 -last_updated_360 None -match_week 6 -competition_competition_id 223 -competition_country_name South America -competition_competition_name Copa America -season_season_id 282 -season_season_name 2024 -home_team_home_team_id 779 -home_team_home_team_name Argentina -home_team_home_team_gender male -home_team_home_team_group None -home_team_country_id 11 -home_team_country_name Argentina -home_team_managers [{'id': 5677, 'name': 'Lionel Sebastián Scalon... -away_team_away_team_id 769 -away_team_away_team_name Colombia -away_team_away_team_gender male -away_team_away_team_group None -away_team_country_id 49 -away_team_country_name Colombia -away_team_managers [{'id': 5905, 'name': 'Néstor Gabriel Lorenzo'... -metadata_data_version 1.1.0 -metadata_shot_fidelity_version 2 -metadata_xy_fidelity_version 2 -competition_stage_id 26 -competition_stage_name Final -stadium_id 5337 -stadium_name Hard Rock Stadium -stadium_country_id 241 -stadium_country_name United States of America -referee_id 2638 -referee_name Raphael Claus -referee_country_id 31 -referee_country_name Brazil -Name: 0, dtype: object -``` - -次に、1 つのイベントの JSON ファイルを読み込み、その DataFrame に `match_id` という列を追加します: - -```python -response = requests.get( - "https://raw.githubusercontent.com/statsbomb/open-data/master/data/events/3943077.json" -) -events_df = pd.json_normalize(response.json(), sep='_') -events_df["match_id"] = 3943077 -``` - -再度、最初の行を見てみましょう: - -```python -with pd.option_context("display.max_rows", None): - first_row = events_df.iloc[0] - non_nan_columns = first_row[first_row.notna()].T - display(non_nan_columns) -``` - -```text -id 279b7d66-92b5-4daa-8ff6-cba8fce271d9 -index 1 -period 1 -timestamp 00:00:00.000 -minute 0 -second 0 -possession 1 -duration 0.0 -type_id 35 -type_name Starting XI -possession_team_id 779 -possession_team_name Argentina -play_pattern_id 1 -play_pattern_name Regular Play -team_id 779 -team_name Argentina -tactics_formation 442.0 -tactics_lineup [{'player': {'id': 6909, 'name': 'Damián Emili... 
-match_id 3943077 -Name: 0, dtype: object -``` - -## Pandas DataFrame をクエリする {#querying-pandas-dataframes} - -次に、chDB を使ってこれらの DataFrame にクエリを実行する方法を見てみましょう。 -ライブラリをインポートします: - -```python -import chdb -``` - -Pandas DataFrame を `Python` テーブル関数を使用してクエリすることができます: - -```sql -SELECT * -FROM Python() -``` - -したがって、`matches_df` のカラムをリストアップしたい場合、次のように書くことができます: - -```python -chdb.query(""" -DESCRIBE Python(matches_df) -SETTINGS describe_compact_output=1 -""", "DataFrame") -``` - -```text - name type -0 match_id Int64 -1 match_date String -2 kick_off String -3 home_score Int64 -4 away_score Int64 -5 match_status String -6 match_status_360 String -7 last_updated String -8 last_updated_360 String -9 match_week Int64 -10 competition_competition_id Int64 -11 competition_country_name String -12 competition_competition_name String -13 season_season_id Int64 -14 season_season_name String -15 home_team_home_team_id Int64 -16 home_team_home_team_name String -17 home_team_home_team_gender String -18 home_team_home_team_group String -19 home_team_country_id Int64 -20 home_team_country_name String -21 home_team_managers String -22 away_team_away_team_id Int64 -23 away_team_away_team_name String -24 away_team_away_team_gender String -25 away_team_away_team_group String -26 away_team_country_id Int64 -27 away_team_country_name String -28 away_team_managers String -29 metadata_data_version String -30 metadata_shot_fidelity_version String -31 metadata_xy_fidelity_version String -32 competition_stage_id Int64 -33 competition_stage_name String -34 stadium_id Int64 -35 stadium_name String -36 stadium_country_id Int64 -37 stadium_country_name String -38 referee_id Int64 -39 referee_name String -40 referee_country_id Int64 -41 referee_country_name String -``` - -次に、過去に 1 回以上の試合を裁いた審判を見つけるために、以下のクエリを書くことができます: - -```python -chdb.query(""" -SELECT referee_name, count() AS count -FROM Python(matches_df) -GROUP BY ALL -HAVING count > 1 -ORDER BY count DESC -""", "DataFrame") -``` - -```text - referee_name count -0 César Arturo Ramos Palazuelos 3 -1 Maurizio Mariani 3 -2 Piero Maza Gomez 3 -3 Mario Alberto Escobar Toca 2 -4 Wilmar Alexander Roldán Pérez 2 -5 Jesús Valenzuela Sáez 2 -6 Wilton Pereira Sampaio 2 -7 Darío Herrera 2 -8 Andrés Matonte 2 -9 Raphael Claus 2 -``` - -次に、`events_df` を見てみましょう。 - -```python -chdb.query(""" -SELECT pass_recipient_name, count() -FROM Python(events_df) -WHERE type_name = 'Pass' AND pass_recipient_name <> '' -GROUP BY ALL -ORDER BY count() DESC -LIMIT 10 -""", "DataFrame") -``` - -```text - pass_recipient_name count() -0 Davinson Sánchez Mina 76 -1 Ángel Fabián Di María Hernández 64 -2 Alexis Mac Allister 62 -3 Enzo Fernandez 57 -4 James David Rodríguez Rubio 56 -5 Johan Andrés Mojica Palacio 55 -6 Rodrigo Javier De Paul 54 -7 Jefferson Andrés Lerma Solís 53 -8 Jhon Adolfo Arias Andrade 52 -9 Carlos Eccehomo Cuesta Figueroa 50 -``` - -## Pandas DataFrame を結合する {#joining-pandas-dataframes} - -クエリ内で DataFrame を結合することもできます。 -たとえば、試合の概要を得るために、以下のクエリを書くことができます: - -```python -chdb.query(""" -SELECT home_team_home_team_name, away_team_away_team_name, home_score, away_score, - countIf(type_name = 'Pass' AND possession_team_id=home_team_home_team_id) AS home_passes, - countIf(type_name = 'Pass' AND possession_team_id=away_team_away_team_id) AS away_passes, - countIf(type_name = 'Shot' AND possession_team_id=home_team_home_team_id) AS home_shots, - countIf(type_name = 'Shot' AND possession_team_id=away_team_away_team_id) AS away_shots -FROM Python(matches_df) AS matches -JOIN Python(events_df) AS 
events ON events.match_id = matches.match_id -GROUP BY ALL -LIMIT 5 -""", "DataFrame").iloc[0] -``` - -```text -home_team_home_team_name Argentina -away_team_away_team_name Colombia -home_score 1 -away_score 0 -home_passes 527 -away_passes 669 -home_shots 11 -away_shots 19 -Name: 0, dtype: object -``` - -## DataFrame からテーブルを作成する {#populating-a-table-from-a-dataframe} - -DataFrame から ClickHouse テーブルを作成して populate することも可能です。 -chDB にテーブルを作成するには Stateful Session API を使用する必要があります。 - -セッションモジュールをインポートしましょう: - -```python -from chdb import session as chs -``` - -セッションを初期化します: - -```python -sess = chs.Session() -``` - -次に、データベースを作成します: - -```python -sess.query("CREATE DATABASE statsbomb") -``` - -次に、`events_df` に基づいて `events` テーブルを作成します: - -```python -sess.query(""" -CREATE TABLE statsbomb.events ORDER BY id AS -SELECT * -FROM Python(events_df) -""") -``` - -その後、最も多くのパスを受け取った選手を返すクエリを実行します: - -```python -sess.query(""" -SELECT pass_recipient_name, count() -FROM statsbomb.events -WHERE type_name = 'Pass' AND pass_recipient_name <> '' -GROUP BY ALL -ORDER BY count() DESC -LIMIT 10 -""", "DataFrame") -``` - -```text - pass_recipient_name count() -0 Davinson Sánchez Mina 76 -1 Ángel Fabián Di María Hernández 64 -2 Alexis Mac Allister 62 -3 Enzo Fernandez 57 -4 James David Rodríguez Rubio 56 -5 Johan Andrés Mojica Palacio 55 -6 Rodrigo Javier De Paul 54 -7 Jefferson Andrés Lerma Solís 53 -8 Jhon Adolfo Arias Andrade 52 -9 Carlos Eccehomo Cuesta Figueroa 50 -``` - -## Pandas DataFrame とテーブルを結合する {#joining-a-pandas-dataframe-and-table} - -最後に、結合クエリを更新して `matches_df` DataFrame を `statsbomb.events` テーブルと結合することもできます: - -```python -sess.query(""" -SELECT home_team_home_team_name, away_team_away_team_name, home_score, away_score, - countIf(type_name = 'Pass' AND possession_team_id=home_team_home_team_id) AS home_passes, - countIf(type_name = 'Pass' AND possession_team_id=away_team_away_team_id) AS away_passes, - countIf(type_name = 'Shot' AND possession_team_id=home_team_home_team_id) AS home_shots, - countIf(type_name = 'Shot' AND possession_team_id=away_team_away_team_id) AS away_shots -FROM Python(matches_df) AS matches -JOIN statsbomb.events AS events ON events.match_id = matches.match_id -GROUP BY ALL -LIMIT 5 -""", "DataFrame").iloc[0] -``` - -```text -home_team_home_team_name Argentina -away_team_away_team_name Colombia -home_score 1 -away_score 0 -home_passes 527 -away_passes 669 -home_shots 11 -away_shots 19 -Name: 0, dtype: object -``` diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/chdb/guides/querying-pandas.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/chdb/guides/querying-pandas.md.hash deleted file mode 100644 index 9b0d4863080..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/chdb/guides/querying-pandas.md.hash +++ /dev/null @@ -1 +0,0 @@ -425df15bffc0f1e8 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/chdb/guides/querying-parquet.md b/i18n/jp/docusaurus-plugin-content-docs/current/chdb/guides/querying-parquet.md deleted file mode 100644 index b78d43d0fe5..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/chdb/guides/querying-parquet.md +++ /dev/null @@ -1,185 +0,0 @@ ---- -title: 'Parquetファイルのクエリ方法' -sidebar_label: 'Parquetファイルのクエリ' -slug: '/chdb/guides/querying-parquet' -description: 'chDBでParquetファイルをクエリする方法について学びます。' -keywords: -- 'chdb' -- 'parquet' ---- - - - -A lot of the world's data lives in Amazon S3 buckets. 
-このガイドでは、chDBを使用してそのデータをクエリする方法を学びます。 - -## Setup {#setup} - -まず、仮想環境を作成しましょう: - -```bash -python -m venv .venv -source .venv/bin/activate -``` - -次に、chDBをインストールします。 -バージョン2.0.2以上であることを確認してください: - -```bash -pip install "chdb>=2.0.2" -``` - -次に、IPythonをインストールします: - -```bash -pip install ipython -``` - -今後のガイドのコマンドを実行するために`ipython`を使用します。 -次のコマンドで起動できます: - -```bash -ipython -``` - -Pythonスクリプトやお気に入りのノートブックでもこのコードを使用できます。 - -## Exploring Parquet metadata {#exploring-parquet-metadata} - -[Amazon reviews](/getting-started/example-datasets/amazon-reviews)データセットからParquetファイルを探索します。 -まず、`chDB`をインストールしましょう: - -```python -import chdb -``` - -Parquetファイルをクエリする際には、ファイルの内容ではなくParquetメタデータを返すために、[`ParquetMetadata`](/interfaces/formats/ParquetMetadata)入力形式を使用できます。 -この形式を使用したときに返されるフィールドを見るために`DESCRIBE`句を使用しましょう: - -```python -query = """ -DESCRIBE s3( - 'https://datasets-documentation.s3.eu-west-3.amazonaws.com/amazon_reviews/amazon_reviews_2015.snappy.parquet', - ParquetMetadata -) -SETTINGS describe_compact_output=1 -""" - -chdb.query(query, 'TabSeparated') -``` - -```text -num_columns UInt64 -num_rows UInt64 -num_row_groups UInt64 -format_version String -metadata_size UInt64 -total_uncompressed_size UInt64 -total_compressed_size UInt64 -columns Array(Tuple(name String, path String, max_definition_level UInt64, max_repetition_level UInt64, physical_type String, logical_type String, compression String, total_uncompressed_size UInt64, total_compressed_size UInt64, space_saved String, encodings Array(String))) -row_groups Array(Tuple(num_columns UInt64, num_rows UInt64, total_uncompressed_size UInt64, total_compressed_size UInt64, columns Array(Tuple(name String, path String, total_compressed_size UInt64, total_uncompressed_size UInt64, have_statistics Bool, statistics Tuple(num_values Nullable(UInt64), null_count Nullable(UInt64), distinct_count Nullable(UInt64), min Nullable(String), max Nullable(String)))))) -``` - -このファイルのメタデータを見てみましょう。 -`columns`と`row_groups`は、それぞれ多くのプロパティを含むタプルの配列を含んでいるため、今回はこれを除外します。 - -```python -query = """ -SELECT * EXCEPT(columns, row_groups) -FROM s3( - 'https://datasets-documentation.s3.eu-west-3.amazonaws.com/amazon_reviews/amazon_reviews_2015.snappy.parquet', - ParquetMetadata -) -""" - -chdb.query(query, 'Vertical') -``` - -```text -Row 1: -────── -num_columns: 15 -num_rows: 41905631 -num_row_groups: 42 -format_version: 2.6 -metadata_size: 79730 -total_uncompressed_size: 14615827169 -total_compressed_size: 9272262304 -``` - -この出力から、このParquetファイルは4200万行以上を持ち、42の行グループに分割され、各行に15カラムのデータがあることがわかります。 -行グループは、データを行に水平に論理的にパーティショニングしたものです。 -各行グループには関連するメタデータがあり、クエリツールはそのメタデータを利用してファイルを効率的にクエリできます。 - -行グループの1つを見てみましょう: - -```python -query = """ -WITH rowGroups AS ( - SELECT rg - FROM s3( - 'https://datasets-documentation.s3.eu-west-3.amazonaws.com/amazon_reviews/amazon_reviews_2015.snappy.parquet', - ParquetMetadata - ) - ARRAY JOIN row_groups AS rg - LIMIT 1 -) -SELECT tupleElement(c, 'name') AS name, tupleElement(c, 'total_compressed_size') AS total_compressed_size, - tupleElement(c, 'total_uncompressed_size') AS total_uncompressed_size, - tupleElement(tupleElement(c, 'statistics'), 'min') AS min, - tupleElement(tupleElement(c, 'statistics'), 'max') AS max -FROM rowGroups -ARRAY JOIN tupleElement(rg, 'columns') AS c -""" - -chdb.query(query, 'DataFrame') -``` - -```text - name total_compressed_size total_uncompressed_size min max -0 review_date 493 646 16455 16472 -1 marketplace 66 64 US US -2 customer_id 5207967 7997207 10049 53096413 -3 review_id 14748425 17991290 
R10004U8OQDOGE RZZZUTBAV1RYI -4 product_id 8003456 13969668 0000032050 BT00DDVMVQ -5 product_parent 5758251 7974737 645 999999730 -6 product_title 41068525 63355320 ! Small S 1pc Black 1pc Navy (Blue) Replacemen... 🌴 Vacation On The Beach -7 product_category 1726 1815 Apparel Pet Products -8 star_rating 369036 374046 1 5 -9 helpful_votes 538940 1022990 0 3440 -10 total_votes 610902 1080520 0 3619 -11 vine 11426 125999 0 1 -12 verified_purchase 102634 125999 0 1 -13 review_headline 16538189 27634740 🤹🏽‍♂️🎤Great product. Practice makes perfect. D... -14 review_body 145886383 232457911 🚅 +🐧=💥 😀 -``` - -## Querying Parquet files {#querying-parquet-files} - -次に、ファイルの内容をクエリします。 -上記のクエリから`ParquetMetadata`を削除することで、すべてのレビューにわたる最も人気のある`star_rating`を計算できます: - -```python -query = """ -SELECT star_rating, count() AS count, formatReadableQuantity(count) -FROM s3( - 'https://datasets-documentation.s3.eu-west-3.amazonaws.com/amazon_reviews/amazon_reviews_2015.snappy.parquet' -) -GROUP BY ALL -ORDER BY star_rating -""" - -chdb.query(query, 'DataFrame') -``` - -```text - star_rating count formatReadableQuantity(count()) -0 1 3253070 3.25 million -1 2 1865322 1.87 million -2 3 3130345 3.13 million -3 4 6578230 6.58 million -4 5 27078664 27.08 million -``` - -興味深いことに、5つ星のレビューは他のすべての評価を合わせたよりも多いです! -アマゾンの製品が好まれているようです、あるいは、もし好まれていないのなら、評価を提出していないだけかもしれません。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/chdb/guides/querying-parquet.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/chdb/guides/querying-parquet.md.hash deleted file mode 100644 index 513b356b2e0..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/chdb/guides/querying-parquet.md.hash +++ /dev/null @@ -1 +0,0 @@ -988a838efd05b5ea diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/chdb/guides/querying-s3-bucket.md b/i18n/jp/docusaurus-plugin-content-docs/current/chdb/guides/querying-s3-bucket.md deleted file mode 100644 index 2ae699ed1ac..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/chdb/guides/querying-s3-bucket.md +++ /dev/null @@ -1,202 +0,0 @@ ---- -title: 'S3 バケット内のデータのクエリ方法' -sidebar_label: 'S3 でのデータクエリ' -slug: '/chdb/guides/querying-s3' -description: 'chDB で S3 バケット内のデータをクエリする方法を学びます。' -keywords: -- 'chdb' -- 's3' ---- - - - -A lot of the world's data lives in Amazon S3 buckets. 
-このガイドでは、chDBを使用してそのデータをクエリする方法を学びます。 - -## Setup {#setup} - -まずは仮想環境を作成しましょう: - -```bash -python -m venv .venv -source .venv/bin/activate -``` - -次にchDBをインストールします。 -バージョン2.0.2以上であることを確認してください: - -```bash -pip install "chdb>=2.0.2" -``` - -続いてIPythonをインストールします: - -```bash -pip install ipython -``` - -`ipython`を使って、ガイドの残りのコマンドを実行します。 -以下のコマンドで`ipython`を起動できます: - -```bash -ipython -``` - -または、Pythonスクリプトやお好みのノートブックでもこのコードを使用できます。 - -## Listing files in an S3 bucket {#listing-files-in-an-s3-bucket} - -最初に、[Amazonレビューを含むS3バケットの全ファイルをリストアップ](/getting-started/example-datasets/amazon-reviews)しましょう。 -これを行うには、[`s3`テーブル関数](/sql-reference/table-functions/s3)を使用し、ファイルへのパスまたはワイルドカードを渡します。 - -:::tip -バケット名のみを渡すと、例外が発生します。 -::: - -また、[`One`](/interfaces/formats#data-format-one)入力フォーマットを使用して、ファイルが解析されず、ファイルごとに1行が返され、`_file`仮想カラムと`_path`仮想カラム経由でファイルやパスにアクセスできるようにします。 - -```python -import chdb - -chdb.query(""" -SELECT - _file, - _path -FROM s3('s3://datasets-documentation/amazon_reviews/*.parquet', One) -SETTINGS output_format_pretty_row_numbers=0 -""", 'PrettyCompact') -``` - -```text -┌─_file───────────────────────────────┬─_path─────────────────────────────────────────────────────────────────────┐ -│ amazon_reviews_2010.snappy.parquet │ datasets-documentation/amazon_reviews/amazon_reviews_2010.snappy.parquet │ -│ amazon_reviews_1990s.snappy.parquet │ datasets-documentation/amazon_reviews/amazon_reviews_1990s.snappy.parquet │ -│ amazon_reviews_2013.snappy.parquet │ datasets-documentation/amazon_reviews/amazon_reviews_2013.snappy.parquet │ -│ amazon_reviews_2015.snappy.parquet │ datasets-documentation/amazon_reviews/amazon_reviews_2015.snappy.parquet │ -│ amazon_reviews_2014.snappy.parquet │ datasets-documentation/amazon_reviews/amazon_reviews_2014.snappy.parquet │ -│ amazon_reviews_2012.snappy.parquet │ datasets-documentation/amazon_reviews/amazon_reviews_2012.snappy.parquet │ -│ amazon_reviews_2000s.snappy.parquet │ datasets-documentation/amazon_reviews/amazon_reviews_2000s.snappy.parquet │ -│ amazon_reviews_2011.snappy.parquet │ datasets-documentation/amazon_reviews/amazon_reviews_2011.snappy.parquet │ -└─────────────────────────────────────┴───────────────────────────────────────────────────────────────────────────┘ -``` - -このバケットには、Parquetファイルのみが含まれています。 - -## Querying files in an S3 bucket {#querying-files-in-an-s3-bucket} - -次に、これらのファイルをクエリする方法を学びましょう。 -これらのファイルの各行数を数えたい場合、以下のクエリを実行できます: - -```python -chdb.query(""" -SELECT - _file, - count() AS count, - formatReadableQuantity(count) AS readableCount -FROM s3('s3://datasets-documentation/amazon_reviews/*.parquet') -GROUP BY ALL -SETTINGS output_format_pretty_row_numbers=0 -""", 'PrettyCompact') -``` - -```text -┌─_file───────────────────────────────┬────count─┬─readableCount───┐ -│ amazon_reviews_2013.snappy.parquet │ 28034255 │ 28.03 million │ -│ amazon_reviews_1990s.snappy.parquet │ 639532 │ 639.53 thousand │ -│ amazon_reviews_2011.snappy.parquet │ 6112495 │ 6.11 million │ -│ amazon_reviews_2015.snappy.parquet │ 41905631 │ 41.91 million │ -│ amazon_reviews_2012.snappy.parquet │ 11541011 │ 11.54 million │ -│ amazon_reviews_2000s.snappy.parquet │ 14728295 │ 14.73 million │ -│ amazon_reviews_2014.snappy.parquet │ 44127569 │ 44.13 million │ -│ amazon_reviews_2010.snappy.parquet │ 3868472 │ 3.87 million │ -└─────────────────────────────────────┴──────────┴─────────────────┘ -``` - -S3バケットのHTTP URIを渡すことでも同じ結果が得られます: - -```python -chdb.query(""" -SELECT - _file, - count() AS count, - formatReadableQuantity(count) AS readableCount -FROM 
s3('https://datasets-documentation.s3.eu-west-3.amazonaws.com/amazon_reviews/*.parquet') -GROUP BY ALL -SETTINGS output_format_pretty_row_numbers=0 -""", 'PrettyCompact') -``` - -`DESCRIBE`句を使用してこれらのParquetファイルのスキーマを確認しましょう: - -```python -chdb.query(""" -DESCRIBE s3('s3://datasets-documentation/amazon_reviews/*.parquet') -SETTINGS describe_compact_output=1 -""", 'PrettyCompact') -``` - -```text - ┌─name──────────────┬─type─────────────┐ - 1. │ review_date │ Nullable(UInt16) │ - 2. │ marketplace │ Nullable(String) │ - 3. │ customer_id │ Nullable(UInt64) │ - 4. │ review_id │ Nullable(String) │ - 5. │ product_id │ Nullable(String) │ - 6. │ product_parent │ Nullable(UInt64) │ - 7. │ product_title │ Nullable(String) │ - 8. │ product_category │ Nullable(String) │ - 9. │ star_rating │ Nullable(UInt8) │ -10. │ helpful_votes │ Nullable(UInt32) │ -11. │ total_votes │ Nullable(UInt32) │ -12. │ vine │ Nullable(Bool) │ -13. │ verified_purchase │ Nullable(Bool) │ -14. │ review_headline │ Nullable(String) │ -15. │ review_body │ Nullable(String) │ - └───────────────────┴──────────────────┘ -``` - -今、レビュー数に基づいてトップの製品カテゴリを計算し、平均星評価を計算しましょう: - -```python -chdb.query(""" -SELECT product_category, count() AS reviews, round(avg(star_rating), 2) as avg -FROM s3('s3://datasets-documentation/amazon_reviews/*.parquet') -GROUP BY ALL -LIMIT 10 -""", 'PrettyCompact') -``` - -```text - ┌─product_category─┬──reviews─┬──avg─┐ - 1. │ Toys │ 4864056 │ 4.21 │ - 2. │ Apparel │ 5906085 │ 4.11 │ - 3. │ Luggage │ 348644 │ 4.22 │ - 4. │ Kitchen │ 4880297 │ 4.21 │ - 5. │ Books │ 19530930 │ 4.34 │ - 6. │ Outdoors │ 2302327 │ 4.24 │ - 7. │ Video │ 380596 │ 4.19 │ - 8. │ Grocery │ 2402365 │ 4.31 │ - 9. │ Shoes │ 4366757 │ 4.24 │ -10. │ Jewelry │ 1767667 │ 4.14 │ - └──────────────────┴──────────┴──────┘ -``` - -## Querying files in a private S3 bucket {#querying-files-in-a-private-s3-bucket} - -プライベートS3バケットのファイルをクエリする場合、アクセスキーとシークレットを渡す必要があります。 -これらの認証情報を`s3`テーブル関数に渡すことができます: - -```python -chdb.query(""" -SELECT product_category, count() AS reviews, round(avg(star_rating), 2) as avg -FROM s3('s3://datasets-documentation/amazon_reviews/*.parquet', 'access-key', 'secret') -GROUP BY ALL -LIMIT 10 -""", 'PrettyCompact') -``` - -:::note -このクエリは公開バケットのため、動作しません! -::: - -別の方法は、[名前付きコレクション](/operations/named-collections)を使用することですが、このアプローチはまだchDBによってサポートされていません。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/chdb/guides/querying-s3-bucket.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/chdb/guides/querying-s3-bucket.md.hash deleted file mode 100644 index fcf4da4e5b5..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/chdb/guides/querying-s3-bucket.md.hash +++ /dev/null @@ -1 +0,0 @@ -22a180324f488326 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/chdb/index.md b/i18n/jp/docusaurus-plugin-content-docs/current/chdb/index.md deleted file mode 100644 index 5895e0a3c2a..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/chdb/index.md +++ /dev/null @@ -1,69 +0,0 @@ ---- -title: 'chDB' -sidebar_label: '概要' -slug: '/chdb' -description: 'chDB は ClickHouse によってパワーアップされたインプロセス SQL OLAP エンジンです。' -keywords: -- 'chdb' -- 'embedded' -- 'clickhouse-lite' -- 'in-process' -- 'in process' ---- - - - - -# chDB - -chDBは、[ClickHouse](https://github.com/clickhouse/clickhouse)に基づいた、高速なプロセス内SQL OLAPエンジンです。ClickHouseサーバーに接続することなく、プログラミング言語でClickHouseの機能を利用したいときに使用できます。 - -## chDBはどの言語をサポートしていますか? 
{#what-languages-are-supported-by-chdb} - -chDBには以下の言語バインディングがあります: - -* [Python](install/python.md) -* [Go](install/go.md) -* [Rust](install/rust.md) -* [NodeJS](install/nodejs.md) -* [Bun](install/bun.md) - -## どの入力および出力フォーマットがサポートされていますか? {#what-input-and-output-formats-are-supported} - -chDBはParquet、CSV、JSON、Apache Arrow、ORC、および[60以上のフォーマット](/interfaces/formats)をサポートしています。 - -## どのように始めればよいですか? {#how-do-i-get-started} - -* [Go](install/go.md)、[Rust](install/rust.md)、[NodeJS](install/nodejs.md)、または[Bun](install/bun.md)を使用している場合は、対応する言語ページを参照してください。 -* Pythonを使用している場合は、[はじめに開発者ガイド](getting-started.md)を参照してください。一般的なタスクを行う方法を示すガイドもあります: - * [JupySQL](guides/jupysql.md) - * [Pandasのクエリ](guides/querying-pandas.md) - * [Apache Arrowのクエリ](guides/querying-apache-arrow.md) - * [S3のデータのクエリ](guides/querying-s3-bucket.md) - * [Parquetファイルのクエリ](guides/querying-parquet.md) - * [リモートClickHouseのクエリ](guides/query-remote-clickhouse.md) - * [clickhouse-localデータベースの使用](guides/clickhouse-local.md) - - - -## イントロダクションビデオ {#an-introductory-video} - -ClickHouseの元クリエイターであるAlexey Milovidovによる、chDBプロジェクトの簡単な紹介をお聞きいただけます: - -
- -
- -## chDBについて {#about-chdb} - -- [Auxtenのブログ](https://clickhouse.com/blog/chdb-embedded-clickhouse-rocket-engine-on-a-bicycle)でchDBプロジェクトの誕生の全ストーリーをお読みください -- [公式ClickHouseブログ](https://clickhouse.com/blog/welcome-chdb-to-clickhouse)でchDBとそのユースケースについてお読みください -- [codapi examples](https://antonz.org/trying-chdb/)を使ってブラウザでchDBを発見してください。 - -## どのライセンスを使用していますか? {#what-license-does-it-use} - -chDBはApache License, Version 2.0のもとで提供されています。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/chdb/index.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/chdb/index.md.hash deleted file mode 100644 index 0a5f5e3777b..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/chdb/index.md.hash +++ /dev/null @@ -1 +0,0 @@ -3b039ff406752922 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/chdb/install/bun.md b/i18n/jp/docusaurus-plugin-content-docs/current/chdb/install/bun.md deleted file mode 100644 index 877728261b1..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/chdb/install/bun.md +++ /dev/null @@ -1,60 +0,0 @@ ---- -title: 'Bun 用の chDB のインストール' -sidebar_label: 'Bun' -slug: '/chdb/install/bun' -description: 'Bun 用の chDB のインストール方法' -keywords: -- 'chdb' -- 'embedded' -- 'clickhouse-lite' -- 'bun' -- 'install' ---- - - - - -# Bun用の chDB のインストール - -## 要件 {#requirements} - -[libchdb](https://github.com/chdb-io/chdb) をインストールします: - -```bash -curl -sL https://lib.chdb.io | bash -``` - -## インストール {#install} - -参照: [chdb-bun](https://github.com/chdb-io/chdb-bun) - -## GitHub リポジトリ {#github-repository} - -プロジェクトの GitHub リポジトリは [chdb-io/chdb-bun](https://github.com/chdb-io/chdb-bun) で見つけることができます。 - -## 使用法 {#usage} - -### Query(query, *format) (エフェメラル) {#queryquery-format-ephemeral} - -```javascript -// クエリ (エフェメラル) -var result = query("SELECT version()", "CSV"); -console.log(result); // 23.10.1.1 -``` - - -### Session.Query(query, *format) {#sessionqueryquery-format} - - -```javascript -const sess = new Session('./chdb-bun-tmp'); - -// セッションでのクエリ (永続的) -sess.query("CREATE FUNCTION IF NOT EXISTS hello AS () -> 'Hello chDB'", "CSV"); -var result = sess.query("SELECT hello()", "CSV"); -console.log(result); - -// クリーンアップ前に、`./chdb-bun-tmp` にデータベースファイルが見つかります。 - -sess.cleanup(); // セッションをクリーンアップします。これによりデータベースが削除されます。 -``` diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/chdb/install/bun.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/chdb/install/bun.md.hash deleted file mode 100644 index 01682d0fb36..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/chdb/install/bun.md.hash +++ /dev/null @@ -1 +0,0 @@ -38ae1ccc991c9c80 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/chdb/install/c.md b/i18n/jp/docusaurus-plugin-content-docs/current/chdb/install/c.md deleted file mode 100644 index d7194aed933..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/chdb/install/c.md +++ /dev/null @@ -1,51 +0,0 @@ ---- -title: 'C および C++ 用の chDB のインストール' -sidebar_label: 'C および C++' -slug: '/chdb/install/c' -description: 'C および C++ 用の chDB のインストール方法' -keywords: -- 'chdb' -- 'embedded' -- 'clickhouse-lite' -- 'install' ---- - - - - -# chDBのCおよびC++へのインストール - -## 要件 {#requirements} - -[libchdb](https://github.com/chdb-io/chdb) をインストールします: - -```bash -curl -sL https://lib.chdb.io | bash -``` - -## 使用法 {#usage} - -[libchdb](https://github.com/chdb-io/chdb/blob/main/bindings.md) の手順に従って始めてください。 - -`chdb.h` - -```c -#pragma once -#include -#include - -extern "C" { -struct local_result -{ - char * buf; - size_t len; - void * _vec; // 
std::vector *, 解放用 - double elapsed; - uint64_t rows_read; - uint64_t bytes_read; -}; - -local_result * query_stable(int argc, char ** argv); -void free_result(local_result * result); -} -``` diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/chdb/install/c.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/chdb/install/c.md.hash deleted file mode 100644 index b22edc99a88..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/chdb/install/c.md.hash +++ /dev/null @@ -1 +0,0 @@ -66d7e1632baf1f01 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/chdb/install/go.md b/i18n/jp/docusaurus-plugin-content-docs/current/chdb/install/go.md deleted file mode 100644 index 891b4f97ab9..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/chdb/install/go.md +++ /dev/null @@ -1,38 +0,0 @@ ---- -title: 'Installing chDB for Go' -sidebar_label: 'Go' -slug: '/chdb/install/go' -description: 'How to install chDB for Go' -keywords: -- 'chdb' -- 'embedded' -- 'clickhouse-lite' -- 'go' -- 'install' ---- - - - - -# Go のための chDB のインストール - -## 要件 {#requirements} - -[libchdb](https://github.com/chdb-io/chdb) をインストールします: - -```bash -curl -sL https://lib.chdb.io | bash -``` - -## インストール {#install} - -詳しくは: [chdb-go](https://github.com/chdb-io/chdb-go) - -## GitHub リポジトリ {#github-repository} - -プロジェクトの GitHub リポジトリは [chdb-io/chdb-go](https://github.com/chdb-io/chdb-go) で見つけることができます。 - -## 使用法 {#usage} - -- API ドキュメント: [高レベル API](https://github.com/chdb-io/chdb-go/blob/main/chdb.md) -- 低レベル API ドキュメント: [低レベル API](https://github.com/chdb-io/chdb-go/blob/main/lowApi.md) diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/chdb/install/go.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/chdb/install/go.md.hash deleted file mode 100644 index 34b334f21a6..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/chdb/install/go.md.hash +++ /dev/null @@ -1 +0,0 @@ -b2b885728e769679 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/chdb/install/index.md b/i18n/jp/docusaurus-plugin-content-docs/current/chdb/install/index.md deleted file mode 100644 index 8c7a74e3089..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/chdb/install/index.md +++ /dev/null @@ -1,26 +0,0 @@ ---- -title: 'Language Integrations Index' -slug: '/chdb/install' -description: 'Index page for chDB language integrations' -keywords: -- 'python' -- 'NodeJS' -- 'Go' -- 'Rust' -- 'Bun' -- 'C' -- 'C++' ---- - - - -chDBのセットアップに関する手順は、以下の言語およびランタイムに対して利用可能です: - -| 言語 | -|----------------------------------------| -| [Python](/chdb/install/python) | -| [NodeJS](/chdb/install/nodejs) | -| [Go](/chdb/install/go) | -| [Rust](/chdb/install/rust) | -| [Bun](/chdb/install/bun) | -| [C and C++](/chdb/install/c) | diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/chdb/install/index.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/chdb/install/index.md.hash deleted file mode 100644 index e253c9f8431..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/chdb/install/index.md.hash +++ /dev/null @@ -1 +0,0 @@ -76e84f59d6a713d8 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/chdb/install/nodejs.md b/i18n/jp/docusaurus-plugin-content-docs/current/chdb/install/nodejs.md deleted file mode 100644 index 1a29e13bb93..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/chdb/install/nodejs.md +++ /dev/null @@ -1,74 +0,0 @@ ---- -title: 'NodeJS 用の chDB のインストール' -sidebar_label: 'NodeJS' -slug: '/chdb/install/nodejs' -description: 'NodeJS 用の chDB 
のインストール方法' -keywords: -- 'chdb' -- 'embedded' -- 'clickhouse-lite' -- 'NodeJS' -- 'install' ---- - - - - -# chDBのNodeJS用インストール - -## 要件 {#requirements} - -[libchdb](https://github.com/chdb-io/chdb)をインストールします: - -```bash -curl -sL https://lib.chdb.io | bash -``` - -## インストール {#install} - -```bash -npm i chdb -``` - -## GitHubリポジトリ {#github-repository} - -プロジェクトのGitHubリポジトリは[chdb-io/chdb-node](https://github.com/chdb-io/chdb-node)で見つけることができます。 - - -## 使用法 {#usage} - -NodeJSアプリケーションでchdbの力を活用するために、chdb-nodeモジュールをインポートして使用します: - -```javascript -const { query, Session } = require("chdb"); - -var ret; - -// スタンドアロンクエリをテスト -ret = query("SELECT version(), 'Hello chDB', chdb()", "CSV"); -console.log("スタンドアロンクエリの結果:", ret); - -// セッションクエリをテスト -// 新しいセッションインスタンスを作成 -const session = new Session("./chdb-node-tmp"); -ret = session.query("SELECT 123", "CSV") -console.log("セッションクエリの結果:", ret); -ret = session.query("CREATE DATABASE IF NOT EXISTS testdb;" + - "CREATE TABLE IF NOT EXISTS testdb.testtable (id UInt32) ENGINE = MergeTree() ORDER BY id;"); - -session.query("USE testdb; INSERT INTO testtable VALUES (1), (2), (3);") - -ret = session.query("SELECT * FROM testtable;") -console.log("セッションクエリの結果:", ret); - -// セッションをクリーンアップ -session.cleanup(); -``` - -## ソースからビルド {#build-from-source} - -```bash -npm run libchdb -npm install -npm run test -``` diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/chdb/install/nodejs.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/chdb/install/nodejs.md.hash deleted file mode 100644 index fe9fd326416..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/chdb/install/nodejs.md.hash +++ /dev/null @@ -1 +0,0 @@ -63a51001fc4d0ca5 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/chdb/install/python.md b/i18n/jp/docusaurus-plugin-content-docs/current/chdb/install/python.md deleted file mode 100644 index 86b5184f361..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/chdb/install/python.md +++ /dev/null @@ -1,269 +0,0 @@ ---- -title: 'Installing chDB for Python' -sidebar_label: 'Python' -slug: '/chdb/install/python' -description: 'How to install chDB for Python' -keywords: -- 'chdb' -- 'embedded' -- 'clickhouse-lite' -- 'python' -- 'install' ---- - - - - -# chDB のインストール - -## 必要条件 {#requirements} - -macOS および Linux (x86_64 および ARM64) 上の Python 3.8+ - -## インストール {#install} - -```bash -pip install chdb -``` - -## 使用法 {#usage} - -CLI の例: - -```python -python3 -m chdb [SQL] [OutputFormat] -``` - -```python -python3 -m chdb "SELECT 1, 'abc'" Pretty -``` - -Python ファイルの例: - -```python -import chdb - -res = chdb.query("SELECT 1, 'abc'", "CSV") -print(res, end="") -``` - -クエリは、任意の [サポートされているフォーマット](/interfaces/formats)や `Dataframe`、`Debug` を使用してデータを返すことができます。 - -## GitHub リポジトリ {#github-repository} - -プロジェクトの GitHub リポジトリは [chdb-io/chdb](https://github.com/chdb-io/chdb) で見つけることができます。 - -## データ入力 {#data-input} - -ディスク上およびメモリ内のデータ形式にアクセスするための以下のメソッドが利用可能です。 - -### ファイルクエリ (Parquet, CSV, JSON, Arrow, ORC と 60+ 形式) {#query-on-file-parquet-csv-json-arrow-orc-and-60} - -SQL を実行し、希望の形式のデータを返すことができます。 - -```python -import chdb -res = chdb.query('select version()', 'Pretty'); print(res) -``` - -**Parquet または CSV で操作する** - -```python - -# tests/format_output.py にてさらに多くのデータ型フォーマットを参照 -res = chdb.query('select * from file("data.parquet", Parquet)', 'JSON'); print(res) -res = chdb.query('select * from file("data.csv", CSV)', 'CSV'); print(res) -print(f"SQL が {res.rows_read()} 行を読み取り、{res.bytes_read()} バイト、経過時間 {res.elapsed()} 秒") -``` - -**Pandas 
DataFrame 出力** -```python - -# https://clickhouse.com/docs/interfaces/formats にてさらに参照 -chdb.query('select * from file("data.parquet", Parquet)', 'Dataframe') -``` - -### テーブルクエリ (Pandas DataFrame, Parquet ファイル/バイト, Arrow バイト) {#query-on-table-pandas-dataframe-parquet-filebytes-arrow-bytes} - -**Pandas DataFrame でのクエリ** - -```python -import chdb.dataframe as cdf -import pandas as pd - -# 2 つの DataFrame を結合 -df1 = pd.DataFrame({'a': [1, 2, 3], 'b': ["one", "two", "three"]}) -df2 = pd.DataFrame({'c': [1, 2, 3], 'd': ["①", "②", "③"]}) -ret_tbl = cdf.query(sql="select * from __tbl1__ t1 join __tbl2__ t2 on t1.a = t2.c", - tbl1=df1, tbl2=df2) -print(ret_tbl) - -# DataFrame テーブルでのクエリ -print(ret_tbl.query('select b, sum(a) from __table__ group by b')) -``` - -### ステートフルセッションを使用したクエリ {#query-with-stateful-session} - - セッションは、クエリの状態を保持します。すべての DDL および DML 状態はディレクトリに保持されます。ディレクトリパスは引数として渡すことができます。渡されない場合、一時ディレクトリが作成されます。 - -パスが指定されていない場合、セッションオブジェクトが削除されると一時ディレクトリも削除されます。さもなければ、パスが保持されます。 - -デフォルトのデータベースは `_local` で、デフォルトのエンジンは `Memory` であるため、すべてのデータがメモリに保存されます。ディスクにデータを保存したい場合は、別のデータベースを作成する必要があります。 - -```python -from chdb import session as chs - -## 一時セッションで DB、テーブル、ビューを作成し、セッション削除時に自動的にクリーンアップ -sess = chs.Session() -sess.query("CREATE DATABASE IF NOT EXISTS db_xxx ENGINE = Atomic") -sess.query("CREATE TABLE IF NOT EXISTS db_xxx.log_table_xxx (x String, y Int) ENGINE = Log;") -sess.query("INSERT INTO db_xxx.log_table_xxx VALUES ('a', 1), ('b', 3), ('c', 2), ('d', 5);") -sess.query( - "CREATE VIEW db_xxx.view_xxx AS SELECT * FROM db_xxx.log_table_xxx LIMIT 4;" -) -print("ビューから選択:\n") -print(sess.query("SELECT * FROM db_xxx.view_xxx", "Pretty")) -``` - -こちらも参照: [test_stateful.py](https://github.com/chdb-io/chdb/blob/main/tests/test_stateful.py). - -### Python DB-API 2.0 を使用したクエリ {#query-with-python-db-api-20} - -```python -import chdb.dbapi as dbapi -print("chdb ドライバーのバージョン: {0}".format(dbapi.get_client_info())) - -conn1 = dbapi.connect() -cur1 = conn1.cursor() -cur1.execute('select version()') -print("説明: ", cur1.description) -print("データ: ", cur1.fetchone()) -cur1.close() -conn1.close() -``` - -### UDF (ユーザー定義関数) を使用したクエリ {#query-with-udf-user-defined-functions} - -```python -from chdb.udf import chdb_udf -from chdb import query - -@chdb_udf() -def sum_udf(lhs, rhs): - return int(lhs) + int(rhs) - -print(query("select sum_udf(12,22)")) -``` - -chDB Python UDF (ユーザー定義関数) デコレーターについてのいくつかの注意点。 -1. 関数はステートレスである必要があります。UDF のみがサポートされており、UDAF (ユーザー定義集計関数) はサポートされていません。 -2. デフォルトの戻り値の型は String です。戻り値の型を変更したい場合は、引数として戻り値の型を渡すことができます。戻り値の型は [以下のいずれか](/sql-reference/data-types) にする必要があります。 -3. 関数は String 型の引数を取る必要があります。入力が TabSeparated であるため、全ての引数は文字列となります。 -4. 関数は入力の各行に対して呼び出されます。例: - ```python - def sum_udf(lhs, rhs): - return int(lhs) + int(rhs) - - for line in sys.stdin: - args = line.strip().split('\t') - lhs = args[0] - rhs = args[1] - print(sum_udf(lhs, rhs)) - sys.stdout.flush() - ``` -5. 関数は純粋な Python 関数である必要があります。関数内で使用されるすべての Python モジュールをインポートする必要があります。 - ```python - def func_use_json(arg): - import json - ... - ``` -6. 使用される Python インタープリターは、スクリプトを実行するのに使用されるものと同じです。`sys.executable` から取得できます。 - -こちらも参照: [test_udf.py](https://github.com/chdb-io/chdb/blob/main/tests/test_udf.py). 
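補足として、戻り値の型を指定する UDF の小さなスケッチを示します。これは元のドキュメントにはない追加例で、デコレーターの戻り値型引数名が `return_type` であること(および関数 `text_len` が説明用の仮の名前であること)を前提としています:

```python
from chdb.udf import chdb_udf
from chdb import query

# 仮のスケッチ: 戻り値の型を String 以外(ここでは Int32)として宣言する UDF。
# 引数名 return_type は前提なので、利用中の chdb バージョンのシグネチャを確認してください。
@chdb_udf(return_type="Int32")
def text_len(s):
    # 入力は TabSeparated 由来のため、引数は常に文字列として渡されます
    return len(s)

print(query("SELECT text_len('hello chDB')"))
```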
- -### Python テーブルエンジン {#python-table-engine} - -### Pandas DataFrame でのクエリ {#query-on-pandas-dataframe} - -```python -import chdb -import pandas as pd -df = pd.DataFrame( - { - "a": [1, 2, 3, 4, 5, 6], - "b": ["tom", "jerry", "auxten", "tom", "jerry", "auxten"], - } -) - -chdb.query("SELECT b, sum(a) FROM Python(df) GROUP BY b ORDER BY b").show() -``` - -### Arrow テーブルでのクエリ {#query-on-arrow-table} - -```python -import chdb -import pyarrow as pa -arrow_table = pa.table( - { - "a": [1, 2, 3, 4, 5, 6], - "b": ["tom", "jerry", "auxten", "tom", "jerry", "auxten"], - } -) - -chdb.query( - "SELECT b, sum(a) FROM Python(arrow_table) GROUP BY b ORDER BY b", "debug" -).show() -``` - -### chdb.PyReader クラスインスタンスでのクエリ {#query-on-chdbpyreader-class-instance} - -1. chdb.PyReader クラスを継承し、`read` メソッドを実装する必要があります。 -2. `read` メソッドは次のようにするべきです: - 1. 列の最初の次元、行の二次元のリストを返すこと。列の順序は最初の引数 `col_names` と同じである必要があります。 - 1. 読み取るデータがもうない場合は空のリストを返すこと。 - 1. ステートフルであり、カーソルは `read` メソッド内で更新される必要があります。 -3. オプションで `get_schema` メソッドを実装して、テーブルのスキーマを返すことができます。プロトタイプは `def get_schema(self) -> List[Tuple[str, str]]:` であり、戻り値は各タプルが列名と列型を含むタプルのリストです。列型は [以下のいずれか](/sql-reference/data-types) である必要があります。 - -
- -```python -import chdb - -class myReader(chdb.PyReader): - def __init__(self, data): - self.data = data - self.cursor = 0 - super().__init__(data) - - def read(self, col_names, count): - print("Python func read", col_names, count, self.cursor) - if self.cursor >= len(self.data["a"]): - return [] - block = [self.data[col] for col in col_names] - self.cursor += len(block[0]) - return block - -reader = myReader( - { - "a": [1, 2, 3, 4, 5, 6], - "b": ["tom", "jerry", "auxten", "tom", "jerry", "auxten"], - } -) - -chdb.query( - "SELECT b, sum(a) FROM Python(reader) GROUP BY b ORDER BY b" -).show() -``` - -こちらも参照: [test_query_py.py](https://github.com/chdb-io/chdb/blob/main/tests/test_query_py.py). - -## 制限事項 {#limitations} - -1. サポートされているカラム型: `pandas.Series`, `pyarrow.array`, `chdb.PyReader` -1. サポートされているデータ型: Int, UInt, Float, String, Date, DateTime, Decimal -1. Python オブジェクト型は String に変換されます -1. Pandas DataFrame のパフォーマンスは最高で、Arrow テーブルは PyReader よりも優れています - -
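上記の「Python オブジェクト型は String に変換されます」という制限は、次のような小さなスケッチで確認できます(元のドキュメントにはない追加例です):

```python
import chdb
import pandas as pd

# 仮のスケッチ: dict を含む object 型カラム(payload)が chDB からは String として見えることを確認します。
df = pd.DataFrame({"id": [1, 2], "payload": [{"a": 1}, {"b": 2}]})

print(chdb.query(
    "DESCRIBE Python(df) SETTINGS describe_compact_output=1",
    "CSV",
))
```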
- -さらに多くの例については、[examples](https://github.com/chdb-io/chdb/tree/main/examples) と [tests](https://github.com/chdb-io/chdb/tree/main/tests) を参照してください。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/chdb/install/python.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/chdb/install/python.md.hash deleted file mode 100644 index aca9c2fa810..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/chdb/install/python.md.hash +++ /dev/null @@ -1 +0,0 @@ -ecb29b9291e6fa5c diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/chdb/install/rust.md b/i18n/jp/docusaurus-plugin-content-docs/current/chdb/install/rust.md deleted file mode 100644 index f2741feb544..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/chdb/install/rust.md +++ /dev/null @@ -1,30 +0,0 @@ ---- -title: 'Rust用のchDBのインストール' -sidebar_label: 'Rust' -slug: '/chdb/install/rust' -description: 'Rust用のchDBのインストール方法' -keywords: -- 'chdb' -- 'embedded' -- 'clickhouse-lite' -- 'rust' -- 'install' ---- - - - -## 要件 {#requirements} - -[libchdb](https://github.com/chdb-io/chdb) をインストールします: - -```bash -curl -sL https://lib.chdb.io | bash -``` - -## 使用法 {#usage} - -このバインディングは進行中の作業です。始めるには [chdb-rust](https://github.com/chdb-io/chdb-rust) の指示に従ってください。 - -## GitHub リポジトリ {#github-repository} - -プロジェクトの GitHub リポジトリは [chdb-io/chdb-rust](https://github.com/chdb-io/chdb-rust) で見つけることができます。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/chdb/install/rust.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/chdb/install/rust.md.hash deleted file mode 100644 index 78064df2556..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/chdb/install/rust.md.hash +++ /dev/null @@ -1 +0,0 @@ -5e56761c0e898d17 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/chdb/reference/data-formats.md b/i18n/jp/docusaurus-plugin-content-docs/current/chdb/reference/data-formats.md deleted file mode 100644 index ba625c7ea11..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/chdb/reference/data-formats.md +++ /dev/null @@ -1,104 +0,0 @@ ---- -title: 'データ形式' -sidebar_label: 'データ形式' -slug: '/chdb/reference/data-formats' -description: 'chDBのデータ形式' -keywords: -- 'chdb' -- 'data formats' ---- - - - -When it comes to data formats, chDB is 100% feature compatible with ClickHouse. - -Input formats are used to parse the data provided to `INSERT` and `SELECT` from a file-backed table such as `File`, `URL` or `S3`. -Output formats are used to arrange the results of a `SELECT`, and to perform `INSERT`s into a file-backed table. -As well as the data formats that ClickHouse supports, chDB also supports: - -- `ArrowTable` as an output format, the type is Python `pyarrow.Table` -- `DataFrame` as an input and output format, the type is Python `pandas.DataFrame`. For examples, see [`test_joindf.py`](https://github.com/chdb-io/chdb/blob/main/tests/test_joindf.py) -- `Debug` as an output format (an alias of `CSV`), but with verbose debug output from ClickHouse enabled.
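As a minimal illustration of the chDB-specific formats listed above (a sketch; the exact objects returned may differ between chDB versions):

```python
import chdb

# "ArrowTable" returns a pyarrow.Table, "DataFrame" returns a pandas.DataFrame
tbl = chdb.query("SELECT number AS n, toString(number) AS s FROM numbers(3)", "ArrowTable")
df = chdb.query("SELECT number AS n, toString(number) AS s FROM numbers(3)", "DataFrame")

print(type(tbl), tbl.num_rows)  # pyarrow.Table with 3 rows
print(type(df))                 # pandas.DataFrame
print(df)
```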
- -The supported data formats from ClickHouse are: - -| Format | Input | Output | -|---------------------------------|-------|--------| -| TabSeparated | ✔ | ✔ | -| TabSeparatedRaw | ✔ | ✔ | -| TabSeparatedWithNames | ✔ | ✔ | -| TabSeparatedWithNamesAndTypes | ✔ | ✔ | -| TabSeparatedRawWithNames | ✔ | ✔ | -| TabSeparatedRawWithNamesAndTypes| ✔ | ✔ | -| Template | ✔ | ✔ | -| TemplateIgnoreSpaces | ✔ | ✗ | -| CSV | ✔ | ✔ | -| CSVWithNames | ✔ | ✔ | -| CSVWithNamesAndTypes | ✔ | ✔ | -| CustomSeparated | ✔ | ✔ | -| CustomSeparatedWithNames | ✔ | ✔ | -| CustomSeparatedWithNamesAndTypes| ✔ | ✔ | -| SQLInsert | ✗ | ✔ | -| Values | ✔ | ✔ | -| Vertical | ✗ | ✔ | -| JSON | ✔ | ✔ | -| JSONAsString | ✔ | ✗ | -| JSONStrings | ✔ | ✔ | -| JSONColumns | ✔ | ✔ | -| JSONColumnsWithMetadata | ✔ | ✔ | -| JSONCompact | ✔ | ✔ | -| JSONCompactStrings | ✗ | ✔ | -| JSONCompactColumns | ✔ | ✔ | -| JSONEachRow | ✔ | ✔ | -| PrettyJSONEachRow | ✗ | ✔ | -| JSONEachRowWithProgress | ✗ | ✔ | -| JSONStringsEachRow | ✔ | ✔ | -| JSONStringsEachRowWithProgress | ✗ | ✔ | -| JSONCompactEachRow | ✔ | ✔ | -| JSONCompactEachRowWithNames | ✔ | ✔ | -| JSONCompactEachRowWithNamesAndTypes | ✔ | ✔ | -| JSONCompactStringsEachRow | ✔ | ✔ | -| JSONCompactStringsEachRowWithNames | ✔ | ✔ | -| JSONCompactStringsEachRowWithNamesAndTypes | ✔ | ✔ | -| JSONObjectEachRow | ✔ | ✔ | -| BSONEachRow | ✔ | ✔ | -| TSKV | ✔ | ✔ | -| Pretty | ✗ | ✔ | -| PrettyNoEscapes | ✗ | ✔ | -| PrettyMonoBlock | ✗ | ✔ | -| PrettyNoEscapesMonoBlock | ✗ | ✔ | -| PrettyCompact | ✗ | ✔ | -| PrettyCompactNoEscapes | ✗ | ✔ | -| PrettyCompactMonoBlock | ✗ | ✔ | -| PrettyCompactNoEscapesMonoBlock | ✗ | ✔ | -| PrettySpace | ✗ | ✔ | -| PrettySpaceNoEscapes | ✗ | ✔ | -| PrettySpaceMonoBlock | ✗ | ✔ | -| PrettySpaceNoEscapesMonoBlock | ✗ | ✔ | -| Prometheus | ✗ | ✔ | -| Protobuf | ✔ | ✔ | -| ProtobufSingle | ✔ | ✔ | -| Avro | ✔ | ✔ | -| AvroConfluent | ✔ | ✗ | -| Parquet | ✔ | ✔ | -| ParquetMetadata | ✔ | ✗ | -| Arrow | ✔ | ✔ | -| ArrowStream | ✔ | ✔ | -| ORC | ✔ | ✔ | -| One | ✔ | ✗ | -| RowBinary | ✔ | ✔ | -| RowBinaryWithNames | ✔ | ✔ | -| RowBinaryWithNamesAndTypes | ✔ | ✔ | -| RowBinaryWithDefaults | ✔ | ✔ | -| Native | ✔ | ✔ | -| Null | ✗ | ✔ | -| XML | ✗ | ✔ | -| CapnProto | ✔ | ✔ | -| LineAsString | ✔ | ✔ | -| Regexp | ✔ | ✗ | -| RawBLOB | ✔ | ✔ | -| MsgPack | ✔ | ✔ | -| MySQLDump | ✔ | ✗ | -| Markdown | ✗ | ✔ | - -For further information and examples, see [ClickHouse formats for input and output data](/interfaces/formats). 
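Input and output formats can also be combined in a single query. The sketch below (assuming a recent chDB version) parses inline `JSONEachRow` data with the `format` table function and renders the result with the `PrettyCompact` output format:

```python
import chdb

# Parse inline JSONEachRow input and print it using the PrettyCompact output format
result = chdb.query(
    """
    SELECT a, b
    FROM format(JSONEachRow, '{"a": 1, "b": "one"}\n{"a": 2, "b": "two"}')
    ORDER BY a
    """,
    "PrettyCompact",
)
print(result)
```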
diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/chdb/reference/data-formats.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/chdb/reference/data-formats.md.hash deleted file mode 100644 index 8cecc50002d..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/chdb/reference/data-formats.md.hash +++ /dev/null @@ -1 +0,0 @@ -c012a1bdc7473677 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/chdb/reference/index.md b/i18n/jp/docusaurus-plugin-content-docs/current/chdb/reference/index.md deleted file mode 100644 index e28075f1a0e..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/chdb/reference/index.md +++ /dev/null @@ -1,15 +0,0 @@ ---- -title: 'chDB Technical Reference' -slug: '/chdb/reference' -description: 'Data Formats for chDB' -keywords: -- 'chdb' -- 'data formats' ---- - - - -| リファレンスページ | -|----------------------| -| [データフォーマット](/chdb/reference/data-formats) | -| [SQLリファレンス](/chdb/reference/sql-reference) | diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/chdb/reference/index.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/chdb/reference/index.md.hash deleted file mode 100644 index 1ad71c93bc8..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/chdb/reference/index.md.hash +++ /dev/null @@ -1 +0,0 @@ -4079d1f7568f6f2b diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/chdb/reference/sql-reference.md b/i18n/jp/docusaurus-plugin-content-docs/current/chdb/reference/sql-reference.md deleted file mode 100644 index 70791443e4c..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/chdb/reference/sql-reference.md +++ /dev/null @@ -1,26 +0,0 @@ ---- -title: 'SQLリファレンス' -sidebar_label: 'SQLリファレンス' -slug: '/chdb/reference/sql-reference' -description: 'chDBのSQLリファレンス' -keywords: -- 'chdb' -- 'sql reference' ---- - - - -chdbは、ClickHouseと同じSQL構文、ステートメント、エンジン、関数をサポートしています: - -| トピック | -|----------------------------| -| [SQL 構文](/sql-reference/syntax) | -| [ステートメント](/sql-reference/statements) | -| [テーブルエンジン](/engines/table-engines) | -| [データベースエンジン](/engines/database-engines) | -| [通常の関数](/sql-reference/functions) | -| [集約関数](/sql-reference/aggregate-functions) | -| [テーブル関数](/sql-reference/table-functions) | -| [ウィンドウ関数](/sql-reference/window-functions) | - -さらなる情報と例については、[ClickHouse SQL リファレンス](/sql-reference)をご覧ください。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/chdb/reference/sql-reference.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/chdb/reference/sql-reference.md.hash deleted file mode 100644 index 379625e2b43..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/chdb/reference/sql-reference.md.hash +++ /dev/null @@ -1 +0,0 @@ -ede8d67f012db877 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud-index.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/cloud-index.md.hash deleted file mode 100644 index 5d558480400..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud-index.md.hash +++ /dev/null @@ -1 +0,0 @@ -c9b65a8f4acfae0f diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/_category_.yml b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/_category_.yml deleted file mode 100644 index 4fcbe452846..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/_category_.yml +++ /dev/null @@ -1,7 +0,0 @@ -position: 1 -label: 'Benefits' -collapsible: true -collapsed: true -link: - type: doc - id: en/cloud/index diff --git 
a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/bestpractices/_category_.yml b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/bestpractices/_category_.yml deleted file mode 100644 index 1648e8a79cb..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/bestpractices/_category_.yml +++ /dev/null @@ -1,7 +0,0 @@ -label: 'Best Practices' -collapsible: true -collapsed: true -link: - type: generated-index - title: Best Practices - slug: /cloud/bestpractices/ diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/bestpractices/asyncinserts.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/bestpractices/asyncinserts.md.hash deleted file mode 100644 index 5ca45542eb9..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/bestpractices/asyncinserts.md.hash +++ /dev/null @@ -1 +0,0 @@ -9ad47390f78c238e diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/bestpractices/avoidmutations.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/bestpractices/avoidmutations.md.hash deleted file mode 100644 index 4eebef9ec28..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/bestpractices/avoidmutations.md.hash +++ /dev/null @@ -1 +0,0 @@ -944757e3d2a9f8e0 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/bestpractices/avoidnullablecolumns.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/bestpractices/avoidnullablecolumns.md.hash deleted file mode 100644 index d432bc18960..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/bestpractices/avoidnullablecolumns.md.hash +++ /dev/null @@ -1 +0,0 @@ -8cc14d0577040679 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/bestpractices/avoidoptimizefinal.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/bestpractices/avoidoptimizefinal.md.hash deleted file mode 100644 index 9245141fa86..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/bestpractices/avoidoptimizefinal.md.hash +++ /dev/null @@ -1 +0,0 @@ -51af22c8f22aa66c diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/bestpractices/bulkinserts.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/bestpractices/bulkinserts.md.hash deleted file mode 100644 index 433cf8ae19f..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/bestpractices/bulkinserts.md.hash +++ /dev/null @@ -1 +0,0 @@ -ebc4a8012d353316 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/bestpractices/index.md b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/bestpractices/index.md deleted file mode 100644 index 21da928f53a..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/bestpractices/index.md +++ /dev/null @@ -1,44 +0,0 @@ ---- -slug: '/cloud/bestpractices' -keywords: -- 'Cloud' -- 'Best Practices' -- 'Bulk Inserts' -- 'Asynchronous Inserts' -- 'Avoid Mutations' -- 'Avoid Nullable Columns' -- 'Avoid Optimize Final' -- 'Low Cardinality Partitioning Key' -- 'Multi Tenancy' -- 'Usage Limits' -title: '概要' -hide_title: true -description: 'ClickHouse Cloud の Best Practices セクションのランディングページ' ---- - - - - -# Best Practices in ClickHouse Cloud {#best-practices-in-clickhouse-cloud} - -このセクションでは、ClickHouse Cloudを最大限に活用するために従うべきベストプラクティスを提供します。 - -| ページ | 説明 | -|----------------------------------------------------------|--------------------------------------------------------------------------| -| [Usage Limits](/cloud/bestpractices/usage-limits)| ClickHouseの制限について調査します。 
| -| [Multi tenancy](/cloud/bestpractices/multi-tenancy)| マルチテナンシーを実装するためのさまざまな戦略について学びます。 | - -これらは、すべてのClickHouseのデプロイメントに適用される標準的なベストプラクティスに追加されたものです。 - -| ページ | 説明 | -|----------------------------------------------------------------------|--------------------------------------------------------------------------| -| [Choosing a Primary Key](/best-practices/choosing-a-primary-key) | ClickHouseで効果的な主キーを選択するためのガイダンス。 | -| [Select Data Types](/best-practices/select-data-types) | 適切なデータ型を選択するための推奨事項。 | -| [Use Materialized Views](/best-practices/use-materialized-views) | マテリアライズドビューの利点を得るためのタイミングと方法。 | -| [Minimize and Optimize JOINs](/best-practices/minimize-optimize-joins)| JOIN操作を最小限に抑え、最適化するためのベストプラクティス。 | -| [Choosing a Partitioning Key](/best-practices/choosing-a-partitioning-key) | パーティショニングキーを効果的に選択および適用する方法。 | -| [Selecting an Insert Strategy](/best-practices/selecting-an-insert-strategy) | ClickHouseでの効率的なデータ挿入のための戦略。 | -| [Data Skipping Indices](/best-practices/use-data-skipping-indices-where-appropriate) | パフォーマンス向上のためにデータスキッピングインデックスを適用するタイミング。 | -| [Avoid Mutations](/best-practices/avoid-mutations) | 突然変異を避ける理由と、それなしで設計する方法。 | -| [Avoid OPTIMIZE FINAL](/best-practices/avoid-optimize-final) | `OPTIMIZE FINAL`がコストがかかる理由と、その回避方法。 | -| [Use JSON where appropriate](/best-practices/use-json-where-appropriate) | ClickHouseでJSONカラムを使用する際の考慮事項。 | diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/bestpractices/index.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/bestpractices/index.md.hash deleted file mode 100644 index 1ae056246fd..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/bestpractices/index.md.hash +++ /dev/null @@ -1 +0,0 @@ -8c4d3e48a9af0c2f diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/bestpractices/multitenancy.md b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/bestpractices/multitenancy.md deleted file mode 100644 index a26167e7562..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/bestpractices/multitenancy.md +++ /dev/null @@ -1,379 +0,0 @@ ---- -slug: '/cloud/bestpractices/multi-tenancy' -sidebar_label: 'Implement multi tenancy' -title: 'Multi tenancy' -description: 'Best practices to implement multi tenancy' ---- - - - -On a SaaSデータ分析プラットフォームでは、組織、顧客、またはビジネスユニットなどの複数のテナントが同じデータベースインフラストラクチャを共有しつつ、それぞれのデータを論理的に分離しておくことが一般的です。これにより、異なるユーザーが同じプラットフォーム内で自分のデータに安全にアクセスすることが可能になります。 - -要件に応じて、マルチテナンシーを実装するためのさまざまな方法があります。以下は、ClickHouse Cloudを使用してそれらを実装する方法のガイドです。 - -## Shared table {#shared-table} - -このアプローチでは、すべてのテナントのデータが1つの共有テーブルに格納され、各テナントのデータを識別するためにフィールド(またはフィールドのセット)が使用されます。パフォーマンスを最大化するために、このフィールドは [primary key](/sql-reference/statements/create/table#primary-key) に含めるべきです。ユーザーがそれぞれのテナントに属するデータのみアクセスできるようにするために、[role-based access control](/operations/access-rights) を使用し、[row policies](/operations/access-rights#row-policy-management)を介して実装します。 - -> **私たちはこのアプローチを推奨します。これは管理が最も簡単であり、特にすべてのテナントが同じデータスキーマを共有し、データ量が中程度(< TBs)である場合に有効です。** - -すべてのテナントデータを1つのテーブルに集約することで、最適化されたデータ圧縮とメタデータのオーバーヘッドの削減により、ストレージの効率が向上します。加えて、すべてのデータが中央管理されているため、スキーマの更新も簡素化されます。 - -この手法は、大量のテナント(数百万の可能性があります)を処理するために特に効果的です。 - -ただし、テナントが異なるデータスキーマを持つ場合や、時間の経過とともに分岐することが予想される場合は、他のアプローチがより適しているかもしれません。 - -テナント間でデータの量に大きな差がある場合は、小規模なテナントが不必要なクエリパフォーマンスの影響を受ける可能性があります。この問題は、テナントフィールドを主キーに含めることで大幅に軽減されます。 - -### Example {#shared-table-example} - -これは共有テーブルのマルチテナンシーモデルの実装例です。 - -まず、`tenant_id`フィールドを主キーに含む共有テーブルを作成します。 - -```sql ---- Create table events. 
Using tenant_id as part of the primary key -CREATE TABLE events -( - tenant_id UInt32, -- テナント識別子 - id UUID, -- ユニークイベントID - type LowCardinality(String), -- イベントの種類 - timestamp DateTime, -- イベントのタイムスタンプ - user_id UInt32, -- イベントをトリガーしたユーザーのID - data String, -- イベントデータ -) -ORDER BY (tenant_id, timestamp) -``` - -次に、偽データを挿入します。 - -```sql --- Insert some dummy rows -INSERT INTO events (tenant_id, id, type, timestamp, user_id, data) -VALUES -(1, '7b7e0439-99d0-4590-a4f7-1cfea1e192d1', 'user_login', '2025-03-19 08:00:00', 1001, '{"device": "desktop", "location": "LA"}'), -(1, '846aa71f-f631-47b4-8429-ee8af87b4182', 'purchase', '2025-03-19 08:05:00', 1002, '{"item": "phone", "amount": 799}'), -(1, '6b4d12e4-447d-4398-b3fa-1c1e94d71a2f', 'user_logout', '2025-03-19 08:10:00', 1001, '{"device": "desktop", "location": "LA"}'), -(2, '7162f8ea-8bfd-486a-a45e-edfc3398ca93', 'user_login', '2025-03-19 08:12:00', 2001, '{"device": "mobile", "location": "SF"}'), -(2, '6b5f3e55-5add-479e-b89d-762aa017f067', 'purchase', '2025-03-19 08:15:00', 2002, '{"item": "headphones", "amount": 199}'), -(2, '43ad35a1-926c-4543-a133-8672ddd504bf', 'user_logout', '2025-03-19 08:20:00', 2001, '{"device": "mobile", "location": "SF"}'), -(1, '83b5eb72-aba3-4038-bc52-6c08b6423615', 'purchase', '2025-03-19 08:45:00', 1003, '{"item": "monitor", "amount": 450}'), -(1, '975fb0c8-55bd-4df4-843b-34f5cfeed0a9', 'user_login', '2025-03-19 08:50:00', 1004, '{"device": "desktop", "location": "LA"}'), -(2, 'f50aa430-4898-43d0-9d82-41e7397ba9b8', 'purchase', '2025-03-19 08:55:00', 2003, '{"item": "laptop", "amount": 1200}'), -(2, '5c150ceb-b869-4ebb-843d-ab42d3cb5410', 'user_login', '2025-03-19 09:00:00', 2004, '{"device": "mobile", "location": "SF"}'), -``` - -次に、`user_1` と `user_2` の2つのユーザーを作成します。 - -```sql --- Create users -CREATE USER user_1 IDENTIFIED BY '' -CREATE USER user_2 IDENTIFIED BY '' -``` - -私たちは [create row policies](/sql-reference/statements/create/row-policy) を作成し、`user_1` と `user_2` のテナントデータのみにアクセスを制限します。 - -```sql --- Create row policies -CREATE ROW POLICY user_filter_1 ON default.events USING tenant_id=1 TO user_1 -CREATE ROW POLICY user_filter_2 ON default.events USING tenant_id=2 TO user_2 -``` - -次に、共通の役割を使用して共有テーブルに対して [`GRANT SELECT`](/sql-reference/statements/grant#usage) 権限を付与します。 - -```sql --- Create role -CREATE ROLE user_role - --- Grant read only to events table. -GRANT SELECT ON default.events TO user_role -GRANT user_role TO user_1 -GRANT user_role TO user_2 -``` - -これで、`user_1`として接続し、シンプルなセレクトを実行できます。最初のテナントからの行のみが返されます。 - -```sql --- Logged as user_1 -SELECT * -FROM events - - ┌─tenant_id─┬─id───────────────────────────────────┬─type────────┬───────────timestamp─┬─user_id─┬─data────────────────────────────────────┐ -1. │ 1 │ 7b7e0439-99d0-4590-a4f7-1cfea1e192d1 │ user_login │ 2025-03-19 08:00:00 │ 1001 │ {"device": "desktop", "location": "LA"} │ -2. │ 1 │ 846aa71f-f631-47b4-8429-ee8af87b4182 │ purchase │ 2025-03-19 08:05:00 │ 1002 │ {"item": "phone", "amount": 799} │ -3. │ 1 │ 6b4d12e4-447d-4398-b3fa-1c1e94d71a2f │ user_logout │ 2025-03-19 08:10:00 │ 1001 │ {"device": "desktop", "location": "LA"} │ -4. │ 1 │ 83b5eb72-aba3-4038-bc52-6c08b6423615 │ purchase │ 2025-03-19 08:45:00 │ 1003 │ {"item": "monitor", "amount": 450} │ -5. 
│ 1 │ 975fb0c8-55bd-4df4-843b-34f5cfeed0a9 │ user_login │ 2025-03-19 08:50:00 │ 1004 │ {"device": "desktop", "location": "LA"} │ - └───────────┴──────────────────────────────────────┴─────────────┴─────────────────────┴─────────┴─────────────────────────────────────────┘ -``` - -## Separate tables {#separate-tables} - -このアプローチでは、各テナントのデータが同じデータベース内の別のテーブルに格納され、テナントを識別するための特定のフィールドが不要になります。ユーザーアクセスは [GRANT statement](/sql-reference/statements/grant) を使用して強制され、各ユーザーは自分のテナントデータを含むテーブルにのみアクセスできるようにします。 - -> **テナントが異なるデータスキーマを持つ場合、別のテーブルを使用することは良い選択です。** - -非常に大きなデータセットを持つ少数のテナントが関与するシナリオでは、クエリパフォーマンスが重要な場合、このアプローチは共有テーブルモデルを上回ることがあります。他のテナントのデータをフィルタリングする必要がないため、クエリがより効率的になることができます。さらに、主キーは追加のフィールド(テナントIDなど)を主キーに含める必要がないため、さらに最適化できます。 - -ただし、このアプローチは1000のテナントにはスケーラブルではありません。 [usage limits](/cloud/bestpractices/usage-limits) を参照してください。 - -### Example {#separate-tables-example} - -これは、別々のテーブルのマルチテナンシーモデルの実装例です。 - -まず、`tenant_1`からのイベント用のテーブルと、`tenant_2`からのイベント用のテーブルの2つを作成します。 - -```sql --- Create table for tenant 1 -CREATE TABLE events_tenant_1 -( - id UUID, -- ユニークイベントID - type LowCardinality(String), -- イベントの種類 - timestamp DateTime, -- イベントのタイムスタンプ - user_id UInt32, -- イベントをトリガーしたユーザーのID - data String, -- イベントデータ -) -ORDER BY (timestamp, user_id) -- 主キーは他の属性に焦点を当てることができます - --- Create table for tenant 2 -CREATE TABLE events_tenant_2 -( - id UUID, -- ユニークイベントID - type LowCardinality(String), -- イベントの種類 - timestamp DateTime, -- イベントのタイムスタンプ - user_id UInt32, -- イベントをトリガーしたユーザーのID - data String, -- イベントデータ -) -ORDER BY (timestamp, user_id) -- 主キーは他の属性に焦点を当てることができます -``` - -偽データを挿入します。 - -```sql -INSERT INTO events_tenant_1 (id, type, timestamp, user_id, data) -VALUES -('7b7e0439-99d0-4590-a4f7-1cfea1e192d1', 'user_login', '2025-03-19 08:00:00', 1001, '{"device": "desktop", "location": "LA"}'), -('846aa71f-f631-47b4-8429-ee8af87b4182', 'purchase', '2025-03-19 08:05:00', 1002, '{"item": "phone", "amount": 799}'), -('6b4d12e4-447d-4398-b3fa-1c1e94d71a2f', 'user_logout', '2025-03-19 08:10:00', 1001, '{"device": "desktop", "location": "LA"}'), -('83b5eb72-aba3-4038-bc52-6c08b6423615', 'purchase', '2025-03-19 08:45:00', 1003, '{"item": "monitor", "amount": 450}'), -('975fb0c8-55bd-4df4-843b-34f5cfeed0a9', 'user_login', '2025-03-19 08:50:00', 1004, '{"device": "desktop", "location": "LA"}') - -INSERT INTO events_tenant_2 (id, type, timestamp, user_id, data) -VALUES -('7162f8ea-8bfd-486a-a45e-edfc3398ca93', 'user_login', '2025-03-19 08:12:00', 2001, '{"device": "mobile", "location": "SF"}'), -('6b5f3e55-5add-479e-b89d-762aa017f067', 'purchase', '2025-03-19 08:15:00', 2002, '{"item": "headphones", "amount": 199}'), -('43ad35a1-926c-4543-a133-8672ddd504bf', 'user_logout', '2025-03-19 08:20:00', 2001, '{"device": "mobile", "location": "SF"}'), -('f50aa430-4898-43d0-9d82-41e7397ba9b8', 'purchase', '2025-03-19 08:55:00', 2003, '{"item": "laptop", "amount": 1200}'), -('5c150ceb-b869-4ebb-843d-ab42d3cb5410', 'user_login', '2025-03-19 09:00:00', 2004, '{"device": "mobile", "location": "SF"}') -``` - -次に、`user_1`と`user_2`の2つのユーザーを作成します。 - -```sql --- Create users -CREATE USER user_1 IDENTIFIED BY '' -CREATE USER user_2 IDENTIFIED BY '' -``` - -次に、それぞれのテーブルに対して `GRANT SELECT` 権限を付与します。 - -```sql --- Grant read only to events table. 
-GRANT SELECT ON default.events_tenant_1 TO user_1 -GRANT SELECT ON default.events_tenant_2 TO user_2 -``` - -これで、`user_1`として接続し、このユーザーに対応するテーブルからシンプルなセレクトを実行できます。最初のテナントからの行のみが返されます。 - -```sql --- Logged as user_1 -SELECT * -FROM default.events_tenant_1 - - ┌─id───────────────────────────────────┬─type────────┬───────────timestamp─┬─user_id─┬─data────────────────────────────────────┐ -1. │ 7b7e0439-99d0-4590-a4f7-1cfea1e192d1 │ user_login │ 2025-03-19 08:00:00 │ 1001 │ {"device": "desktop", "location": "LA"} │ -2. │ 846aa71f-f631-47b4-8429-ee8af87b4182 │ purchase │ 2025-03-19 08:05:00 │ 1002 │ {"item": "phone", "amount": 799} │ -3. │ 6b4d12e4-447d-4398-b3fa-1c1e94d71a2f │ user_logout │ 2025-03-19 08:10:00 │ 1001 │ {"device": "desktop", "location": "LA"} │ -4. │ 83b5eb72-aba3-4038-bc52-6c08b6423615 │ purchase │ 2025-03-19 08:45:00 │ 1003 │ {"item": "monitor", "amount": 450} │ -5. │ 975fb0c8-55bd-4df4-843b-34f5cfeed0a9 │ user_login │ 2025-03-19 08:50:00 │ 1004 │ {"device": "desktop", "location": "LA"} │ - └──────────────────────────────────────┴─────────────┴─────────────────────┴─────────┴─────────────────────────────────────────┘ -``` - -## Separate databases {#separate-databases} - -各テナントのデータは、同じClickHouseサービス内の別々のデータベースに格納されます。 - -> **このアプローチは、各テナントが多数のテーブルと場合によってはマテリアライズドビューを必要とし、異なるデータスキーマを持つ場合に便利です。ただし、テナントの数が多い場合は管理が難しくなることがあります。** - -実装は、別のテーブルアプローチと似ていますが、権限をテーブルレベルで付与する代わりに、データベースレベルで権限が付与されます。 - -このアプローチは、1000のテナントにはスケーラブルではありません。 [usage limits](/cloud/bestpractices/usage-limits) を参照してください。 - -### Example {#separate-databases-example} - -これは、別のデータベースのマルチテナンシーモデルの実装例です。 - -まず、`tenant_1`用のデータベースと、`tenant_2`用のデータベースの2つを作成します。 - -```sql --- Create database for tenant_1 -CREATE DATABASE tenant_1; - --- Create database for tenant_2 -CREATE DATABASE tenant_2; -``` - -```sql --- Create table for tenant_1 -CREATE TABLE tenant_1.events -( - id UUID, -- ユニークイベントID - type LowCardinality(String), -- イベントの種類 - timestamp DateTime, -- イベントのタイムスタンプ - user_id UInt32, -- イベントをトリガーしたユーザーのID - data String, -- イベントデータ -) -ORDER BY (timestamp, user_id); - --- Create table for tenant_2 -CREATE TABLE tenant_2.events -( - id UUID, -- ユニークイベントID - type LowCardinality(String), -- イベントの種類 - timestamp DateTime, -- イベントのタイムスタンプ - user_id UInt32, -- イベントをトリガーしたユーザーのID - data String, -- イベントデータ -) -ORDER BY (timestamp, user_id); -``` - -偽データを挿入します。 - -```sql -INSERT INTO tenant_1.events (id, type, timestamp, user_id, data) -VALUES -('7b7e0439-99d0-4590-a4f7-1cfea1e192d1', 'user_login', '2025-03-19 08:00:00', 1001, '{"device": "desktop", "location": "LA"}'), -('846aa71f-f631-47b4-8429-ee8af87b4182', 'purchase', '2025-03-19 08:05:00', 1002, '{"item": "phone", "amount": 799}'), -('6b4d12e4-447d-4398-b3fa-1c1e94d71a2f', 'user_logout', '2025-03-19 08:10:00', 1001, '{"device": "desktop", "location": "LA"}'), -('83b5eb72-aba3-4038-bc52-6c08b6423615', 'purchase', '2025-03-19 08:45:00', 1003, '{"item": "monitor", "amount": 450}'), -('975fb0c8-55bd-4df4-843b-34f5cfeed0a9', 'user_login', '2025-03-19 08:50:00', 1004, '{"device": "desktop", "location": "LA"}') - -INSERT INTO tenant_2.events (id, type, timestamp, user_id, data) -VALUES -('7162f8ea-8bfd-486a-a45e-edfc3398ca93', 'user_login', '2025-03-19 08:12:00', 2001, '{"device": "mobile", "location": "SF"}'), -('6b5f3e55-5add-479e-b89d-762aa017f067', 'purchase', '2025-03-19 08:15:00', 2002, '{"item": "headphones", "amount": 199}'), -('43ad35a1-926c-4543-a133-8672ddd504bf', 'user_logout', '2025-03-19 08:20:00', 2001, '{"device": "mobile", "location": "SF"}'), 
-('f50aa430-4898-43d0-9d82-41e7397ba9b8', 'purchase', '2025-03-19 08:55:00', 2003, '{"item": "laptop", "amount": 1200}'), -('5c150ceb-b869-4ebb-843d-ab42d3cb5410', 'user_login', '2025-03-19 09:00:00', 2004, '{"device": "mobile", "location": "SF"}') -``` - -次に、`user_1`と`user_2`の2つのユーザーを作成します。 - -```sql --- Create users -CREATE USER user_1 IDENTIFIED BY '' -CREATE USER user_2 IDENTIFIED BY '' -``` - -次に、それぞれのテーブルに対して `GRANT SELECT` 権限を付与します。 - -```sql --- Grant read only to events table. -GRANT SELECT ON tenant_1.events TO user_1 -GRANT SELECT ON tenant_2.events TO user_2 -``` - -これで、`user_1`として接続し、適切なデータベースのイベントテーブルでシンプルなセレクトを実行できます。最初のテナントからの行のみが返されます。 - -```sql --- Logged as user_1 -SELECT * -FROM tenant_1.events - - ┌─id───────────────────────────────────┬─type────────┬───────────timestamp─┬─user_id─┬─data────────────────────────────────────┐ -1. │ 7b7e0439-99d0-4590-a4f7-1cfea1e192d1 │ user_login │ 2025-03-19 08:00:00 │ 1001 │ {"device": "desktop", "location": "LA"} │ -2. │ 846aa71f-f631-47b4-8429-ee8af87b4182 │ purchase │ 2025-03-19 08:05:00 │ 1002 │ {"item": "phone", "amount": 799} │ -3. │ 6b4d12e4-447d-4398-b3fa-1c1e94d71a2f │ user_logout │ 2025-03-19 08:10:00 │ 1001 │ {"device": "desktop", "location": "LA"} │ -4. │ 83b5eb72-aba3-4038-bc52-6c08b6423615 │ purchase │ 2025-03-19 08:45:00 │ 1003 │ {"item": "monitor", "amount": 450} │ -5. │ 975fb0c8-55bd-4df4-843b-34f5cfeed0a9 │ user_login │ 2025-03-19 08:50:00 │ 1004 │ {"device": "desktop", "location": "LA"} │ - └──────────────────────────────────────┴─────────────┴─────────────────────┴─────────┴─────────────────────────────────────────┘ -``` - -## Compute-compute separation {#compute-compute-separation} - -上記で説明した3つのアプローチは、[Warehouses](/cloud/reference/warehouses#what-is-a-warehouse)を使用してさらに分離することができます。データは共通のオブジェクトストレージを介して共有されますが、各テナントは [compute-compute separation](/cloud/reference/warehouses#what-is-compute-compute-separation) により異なるCPU/メモリ比率を持つ独自のコンピューティングサービスを持つことができます。 - -ユーザー管理は、ウェアハウス内のすべてのサービスが [share access controls](/cloud/reference/warehouses#database-credentials) を共有するため、前述のアプローチと似ています。 - -ウェアハウス内の子サービスの数は限られていますので、[Warehouse limitations](/cloud/reference/warehouses#limitations) を参照してください。 - -## Separate Cloud service {#separate-service} - -最も根本的なアプローチは、テナントごとに異なるClickHouseサービスを使用することです。 - -> **この一般的ではない方法は、テナントのデータが法律、セキュリティ、または近接性の理由から異なる地域に保存される必要がある場合に解決策となるでしょう。** - -各サービスにおいて、ユーザーはそれぞれのテナントのデータにアクセスするためのユーザーアカウントを作成する必要があります。 - -このアプローチは管理が難しく、各サービスには独自のインフラストラクチャが必要なため、オーバーヘッドが生じます。サービスは、[ClickHouse Cloud API](/cloud/manage/api/api-overview)を介して管理することができ、[official Terraform provider](https://registry.terraform.io/providers/ClickHouse/clickhouse/latest/docs)を使用してオーケストレーションも可能です。 - -### Example {#separate-service-example} - -これは、別サービスのマルチテナンシーモデルの実装例です。例では、1つのClickHouseサービスにテーブルとユーザーを作成する方法が示されていますが、これをすべてのサービスに複製する必要があります。 - -まず、`events` テーブルを作成します。 - -```sql --- Create table for tenant_1 -CREATE TABLE events -( - id UUID, -- ユニークイベントID - type LowCardinality(String), -- イベントの種類 - timestamp DateTime, -- イベントのタイムスタンプ - user_id UInt32, -- イベントをトリガーしたユーザーのID - data String, -- イベントデータ -) -ORDER BY (timestamp, user_id); -``` - -偽データを挿入します。 - -```sql -INSERT INTO events (id, type, timestamp, user_id, data) -VALUES -('7b7e0439-99d0-4590-a4f7-1cfea1e192d1', 'user_login', '2025-03-19 08:00:00', 1001, '{"device": "desktop", "location": "LA"}'), -('846aa71f-f631-47b4-8429-ee8af87b4182', 'purchase', '2025-03-19 08:05:00', 1002, '{"item": "phone", "amount": 799}'), -('6b4d12e4-447d-4398-b3fa-1c1e94d71a2f', 'user_logout', 
'2025-03-19 08:10:00', 1001, '{"device": "desktop", "location": "LA"}'), -('83b5eb72-aba3-4038-bc52-6c08b6423615', 'purchase', '2025-03-19 08:45:00', 1003, '{"item": "monitor", "amount": 450}'), -('975fb0c8-55bd-4df4-843b-34f5cfeed0a9', 'user_login', '2025-03-19 08:50:00', 1004, '{"device": "desktop", "location": "LA"}') -``` - -次に、`user_1` を作成します。 - -```sql --- Create users -CREATE USER user_1 IDENTIFIED BY '' -``` - -次に、対応するテーブルに対して `GRANT SELECT` 権限を付与します。 - -```sql --- Grant read only to events table. -GRANT SELECT ON events TO user_1 -``` - -これで、テナント1のサービスで`user_1`として接続し、シンプルなセレクトを実行できます。最初のテナントからの行のみが返されます。 - -```sql --- Logged as user_1 -SELECT * -FROM events - - ┌─id───────────────────────────────────┬─type────────┬───────────timestamp─┬─user_id─┬─data────────────────────────────────────┐ -1. │ 7b7e0439-99d0-4590-a4f7-1cfea1e192d1 │ user_login │ 2025-03-19 08:00:00 │ 1001 │ {"device": "desktop", "location": "LA"} │ -2. │ 846aa71f-f631-47b4-8429-ee8af87b4182 │ purchase │ 2025-03-19 08:05:00 │ 1002 │ {"item": "phone", "amount": 799} │ -3. │ 6b4d12e4-447d-4398-b3fa-1c1e94d71a2f │ user_logout │ 2025-03-19 08:10:00 │ 1001 │ {"device": "desktop", "location": "LA"} │ -4. │ 83b5eb72-aba3-4038-bc52-6c08b6423615 │ purchase │ 2025-03-19 08:45:00 │ 1003 │ {"item": "monitor", "amount": 450} │ -5. │ 975fb0c8-55bd-4df4-843b-34f5cfeed0a9 │ user_login │ 2025-03-19 08:50:00 │ 1004 │ {"device": "desktop", "location": "LA"} │ - └──────────────────────────────────────┴─────────────┴─────────────────────┴─────────┴─────────────────────────────────────────┘ -``` diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/bestpractices/multitenancy.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/bestpractices/multitenancy.md.hash deleted file mode 100644 index bf23ea018cd..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/bestpractices/multitenancy.md.hash +++ /dev/null @@ -1 +0,0 @@ -2815dfa7d00cce3f diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/bestpractices/partitioningkey.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/bestpractices/partitioningkey.md.hash deleted file mode 100644 index afe43430bad..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/bestpractices/partitioningkey.md.hash +++ /dev/null @@ -1 +0,0 @@ -b533d09fbcc8be35 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/bestpractices/usagelimits.md b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/bestpractices/usagelimits.md deleted file mode 100644 index f9752749cf0..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/bestpractices/usagelimits.md +++ /dev/null @@ -1,33 +0,0 @@ ---- -slug: '/cloud/bestpractices/usage-limits' -sidebar_label: 'Usage Limits' -title: 'Usage limits' -description: 'Describes the recommended usage limits in ClickHouse Cloud' ---- - - - -While ClickHouse is known for its speed and reliability, optimal performance is achieved within certain operating parameters. For example, having too many tables, databases or parts could negatively impact performance. To avoid this, Clickhouse Cloud has guardrails set up for several types of items. You can find details of these guardrails below. 
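Before reviewing the table below, it can help to check where a service currently stands relative to these guardrails. A minimal sketch using the `clickhouse-connect` Python client follows; the host and credentials are placeholders:

```python
import clickhouse_connect

# Placeholders: replace with your ClickHouse Cloud endpoint and credentials
client = clickhouse_connect.get_client(
    host="your-service.clickhouse.cloud",
    port=8443,
    username="default",
    password="...",
    secure=True,
)

# Current counts of databases, tables and active parts, to compare against the guardrails below
row = client.query(
    """
    SELECT
        (SELECT count() FROM system.databases)          AS databases,
        (SELECT count() FROM system.tables)             AS tables,
        (SELECT count() FROM system.parts WHERE active) AS active_parts
    """
).result_rows[0]
print(dict(zip(["databases", "tables", "active_parts"], row)))
```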
- -:::tip -もしこれらのガードレールにぶつかった場合、最適化されていない方法でユースケースを実装している可能性があります。サポートチームにお問い合わせいただければ、ガードレールを超えないようにユースケースを洗練するお手伝いを喜んでさせていただきます。また、制御された方法でガードレールを引き上げる方法を一緒に考えることもできます。 -::: - -| 次元 | 限界 | -|-----------|-------| -|**データベース**| 1000| -|**テーブル**| 5000| -|**カラム**| ∼1000 (コンパクトよりもワイドフォーマットが推奨されます)| -|**パーティション**| 50k| -|**パーツ**| 100k(インスタンス全体)| -|**パートサイズ**| 150gb| -|**組織ごとのサービス**| 20(ソフトリミット)| -|**倉庫ごとのサービス**| 5(ソフトリミット)| -|**低順序数**| 10k以下| -|**テーブル内の主キー**| データを十分にフィルターする4-5個| -|**クエリの同時実行**| 1000| -|**バッチ取り込み**| 1Mを超えるものは、システムによって1M行のブロックに分割されます| - -:::note -シングルレプリカサービスの場合、データベースの最大数は100に制限され、テーブルの最大数は500に制限されます。さらに、ベーシックティアサービスのストレージは1 TBに制限されています。 -::: diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/bestpractices/usagelimits.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/bestpractices/usagelimits.md.hash deleted file mode 100644 index a06bfa084e9..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/bestpractices/usagelimits.md.hash +++ /dev/null @@ -1 +0,0 @@ -b8f70f140fe904c7 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/changelogs/changelog-24-10.md b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/changelogs/changelog-24-10.md deleted file mode 100644 index e75eec40655..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/changelogs/changelog-24-10.md +++ /dev/null @@ -1,54 +0,0 @@ ---- -slug: /changelogs/24.10 -title: 'v24.10 Changelog for Cloud' -description: 'Fast release changelog for v24.10' -keywords: ['changelog', 'cloud'] -sidebar_label: 'v24.10' ---- - -Relevant changes for ClickHouse Cloud services based on the v24.10 release. - -## Backward Incompatible Change {#backward-incompatible-change} -- Allow to write `SETTINGS` before `FORMAT` in a chain of queries with `UNION` when subqueries are inside parentheses. This closes [#39712](https://github.com/ClickHouse/ClickHouse/issues/39712). Change the behavior when a query has the SETTINGS clause specified twice in a sequence. The closest SETTINGS clause will have a preference for the corresponding subquery. In the previous versions, the outermost SETTINGS clause could take a preference over the inner one. [#60197](https://github.com/ClickHouse/ClickHouse/pull/60197)[#68614](https://github.com/ClickHouse/ClickHouse/pull/68614) ([Alexey Milovidov](https://github.com/alexey-milovidov)). -- Reimplement Dynamic type. Now when the limit of dynamic data types is reached new types are not cast to String but stored in a special data structure in binary format with binary encoded data type. Now any type ever inserted into Dynamic column can be read from it as subcolumn. [#68132](https://github.com/ClickHouse/ClickHouse/pull/68132) ([Pavel Kruglov](https://github.com/Avogar)). -- Expressions like `a[b].c` are supported for named tuples, as well as named subscripts from arbitrary expressions, e.g., `expr().name`. This is useful for processing JSON. This closes [#54965](https://github.com/ClickHouse/ClickHouse/issues/54965). In previous versions, an expression of form `expr().name` was parsed as `tupleElement(expr(), name)`, and the query analyzer was searching for a column `name` rather than for the corresponding tuple element; while in the new version, it is changed to `tupleElement(expr(), 'name')`. 
In most cases, the previous version was not working, but it is possible to imagine a very unusual scenario when this change could lead to incompatibility: if you stored names of tuple elements in a column or an alias, that was named differently than the tuple element's name: `SELECT 'b' AS a, CAST([tuple(123)] AS 'Array(Tuple(b UInt8))') AS t, t[1].a`. It is very unlikely that you used such queries, but we still have to mark this change as potentially backward incompatible. [#68435](https://github.com/ClickHouse/ClickHouse/pull/68435) ([Alexey Milovidov](https://github.com/alexey-milovidov)). -- When the setting `print_pretty_type_names` is enabled, it will print `Tuple` data type in a pretty form in `SHOW CREATE TABLE` statements, `formatQuery` function, and in the interactive mode in `clickhouse-client` and `clickhouse-local`. In previous versions, this setting was only applied to `DESCRIBE` queries and `toTypeName`. This closes [#65753](https://github.com/ClickHouse/ClickHouse/issues/65753). [#68492](https://github.com/ClickHouse/ClickHouse/pull/68492) ([Alexey Milovidov](https://github.com/alexey-milovidov)). -- Reordering of filter conditions from `[PRE]WHERE` clause is now allowed by default. It could be disabled by setting `allow_reorder_prewhere_conditions` to `false`. [#70657](https://github.com/ClickHouse/ClickHouse/pull/70657) ([Nikita Taranov](https://github.com/nickitat)). -- Fix `optimize_functions_to_subcolumns` optimization (previously could lead to `Invalid column type for ColumnUnique::insertRangeFrom. Expected String, got LowCardinality(String)` error), by preserving `LowCardinality` type in `mapKeys`/`mapValues`. [#70716](https://github.com/ClickHouse/ClickHouse/pull/70716) ([Azat Khuzhin](https://github.com/azat)). - - -## New Feature {#new-feature} -- Refreshable materialized views are production ready. [#70550](https://github.com/ClickHouse/ClickHouse/pull/70550) ([Michael Kolupaev](https://github.com/al13n321)). Refreshable materialized views are now supported in Replicated databases. [#60669](https://github.com/ClickHouse/ClickHouse/pull/60669) ([Michael Kolupaev](https://github.com/al13n321)). -- Function `toStartOfInterval()` now has a new overload which emulates TimescaleDB's `time_bucket()` function, respectively PostgreSQL's `date_bin()` function. ([#55619](https://github.com/ClickHouse/ClickHouse/issues/55619)). It allows to align date or timestamp values to multiples of a given interval from an *arbitrary* origin (instead of 0000-01-01 00:00:00.000 as *fixed* origin). For example, `SELECT toStartOfInterval(toDateTime('2023-01-01 14:45:00'), INTERVAL 1 MINUTE, toDateTime('2023-01-01 14:35:30'));` returns `2023-01-01 14:44:30` which is a multiple of 1 minute intervals, starting from origin `2023-01-01 14:35:30`. [#56738](https://github.com/ClickHouse/ClickHouse/pull/56738) ([Yarik Briukhovetskyi](https://github.com/yariks5s)). -- MongoDB integration refactored: migration to new driver mongocxx from deprecated Poco::MongoDB, remove support for deprecated old protocol, support for connection by URI, support for all MongoDB types, support for WHERE and ORDER BY statements on MongoDB side, restriction for expression unsupported by MongoDB. [#63279](https://github.com/ClickHouse/ClickHouse/pull/63279) ([Kirill Nikiforov](https://github.com/allmazz)). 
-- A new `--progress-table` option in clickhouse-client prints a table with metrics changing during query execution; a new `--enable-progress-table-toggle` is associated with the `--progress-table` option, and toggles the rendering of the progress table by pressing the control key (Space). [#63689](https://github.com/ClickHouse/ClickHouse/pull/63689) ([Maria Khristenko](https://github.com/mariaKhr)). -- This allows to grant access to the wildcard prefixes. `GRANT SELECT ON db.table_pefix_* TO user`. [#65311](https://github.com/ClickHouse/ClickHouse/pull/65311) ([pufit](https://github.com/pufit)). -- Introduced JSONCompactWithProgress format where ClickHouse outputs each row as a newline-delimited JSON object, including metadata, data, progress, totals, and statistics. [#66205](https://github.com/ClickHouse/ClickHouse/pull/66205) ([Alexey Korepanov](https://github.com/alexkorep)). -- Add system.query_metric_log which contains history of memory and metric values from table system.events for individual queries, periodically flushed to disk. [#66532](https://github.com/ClickHouse/ClickHouse/pull/66532) ([Pablo Marcos](https://github.com/pamarcos)). -- Add the `input_format_json_empty_as_default` setting which, when enabled, treats empty fields in JSON inputs as default values. Closes [#59339](https://github.com/ClickHouse/ClickHouse/issues/59339). [#66782](https://github.com/ClickHouse/ClickHouse/pull/66782) ([Alexis Arnaud](https://github.com/a-a-f)). -- Added functions `overlay` and `overlayUTF8` which replace parts of a string by another string. Example: `SELECT overlay('Hello New York', 'Jersey', 11)` returns `Hello New Jersey`. [#66933](https://github.com/ClickHouse/ClickHouse/pull/66933) ([李扬](https://github.com/taiyang-li)). -- Add new Command, Lightweight Delete In Partition ``` DELETE FROM [db.]table [ON CLUSTER cluster] [IN PARTITION partition_expr] WHERE expr; ``` ``` VM-114-29-tos :) select * from ads_app_poster_ip_source_channel_di_replicated_local;. [#67805](https://github.com/ClickHouse/ClickHouse/pull/67805) ([sunny](https://github.com/sunny19930321)). -- Implemented comparison for `Interval` data type values so they are converting now to the least supertype. [#68057](https://github.com/ClickHouse/ClickHouse/pull/68057) ([Yarik Briukhovetskyi](https://github.com/yariks5s)). -- Add create_if_not_exists setting to default to IF NOT EXISTS behavior during CREATE statements. [#68164](https://github.com/ClickHouse/ClickHouse/pull/68164) ([Peter Nguyen](https://github.com/petern48)). -- Makes possible to read Iceberg tables in Azure and locally. [#68210](https://github.com/ClickHouse/ClickHouse/pull/68210) ([Daniil Ivanik](https://github.com/divanik)). -- Add aggregate functions distinctDynamicTypes/distinctJSONPaths/distinctJSONPathsAndTypes for better introspection of JSON column type content. [#68463](https://github.com/ClickHouse/ClickHouse/pull/68463) ([Pavel Kruglov](https://github.com/Avogar)). -- Query cache entries can now be dropped by tag. For example, the query cache entry created by `SELECT 1 SETTINGS use_query_cache = true, query_cache_tag = 'abc'` can now be dropped by `SYSTEM DROP QUERY CACHE TAG 'abc'` (or of course just: `SYSTEM DROP QUERY CACHE` which will clear the entire query cache). [#68477](https://github.com/ClickHouse/ClickHouse/pull/68477) ([Michał Tabaszewski](https://github.com/pinsvin00)). -- A simple SELECT query can be written with implicit SELECT to enable calculator-style expressions, e.g., `ch "1 + 2"`. 
This is controlled by a new setting, `implicit_select`. [#68502](https://github.com/ClickHouse/ClickHouse/pull/68502) ([Alexey Milovidov](https://github.com/alexey-milovidov)). -- Support --copy mode for clickhouse local as a shortcut for format conversion [#68503](https://github.com/ClickHouse/ClickHouse/issues/68503). [#68583](https://github.com/ClickHouse/ClickHouse/pull/68583) ([Denis Hananein](https://github.com/denis-hananein)). -- Added `ripeMD160` function, which computes the RIPEMD-160 cryptographic hash of a string. Example: `SELECT hex(ripeMD160('The quick brown fox jumps over the lazy dog'))` returns `37F332F68DB77BD9D7EDD4969571AD671CF9DD3B`. [#68639](https://github.com/ClickHouse/ClickHouse/pull/68639) ([Dergousov Maxim](https://github.com/m7kss1)). -- Add virtual column _headers for url table engine. Closes [#65026](https://github.com/ClickHouse/ClickHouse/issues/65026). [#68867](https://github.com/ClickHouse/ClickHouse/pull/68867) ([flynn](https://github.com/ucasfl)). -- Adding `system.projections` table to track available projections. [#68901](https://github.com/ClickHouse/ClickHouse/pull/68901) ([Jordi Villar](https://github.com/jrdi)). -- Add support for `arrayUnion` function. [#68989](https://github.com/ClickHouse/ClickHouse/pull/68989) ([Peter Nguyen](https://github.com/petern48)). -- Add new function `arrayZipUnaligned` for spark compatiablity(arrays_zip), which allowed unaligned arrays based on original `arrayZip`. ``` sql SELECT arrayZipUnaligned([1], [1, 2, 3]). [#69030](https://github.com/ClickHouse/ClickHouse/pull/69030) ([李扬](https://github.com/taiyang-li)). -- Support aggregate function `quantileExactWeightedInterpolated`, which is a interpolated version based on quantileExactWeighted. Some people may wonder why we need a new `quantileExactWeightedInterpolated` since we already have `quantileExactInterpolatedWeighted`. The reason is the new one is more accurate than the old one. BTW, it is for spark compatibility in Apache Gluten. [#69619](https://github.com/ClickHouse/ClickHouse/pull/69619) ([李扬](https://github.com/taiyang-li)). -- Support function arrayElementOrNull. It returns null if array index is out of range or map key not found. [#69646](https://github.com/ClickHouse/ClickHouse/pull/69646) ([李扬](https://github.com/taiyang-li)). -- Support Dynamic type in most functions by executing them on internal types inside Dynamic. [#69691](https://github.com/ClickHouse/ClickHouse/pull/69691) ([Pavel Kruglov](https://github.com/Avogar)). -- Adds argument `scale` (default: `true`) to function `arrayAUC` which allows to skip the normalization step (issue [#69609](https://github.com/ClickHouse/ClickHouse/issues/69609)). [#69717](https://github.com/ClickHouse/ClickHouse/pull/69717) ([gabrielmcg44](https://github.com/gabrielmcg44)). -- Re-added `RIPEMD160` function, which computes the RIPEMD-160 cryptographic hash of a string. Example: `SELECT HEX(RIPEMD160('The quick brown fox jumps over the lazy dog'))` returns `37F332F68DB77BD9D7EDD4969571AD671CF9DD3B`. [#70087](https://github.com/ClickHouse/ClickHouse/pull/70087) ([Dergousov Maxim](https://github.com/m7kss1)). -- Allow to cache read files for object storage table engines and data lakes using hash from ETag + file path as cache key. [#70135](https://github.com/ClickHouse/ClickHouse/pull/70135) ([Kseniia Sumarokova](https://github.com/kssenii)). -- Support reading Iceberg tables on HDFS. [#70268](https://github.com/ClickHouse/ClickHouse/pull/70268) ([flynn](https://github.com/ucasfl)). 
-- Allow to read/write JSON type as binary string in RowBinary format under settings `input_format_binary_read_json_as_string/output_format_binary_write_json_as_string`. [#70288](https://github.com/ClickHouse/ClickHouse/pull/70288) ([Pavel Kruglov](https://github.com/Avogar)). -- Allow to serialize/deserialize JSON column as single String column in Native format. For output use setting `output_format_native_write_json_as_string`. For input, use serialization version `1` before the column data. [#70312](https://github.com/ClickHouse/ClickHouse/pull/70312) ([Pavel Kruglov](https://github.com/Avogar)). -- Supports standard CTE, `with insert`, as previously only supports `insert ... with ...`. [#70593](https://github.com/ClickHouse/ClickHouse/pull/70593) ([Shichao Jin](https://github.com/jsc0218)). - - diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/changelogs/changelog-24-12.md b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/changelogs/changelog-24-12.md deleted file mode 100644 index feb1b2fe93a..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/changelogs/changelog-24-12.md +++ /dev/null @@ -1,226 +0,0 @@ ---- -slug: /changelogs/24.12 -title: 'v24.12 Changelog for Cloud' -description: 'Fast release changelog for v24.12' -keywords: ['changelog', 'cloud'] -sidebar_label: 'v24.12' ---- - -Relevant changes for ClickHouse Cloud services based on the v24.12 release. - -## Backward Incompatible Changes {#backward-incompatible-changes} - -- Functions `greatest` and `least` now ignore NULL input values, whereas they previously returned NULL if one of the arguments was NULL. For example, `SELECT greatest(1, 2, NULL)` now returns 2. This makes the behavior compatible with PostgreSQL. [#65519](https://github.com/ClickHouse/ClickHouse/pull/65519) ([kevinyhzou](https://github.com/KevinyhZou)). -- Don't allow Variant/Dynamic types in ORDER BY/GROUP BY/PARTITION BY/PRIMARY KEY by default because it may lead to unexpected results. [#69731](https://github.com/ClickHouse/ClickHouse/pull/69731) ([Pavel Kruglov](https://github.com/Avogar)). -- Remove system tables `generate_series` and `generateSeries`. They were added by mistake here: [#59390](https://github.com/ClickHouse/ClickHouse/issues/59390). [#71091](https://github.com/ClickHouse/ClickHouse/pull/71091) ([Alexey Milovidov](https://github.com/alexey-milovidov)). -- Remove `StorageExternalDistributed`. Closes [#70600](https://github.com/ClickHouse/ClickHouse/issues/70600). [#71176](https://github.com/ClickHouse/ClickHouse/pull/71176) ([flynn](https://github.com/ucasfl)). -- Settings from server config (users.xml) now apply on the client too. Useful for format settings, e.g. `date_time_output_format`. [#71178](https://github.com/ClickHouse/ClickHouse/pull/71178) ([Michael Kolupaev](https://github.com/al13n321)). -- Fix possible error `No such file or directory` due to unescaped special symbols in files for JSON subcolumns. [#71182](https://github.com/ClickHouse/ClickHouse/pull/71182) ([Pavel Kruglov](https://github.com/Avogar)). -- The table engines Kafka, NATS and RabbitMQ are now covered by their own grants in the `SOURCES` hierarchy. Add grants to any non-default database users that create tables with these engine types. [#71250](https://github.com/ClickHouse/ClickHouse/pull/71250) ([Christoph Wurm](https://github.com/cwurm)). -- Check the full mutation query before executing it (including subqueries). 
This prevents accidentally running an invalid query and building up dead mutations that block valid mutations. [#71300](https://github.com/ClickHouse/ClickHouse/pull/71300) ([Christoph Wurm](https://github.com/cwurm)). -- Rename filesystem cache setting `skip_download_if_exceeds_query_cache` to `filesystem_cache_skip_download_if_exceeds_per_query_cache_write_limit`. [#71578](https://github.com/ClickHouse/ClickHouse/pull/71578) ([Kseniia Sumarokova](https://github.com/kssenii)). -- Forbid Dynamic/Variant types in min/max functions to avoid confusion. [#71761](https://github.com/ClickHouse/ClickHouse/pull/71761) ([Pavel Kruglov](https://github.com/Avogar)). -- Remove support for `Enum` as well as `UInt128` and `UInt256` arguments in `deltaSumTimestamp`. Remove support for `Int8`, `UInt8`, `Int16`, and `UInt16` of the second ("timestamp") argument of `deltaSumTimestamp`. [#71790](https://github.com/ClickHouse/ClickHouse/pull/71790) ([Alexey Milovidov](https://github.com/alexey-milovidov)). -- Added source query validation when ClickHouse is used as a source for a dictionary. [#72548](https://github.com/ClickHouse/ClickHouse/pull/72548) ([Alexey Katsman](https://github.com/alexkats)). - -## New Features {#new-features} - -- Implement SYSTEM LOAD PRIMARY KEY command to load primary indexes for all parts of a specified table or for all tables if no table is specified. This will be useful for benchmarks and to prevent extra latency during query execution. [#66252](https://github.com/ClickHouse/ClickHouse/pull/66252) ([ZAWA_ll](https://github.com/Zawa-ll)). -- Added statement `SYSTEM LOAD PRIMARY KEY` for loading the primary indexes of all parts in a specified table or for all tables if no table is specified. This can be useful for benchmarking and to prevent extra latency during query execution. [#67733](https://github.com/ClickHouse/ClickHouse/pull/67733) ([ZAWA_ll](https://github.com/Zawa-ll)). -- Add `CHECK GRANT` query to check whether the current user/role has been granted the specific privilege and whether the corresponding table/column exists in the memory. [#68885](https://github.com/ClickHouse/ClickHouse/pull/68885) ([Unalian](https://github.com/Unalian)). -- Added SQL syntax to describe workload and resource management. https://clickhouse.com/docs/en/operations/workload-scheduling. [#69187](https://github.com/ClickHouse/ClickHouse/pull/69187) ([Sergei Trifonov](https://github.com/serxa)). -- [The Iceberg data storage](https://iceberg.apache.org/spec/#file-system-operations) format provides the user with extensive options for modifying the schema of their table. In this pull request, reading a table in Iceberg format has been implemented, where the order of columns, column names, and simple type extensions have been changed. [#69445](https://github.com/ClickHouse/ClickHouse/pull/69445) ([Daniil Ivanik](https://github.com/divanik)). -- Allow each authentication method to have its own expiration date, remove from user entity. [#70090](https://github.com/ClickHouse/ClickHouse/pull/70090) ([Arthur Passos](https://github.com/arthurpassos)). -- Push external user roles from query originator to other nodes in cluster. Helpful when only originator has access to the external authenticator (like LDAP). [#70332](https://github.com/ClickHouse/ClickHouse/pull/70332) ([Andrey Zvonov](https://github.com/zvonand)). -- Support alter from String to JSON. This PR also changes the serialization of JSON and Dynamic types to new version V2. 
Old version V1 can be still used by enabling setting `merge_tree_use_v1_object_and_dynamic_serialization` (can be used during upgrade to be able to rollback the version without issues). [#70442](https://github.com/ClickHouse/ClickHouse/pull/70442) ([Pavel Kruglov](https://github.com/Avogar)). -- Add function `toUnixTimestamp64Second` which converts a `DateTime64` to a `Int64` value with fixed second precision, so we can support return negative value if date is before 00:00:00 UTC on Thursday, 1 January 1970. [#70597](https://github.com/ClickHouse/ClickHouse/pull/70597) ([zhanglistar](https://github.com/zhanglistar)). -- Add new setting `enforce_index_structure_match_on_partition_manipulation` to allow attach when source table's projections and secondary indices is a subset of those in the target table. Close [#70602](https://github.com/ClickHouse/ClickHouse/issues/70602). [#70603](https://github.com/ClickHouse/ClickHouse/pull/70603) ([zwy991114](https://github.com/zwy991114)). -- The output of function `cast` differs with Apache Spark which cause difference in gluten project, see https://github.com/apache/incubator-gluten/issues/7602 This PR adds Spark text output format support feature, default closed. [#70957](https://github.com/ClickHouse/ClickHouse/pull/70957) ([zhanglistar](https://github.com/zhanglistar)). -- Added a new header type for S3 endpoints for user authentication (`access_header`). This allows to get some access header with the lowest priority, which will be overwritten with `access_key_id` from any other source (for example, a table schema or a named collection). [#71011](https://github.com/ClickHouse/ClickHouse/pull/71011) ([MikhailBurdukov](https://github.com/MikhailBurdukov)). -- Initial implementation of settings tiers. [#71145](https://github.com/ClickHouse/ClickHouse/pull/71145) ([Raúl Marín](https://github.com/Algunenano)). -- Add support for staleness clause in order by with fill operator. [#71151](https://github.com/ClickHouse/ClickHouse/pull/71151) ([Mikhail Artemenko](https://github.com/Michicosun)). -- Implement simple CAST from Map/Tuple/Object to new JSON through serialization/deserialization from JSON string. [#71320](https://github.com/ClickHouse/ClickHouse/pull/71320) ([Pavel Kruglov](https://github.com/Avogar)). -- Added aliases `anyRespectNulls`, `firstValueRespectNulls`, and `anyValueRespectNulls` for aggregation function `any`. Also added aliases `anyLastRespectNulls` and `lastValueRespectNulls` for aggregation function `anyLast`. This allows using more natural camel-case-only syntax rather than mixed camel-case/underscore syntax, for example: `SELECT anyLastRespectNullsStateIf` instead of `anyLast_respect_nullsStateIf`. [#71403](https://github.com/ClickHouse/ClickHouse/pull/71403) ([Peter Nguyen](https://github.com/petern48)). -- Added the configuration `date_time_utc` parameter, enabling JSON log formatting to support UTC date-time in RFC 3339/ISO8601 format. [#71560](https://github.com/ClickHouse/ClickHouse/pull/71560) ([Ali](https://github.com/xogoodnow)). -- Added an option to select the side of the join that will act as the inner (build) table in the query plan. This is controlled by `query_plan_join_swap_table`, which can be set to `auto`. In this mode, ClickHouse will try to choose the table with the smallest number of rows. [#71577](https://github.com/ClickHouse/ClickHouse/pull/71577) ([Vladimir Cherkasov](https://github.com/vdimir)). -- Optimized memory usage for values of index granularity if granularity is constant for part. 
Added the ability to always select constant granularity for a part (setting `use_const_adaptive_granularity`), which helps to ensure that it is always optimized in memory. It helps in large workloads (trillions of rows in shared storage) to avoid constantly growing memory usage by metadata (values of index granularity) of data parts. [#71786](https://github.com/ClickHouse/ClickHouse/pull/71786) ([Anton Popov](https://github.com/CurtizJ)). -- Implement `allowed_feature_tier` as a global switch to disable all experimental / beta features. [#71841](https://github.com/ClickHouse/ClickHouse/pull/71841) ([Raúl Marín](https://github.com/Algunenano)). -- Add `iceberg[S3;HDFS;Azure]Cluster`, `deltaLakeCluster`, `hudiCluster` table functions. [#72045](https://github.com/ClickHouse/ClickHouse/pull/72045) ([Mikhail Artemenko](https://github.com/Michicosun)). -- Add syntax `ALTER USER {ADD|MODIFY|DROP SETTING}`, `ALTER USER {ADD|DROP PROFILE}`, the same for `ALTER ROLE` and `ALTER PROFILE`. [#72050](https://github.com/ClickHouse/ClickHouse/pull/72050) ([pufit](https://github.com/pufit)). -- Added the `arrayPrAUC` function, which calculates the AUC (Area Under the Curve) for the Precision-Recall curve. [#72073](https://github.com/ClickHouse/ClickHouse/pull/72073) ([Emmanuel](https://github.com/emmanuelsdias)). -- Added a cache for the primary index of `MergeTree` tables (can be enabled by the table setting `use_primary_key_cache`). If lazy load and cache are enabled for the primary index, it will be loaded into the cache on demand (similar to the mark cache) instead of being kept in memory forever. Added prewarming of the primary index on inserts/merges/fetches of data parts and on restarts of the table (can be enabled by the setting `prewarm_primary_key_cache`). [#72102](https://github.com/ClickHouse/ClickHouse/pull/72102) ([Anton Popov](https://github.com/CurtizJ)). -- Add the indexOfAssumeSorted function for array types. It optimizes the search when the array is sorted in non-decreasing order. [#72517](https://github.com/ClickHouse/ClickHouse/pull/72517) ([Eric Kurbanov](https://github.com/erickurbanov)). -- Allow using a delimiter as an optional second argument for the aggregate function `groupConcat`. [#72540](https://github.com/ClickHouse/ClickHouse/pull/72540) ([Yarik Briukhovetskyi](https://github.com/yariks5s)). -- A new setting, `http_response_headers`, which allows you to customize the HTTP response headers. For example, you can tell the browser to render a picture that is stored in the database. This closes [#59620](https://github.com/ClickHouse/ClickHouse/issues/59620). [#72656](https://github.com/ClickHouse/ClickHouse/pull/72656) ([Alexey Milovidov](https://github.com/alexey-milovidov)). -- Add function `fromUnixTimestamp64Second` which converts an Int64 Unix timestamp value to a DateTime64. [#73146](https://github.com/ClickHouse/ClickHouse/pull/73146) ([Robert Schulze](https://github.com/rschu1ze)). - -## Performance Improvements {#performance-improvements} - -- Add 2 new settings `short_circuit_function_evaluation_for_nulls` and `short_circuit_function_evaluation_for_nulls_threshold` that allow executing functions over `Nullable` columns in a short-circuit manner when the ratio of NULL values in the block of data exceeds the specified threshold. This means that the function will be executed only on rows with non-null values. It applies only to functions that return a NULL value for rows where at least one argument is NULL (see the sketch below). [#60129](https://github.com/ClickHouse/ClickHouse/pull/60129) ([李扬](https://github.com/taiyang-li)).
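As a rough illustration of the two short-circuit settings described in the last entry above, a query might opt in like this; the table, column, and threshold value are assumptions for the sketch, not taken from the PR:

```sql
-- Hypothetical table; only the two setting names come from the changelog entry above.
CREATE TABLE t (id UInt64, maybe_null Nullable(Float64)) ENGINE = MergeTree ORDER BY id;

-- Execute sqrt() only on rows where maybe_null is not NULL once the share of NULLs
-- in a block exceeds the threshold (0.5 is an arbitrary illustrative value).
SET short_circuit_function_evaluation_for_nulls = 1;
SET short_circuit_function_evaluation_for_nulls_threshold = 0.5;

SELECT id, sqrt(maybe_null) FROM t;
```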
-- Memory usage of `clickhouse disks remove --recursive` is reduced for object storage disks. [#67323](https://github.com/ClickHouse/ClickHouse/pull/67323) ([Kirill](https://github.com/kirillgarbar)). -- Now we won't copy input blocks columns for `join_algorithm='parallel_hash'` when distribute them between threads for parallel processing. [#67782](https://github.com/ClickHouse/ClickHouse/pull/67782) ([Nikita Taranov](https://github.com/nickitat)). -- Enable JIT compilation for more expressions: `abs`/`bitCount`/`sign`/`modulo`/`pmod`/`isNull`/`isNotNull`/`assumeNotNull`/`to(U)Int*`/`toFloat*`, comparison functions(`=`, `<`, `>`, `>=`, `<=`), logical functions(`and`, `or`). [#70598](https://github.com/ClickHouse/ClickHouse/pull/70598) ([李扬](https://github.com/taiyang-li)). -- Now `parallel_hash` algorithm will be used (if applicable) when `join_algorithm` setting is set to `default`. Two previous alternatives (`direct` and `hash`) are still considered when `parallel_hash` cannot be used. [#70788](https://github.com/ClickHouse/ClickHouse/pull/70788) ([Nikita Taranov](https://github.com/nickitat)). -- Optimized `Replacing` merge algorithm for non intersecting parts. [#70977](https://github.com/ClickHouse/ClickHouse/pull/70977) ([Anton Popov](https://github.com/CurtizJ)). -- Do not list detached parts from readonly and write-once disks for metrics and system.detached_parts. [#71086](https://github.com/ClickHouse/ClickHouse/pull/71086) ([Alexey Milovidov](https://github.com/alexey-milovidov)). -- Do not calculate heavy asynchronous metrics by default. The feature was introduced in [#40332](https://github.com/ClickHouse/ClickHouse/issues/40332), but it isn't good to have a heavy background job that is needed for only a single customer. [#71087](https://github.com/ClickHouse/ClickHouse/pull/71087) ([Alexey Milovidov](https://github.com/alexey-milovidov)). -- Improve the performance and accuracy of system.query_metric_log collection interval by reducing the critical region. [#71473](https://github.com/ClickHouse/ClickHouse/pull/71473) ([Pablo Marcos](https://github.com/pamarcos)). -- Add option to extract common expressions from `WHERE` and `ON` expressions in order to reduce the number of hash tables used during joins. Can be enabled by `optimize_extract_common_expressions = 1`. [#71537](https://github.com/ClickHouse/ClickHouse/pull/71537) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)). -- Allows to use indexes on `SELECT` with `LowCardinality(String)`. [#71598](https://github.com/ClickHouse/ClickHouse/pull/71598) ([Yarik Briukhovetskyi](https://github.com/yariks5s)). -- During query execution with parallel replicas and enabled local plan, skip index analysis on workers. The coordinator will choose ranges to read for workers based on index analysis on its side (on query initiator). [#72109](https://github.com/ClickHouse/ClickHouse/pull/72109) ([Igor Nikonov](https://github.com/devcrafter)). -- Bring back optimization for reading subcolumns of single column in Compact parts from https://github.com/ClickHouse/ClickHouse/pull/57631. It was deleted accidentally. [#72285](https://github.com/ClickHouse/ClickHouse/pull/72285) ([Pavel Kruglov](https://github.com/Avogar)). -- Speedup sorting of `LowCardinality(String)` columns by de-virtualizing calls in comparator. [#72337](https://github.com/ClickHouse/ClickHouse/pull/72337) ([Alexander Gololobov](https://github.com/davenger)). -- Optimize function argMin/Max for some simple data types. 
[#72350](https://github.com/ClickHouse/ClickHouse/pull/72350) ([alesapin](https://github.com/alesapin)). -- Optimize locking with shared locks in the memory tracker to reduce lock contention. [#72375](https://github.com/ClickHouse/ClickHouse/pull/72375) ([Jiebin Sun](https://github.com/jiebinn)). -- Add a new setting, `use_async_executor_for_materialized_views`. Use async and potentially multithreaded execution of materialized view query, can speedup views processing during INSERT, but also consume more memory. [#72497](https://github.com/ClickHouse/ClickHouse/pull/72497) ([alesapin](https://github.com/alesapin)). -- Default values for settings `max_size_to_preallocate_for_aggregation`, `max_size_to_preallocate_for_joins` were further increased to `10^12`, so the optimisation will be applied in more cases. [#72555](https://github.com/ClickHouse/ClickHouse/pull/72555) ([Nikita Taranov](https://github.com/nickitat)). -- Improved performance of deserialization of states of aggregate functions (in data type `AggregateFunction` and in distributed queries). Slightly improved performance of parsing of format `RowBinary`. [#72818](https://github.com/ClickHouse/ClickHouse/pull/72818) ([Anton Popov](https://github.com/CurtizJ)). - -## Improvement {#improvement} - -- Higher-order functions with constant arrays and constant captured arguments will return constants. [#58400](https://github.com/ClickHouse/ClickHouse/pull/58400) ([Alexey Milovidov](https://github.com/alexey-milovidov)). -- Read-in-order optimization via generating virtual rows, so less data would be read during merge sort especially useful when multiple parts exist. [#62125](https://github.com/ClickHouse/ClickHouse/pull/62125) ([Shichao Jin](https://github.com/jsc0218)). -- Query plan step names (`EXPLAIN PLAN json=1`) and pipeline processor names (`EXPLAIN PIPELINE compact=0,graph=1`) now have a unique id as a suffix. This allows to match processors profiler output and OpenTelemetry traces with explain output. [#63518](https://github.com/ClickHouse/ClickHouse/pull/63518) ([qhsong](https://github.com/qhsong)). -- Added option to check object exists after writing to Azure Blob Storage, this is controlled by setting `check_objects_after_upload`. [#64847](https://github.com/ClickHouse/ClickHouse/pull/64847) ([Smita Kulkarni](https://github.com/SmitaRKulkarni)). -- Fix use-after-dtor logic in HashTable destroyElements. [#65279](https://github.com/ClickHouse/ClickHouse/pull/65279) ([cangyin](https://github.com/cangyin)). -- Use `Atomic` database by default in `clickhouse-local`. Address items 1 and 5 from [#50647](https://github.com/ClickHouse/ClickHouse/issues/50647). Closes [#44817](https://github.com/ClickHouse/ClickHouse/issues/44817). [#68024](https://github.com/ClickHouse/ClickHouse/pull/68024) ([Alexey Milovidov](https://github.com/alexey-milovidov)). -- Write buffer has to be canceled or finalized explicitly. Exceptions break the HTTP protocol in order to alert the client about error. [#68800](https://github.com/ClickHouse/ClickHouse/pull/68800) ([Sema Checherinda](https://github.com/CheSema)). -- Report running DDLWorker hosts by creating replica_dir and mark replicas active in DDLWorker. [#69658](https://github.com/ClickHouse/ClickHouse/pull/69658) ([Tuan Pham Anh](https://github.com/tuanpach)). -- 1. 
Refactor `DDLQueryStatusSource`: - Rename `DDLQueryStatusSource` to `DistributedQueryStatusSource`, and make it a base class - Create two subclasses `DDLOnClusterQueryStatusSource` and `ReplicatedDatabaseQueryStatusSource` derived from `DDLQueryStatusSource` to query the status of DDL tasks from `DDL On Cluster and Replicated databases respectively. 2. Support stop waiting for offline hosts in `DDLOnClusterQueryStatusSource`. [#69660](https://github.com/ClickHouse/ClickHouse/pull/69660) ([Tuan Pham Anh](https://github.com/tuanpach)). -- Adding a new cancellation logic: `CancellationChecker` checks timeouts for every started query and stops them once the timeout has reached. [#69880](https://github.com/ClickHouse/ClickHouse/pull/69880) ([Yarik Briukhovetskyi](https://github.com/yariks5s)). -- Remove the `allow_experimental_join_condition` setting, allowing non-equi conditions by default. [#69910](https://github.com/ClickHouse/ClickHouse/pull/69910) ([Vladimir Cherkasov](https://github.com/vdimir)). -- Enable `parallel_replicas_local_plan` by default. Building a full-fledged local plan on the query initiator improves parallel replicas performance with less resource consumption, provides opportunities to apply more query optimizations. [#70171](https://github.com/ClickHouse/ClickHouse/pull/70171) ([Igor Nikonov](https://github.com/devcrafter)). -- Add ability to set user/password in http_handlers (for `dynamic_query_handler`/`predefined_query_handler`). [#70725](https://github.com/ClickHouse/ClickHouse/pull/70725) ([Azat Khuzhin](https://github.com/azat)). -- Support `ALTER TABLE ... MODIFY/RESET SETTING ...` for certain settings in storage S3Queue. [#70811](https://github.com/ClickHouse/ClickHouse/pull/70811) ([Kseniia Sumarokova](https://github.com/kssenii)). -- Do not call the object storage API when listing directories, as this may be cost-inefficient. Instead, store the list of filenames in the memory. The trade-offs are increased initial load time and memory required to store filenames. [#70823](https://github.com/ClickHouse/ClickHouse/pull/70823) ([Julia Kartseva](https://github.com/jkartseva)). -- Add `--threads` parameter to `clickhouse-compressor`, which allows to compress data in parallel. [#70860](https://github.com/ClickHouse/ClickHouse/pull/70860) ([Alexey Milovidov](https://github.com/alexey-milovidov)). -- Make the Replxx client history size configurable. [#71014](https://github.com/ClickHouse/ClickHouse/pull/71014) ([Jiří Kozlovský](https://github.com/jirislav)). -- Added a setting `prewarm_mark_cache` which enables loading of marks to mark cache on inserts, merges, fetches of parts and on startup of the table. [#71053](https://github.com/ClickHouse/ClickHouse/pull/71053) ([Anton Popov](https://github.com/CurtizJ)). -- Boolean support for parquet native reader. [#71055](https://github.com/ClickHouse/ClickHouse/pull/71055) ([Arthur Passos](https://github.com/arthurpassos)). -- Retry more errors when interacting with S3, such as "Malformed message". [#71088](https://github.com/ClickHouse/ClickHouse/pull/71088) ([Alexey Milovidov](https://github.com/alexey-milovidov)). -- Lower log level for some messages about S3. [#71090](https://github.com/ClickHouse/ClickHouse/pull/71090) ([Alexey Milovidov](https://github.com/alexey-milovidov)). -- Support write hdfs files with space. [#71105](https://github.com/ClickHouse/ClickHouse/pull/71105) ([exmy](https://github.com/exmy)). -- `system.session_log` is quite okay. 
This closes [#51760](https://github.com/ClickHouse/ClickHouse/issues/51760). [#71150](https://github.com/ClickHouse/ClickHouse/pull/71150) ([Alexey Milovidov](https://github.com/alexey-milovidov)). -- Fixes RIGHT / FULL joins in queries with parallel replicas. Now, RIGHT joins can be executed with parallel replicas (right table reading is distributed). FULL joins can't be parallelized among nodes, - executed locally. [#71162](https://github.com/ClickHouse/ClickHouse/pull/71162) ([Igor Nikonov](https://github.com/devcrafter)). -- Added settings limiting the number of replicated tables, dictionaries and views. [#71179](https://github.com/ClickHouse/ClickHouse/pull/71179) ([Kirill](https://github.com/kirillgarbar)). -- Fixes [#71227](https://github.com/ClickHouse/ClickHouse/issues/71227). [#71286](https://github.com/ClickHouse/ClickHouse/pull/71286) ([Arthur Passos](https://github.com/arthurpassos)). -- Automatic `GROUP BY`/`ORDER BY` to disk based on the server/user memory usage. Controlled with `max_bytes_ratio_before_external_group_by`/`max_bytes_ratio_before_external_sort` query settings. [#71406](https://github.com/ClickHouse/ClickHouse/pull/71406) ([Azat Khuzhin](https://github.com/azat)). -- Add per host dashboards `Overview (host)` and `Cloud overview (host)` to advanced dashboard. [#71422](https://github.com/ClickHouse/ClickHouse/pull/71422) ([alesapin](https://github.com/alesapin)). -- Function `translate` now supports character deletion if the `from` argument contains more characters than the `to` argument. Example: `SELECT translate('clickhouse', 'clickhouse', 'CLICK')` now returns `CLICK`. [#71441](https://github.com/ClickHouse/ClickHouse/pull/71441) ([shuai.xu](https://github.com/shuai-xu)). -- Added new functions `parseDateTime64`, `parseDateTime64OrNull` and `parseDateTime64OrZero`. Compared to the existing function `parseDateTime` (and variants), they return a value of type `DateTime64` instead of `DateTime`. [#71581](https://github.com/ClickHouse/ClickHouse/pull/71581) ([kevinyhzou](https://github.com/KevinyhZou)). -- Shrink to fit index_granularity array in memory to reduce memory footprint for MergeTree table engines family. [#71595](https://github.com/ClickHouse/ClickHouse/pull/71595) ([alesapin](https://github.com/alesapin)). -- The command line applications will highlight syntax even for multi-statements. [#71622](https://github.com/ClickHouse/ClickHouse/pull/71622) ([Alexey Milovidov](https://github.com/alexey-milovidov)). -- Command-line applications will return non-zero exit codes on errors. In previous versions, the `disks` application returned zero on errors, and other applications returned zero for errors 256 (`PARTITION_ALREADY_EXISTS`) and 512 (`SET_NON_GRANTED_ROLE`). [#71623](https://github.com/ClickHouse/ClickHouse/pull/71623) ([Alexey Milovidov](https://github.com/alexey-milovidov)). -- The `Vertical` format (which is also activated when you end your query with `\G`) gets the features of Pretty formats, such as: - highlighting thousand groups in numbers; - printing a readable number tip. [#71630](https://github.com/ClickHouse/ClickHouse/pull/71630) ([Alexey Milovidov](https://github.com/alexey-milovidov)). -- Allow to disable memory buffer increase for filesystem cache via setting `filesystem_cache_prefer_bigger_buffer_size`. [#71640](https://github.com/ClickHouse/ClickHouse/pull/71640) ([Kseniia Sumarokova](https://github.com/kssenii)). 
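The automatic spilling of `GROUP BY`/`ORDER BY` to disk described above (settings `max_bytes_ratio_before_external_group_by` and `max_bytes_ratio_before_external_sort`) could be exercised roughly as follows; the ratio values and the `events` table are illustrative assumptions:

```sql
-- Assumed table; only the two setting names come from the changelog entry above.
SET max_bytes_ratio_before_external_group_by = 0.5; -- illustrative ratio, not a recommended default
SET max_bytes_ratio_before_external_sort = 0.5;

SELECT user_id, count() AS c
FROM events
GROUP BY user_id
ORDER BY c DESC;
```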
-- Add a separate setting `background_download_max_file_segment_size` to control the maximum file segment size for background downloads in the filesystem cache. [#71648](https://github.com/ClickHouse/ClickHouse/pull/71648) ([Kseniia Sumarokova](https://github.com/kssenii)). -- Changes the default value of `enable_http_compression` from 0 to 1. Closes [#71591](https://github.com/ClickHouse/ClickHouse/issues/71591). [#71774](https://github.com/ClickHouse/ClickHouse/pull/71774) ([Peter Nguyen](https://github.com/petern48)). -- Support ALTER from Object to JSON. [#71784](https://github.com/ClickHouse/ClickHouse/pull/71784) ([Pavel Kruglov](https://github.com/Avogar)). -- Slightly better JSON type parsing: if the current block for a JSON path contains values of several types, try to choose the best type by trying types in a special best-effort order. [#71785](https://github.com/ClickHouse/ClickHouse/pull/71785) ([Pavel Kruglov](https://github.com/Avogar)). -- Previously, reading from `system.asynchronous_metrics` would wait for a concurrent update to finish. This could take a long time if the system was under heavy load. With this change, the previously collected values can always be read. [#71798](https://github.com/ClickHouse/ClickHouse/pull/71798) ([Alexander Gololobov](https://github.com/davenger)). -- Set `polling_max_timeout_ms` to 10 minutes, `polling_backoff_ms` to 30 seconds. [#71817](https://github.com/ClickHouse/ClickHouse/pull/71817) ([Kseniia Sumarokova](https://github.com/kssenii)). -- Queries like `SELECT * FROM t LIMIT 1` used to load part indexes even though they were not used. [#71866](https://github.com/ClickHouse/ClickHouse/pull/71866) ([Alexander Gololobov](https://github.com/davenger)). -- `allow_reorder_prewhere_conditions` is on by default with old compatibility settings. [#71867](https://github.com/ClickHouse/ClickHouse/pull/71867) ([Raúl Marín](https://github.com/Algunenano)). -- Do not increment the `ILLEGAL_TYPE_OF_ARGUMENT` counter in the `system.errors` table when the `bitmapTransform` function is used and the argument types are valid. [#71971](https://github.com/ClickHouse/ClickHouse/pull/71971) ([Dmitry Novik](https://github.com/novikd)). -- When retrieving data directly from a dictionary using Dictionary storage, the dictionary table function, or a direct SELECT from the dictionary itself, it is now enough to have `SELECT` permission or `dictGet` permission for the dictionary. This aligns with previous attempts to prevent ACL bypasses: https://github.com/ClickHouse/ClickHouse/pull/57362 and https://github.com/ClickHouse/ClickHouse/pull/65359. It also makes the latter one backward compatible. [#72051](https://github.com/ClickHouse/ClickHouse/pull/72051) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)). -- Added a dropdown selector to the advanced dashboard HTML page for choosing a dashboard from the `system.dashboards` table. [#72081](https://github.com/ClickHouse/ClickHouse/pull/72081) ([Sergei Trifonov](https://github.com/serxa)). -- Respect `prefer_localhost_replica` when building the plan for a distributed `INSERT ... SELECT`. [#72190](https://github.com/ClickHouse/ClickHouse/pull/72190) ([filimonov](https://github.com/filimonov)). -- The problem is [described here](https://github.com/ClickHouse/ClickHouse/issues/72091). The Azure Iceberg Writer creates Iceberg metadata files (as well as manifest files) that violate the spec. This PR adds an attempt to read v1 Iceberg format metadata with the v2 reader (because that is how those files are written), and adds an error when the corresponding fields were not created in a manifest file.
[#72277](https://github.com/ClickHouse/ClickHouse/pull/72277) ([Daniil Ivanik](https://github.com/divanik)). -- Move JSON/Dynamic/Variant types from experimental features to beta. [#72294](https://github.com/ClickHouse/ClickHouse/pull/72294) ([Pavel Kruglov](https://github.com/Avogar)). -- It is now allowed to `CREATE MATERIALIZED VIEW` with `UNION [ALL]` in the query. Behavior is the same as for a materialized view with `JOIN`: **only the first table in the `SELECT` expression will work as a trigger for insert**, all other tables will be ignored. [#72347](https://github.com/ClickHouse/ClickHouse/pull/72347) ([alesapin](https://github.com/alesapin)). -- Speed up insertions into MergeTree when the inserted batch contains a single value of the partition key. [#72348](https://github.com/ClickHouse/ClickHouse/pull/72348) ([alesapin](https://github.com/alesapin)). -- Add the new MergeTreeIndexGranularityInternalArraysTotalSize metric to system.metrics. This metric is needed to find the instances with huge datasets susceptible to the high memory usage issue. [#72490](https://github.com/ClickHouse/ClickHouse/pull/72490) ([Miсhael Stetsyuk](https://github.com/mstetsyuk)). -- All spellings of the word `Null` are now recognised when a query uses `Format Null`. Previously, other forms (e.g. `NULL`) did not result in exceptions being thrown, but at the same time the `Null` format wasn't actually used in those cases. [#72658](https://github.com/ClickHouse/ClickHouse/pull/72658) ([Nikita Taranov](https://github.com/nickitat)). -- Allow unknown values in a set that are not present in the Enum. Fix [#72662](https://github.com/ClickHouse/ClickHouse/issues/72662). [#72686](https://github.com/ClickHouse/ClickHouse/pull/72686) ([zhanglistar](https://github.com/zhanglistar)). -- Add total_bytes_with_inactive to system.tables to count the total bytes of inactive parts. [#72690](https://github.com/ClickHouse/ClickHouse/pull/72690) ([Kai Zhu](https://github.com/nauu)). -- Add MergeTreeSettings to system.settings_changes. [#72694](https://github.com/ClickHouse/ClickHouse/pull/72694) ([Raúl Marín](https://github.com/Algunenano)). -- Support string search operators (e.g. `LIKE`) for the Enum data type, fix [#72661](https://github.com/ClickHouse/ClickHouse/issues/72661). [#72732](https://github.com/ClickHouse/ClickHouse/pull/72732) ([zhanglistar](https://github.com/zhanglistar)). -- Support the JSON type in the notEmpty function. [#72741](https://github.com/ClickHouse/ClickHouse/pull/72741) ([Pavel Kruglov](https://github.com/Avogar)). -- Support parsing the GCS S3 error `AuthenticationRequired`. [#72753](https://github.com/ClickHouse/ClickHouse/pull/72753) ([Vitaly Baranov](https://github.com/vitlibar)). -- Support the Dynamic type in functions ifNull and coalesce. [#72772](https://github.com/ClickHouse/ClickHouse/pull/72772) ([Pavel Kruglov](https://github.com/Avogar)). -- Added `JoinBuildTableRowCount/JoinProbeTableRowCount/JoinResultRowCount` profile events. [#72842](https://github.com/ClickHouse/ClickHouse/pull/72842) ([Vladimir Cherkasov](https://github.com/vdimir)). -- Support Dynamic in functions toFloat64/toUInt32/etc. [#72989](https://github.com/ClickHouse/ClickHouse/pull/72989) ([Pavel Kruglov](https://github.com/Avogar)). - -## Bug Fix (user-visible misbehavior in an official stable release) {#bug-fix} - -- The parts deduplicated during an `ATTACH PART` query no longer get stuck with the `attaching_` prefix. [#65636](https://github.com/ClickHouse/ClickHouse/pull/65636) ([Kirill](https://github.com/kirillgarbar)).
-- Fix a bug where DateTime64 loses precision in the `IN` function. [#67230](https://github.com/ClickHouse/ClickHouse/pull/67230) ([Yarik Briukhovetskyi](https://github.com/yariks5s)). -- Fix a possible logical error when using functions with `IGNORE/RESPECT NULLS` in `ORDER BY ... WITH FILL`, close [#57609](https://github.com/ClickHouse/ClickHouse/issues/57609). [#68234](https://github.com/ClickHouse/ClickHouse/pull/68234) ([Vladimir Cherkasov](https://github.com/vdimir)). -- Fixed rare logical errors in asynchronous inserts with format `Native` when the memory limit was reached. [#68965](https://github.com/ClickHouse/ClickHouse/pull/68965) ([Anton Popov](https://github.com/CurtizJ)). -- Fix COMMENT in CREATE TABLE for an EPHEMERAL column. [#70458](https://github.com/ClickHouse/ClickHouse/pull/70458) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)). -- Fix a logical error in JSONExtract with LowCardinality(Nullable). [#70549](https://github.com/ClickHouse/ClickHouse/pull/70549) ([Pavel Kruglov](https://github.com/Avogar)). -- Fixes behaviour when the table name is too long. [#70810](https://github.com/ClickHouse/ClickHouse/pull/70810) ([Yarik Briukhovetskyi](https://github.com/yariks5s)). -- Add the ability to override Content-Type with user headers in the URL engine. [#70859](https://github.com/ClickHouse/ClickHouse/pull/70859) ([Artem Iurin](https://github.com/ortyomka)). -- Fix a logical error in `StorageS3Queue`: "Cannot create a persistent node in /processed since it already exists". [#70984](https://github.com/ClickHouse/ClickHouse/pull/70984) ([Kseniia Sumarokova](https://github.com/kssenii)). -- Fix a bug where the `_row_exists` column was not considered in the rebuild option of projection lightweight deletes. [#71089](https://github.com/ClickHouse/ClickHouse/pull/71089) ([Shichao Jin](https://github.com/jsc0218)). -- Fix a wrong value in system.query_metric_log due to an unexpected race condition. [#71124](https://github.com/ClickHouse/ClickHouse/pull/71124) ([Pablo Marcos](https://github.com/pamarcos)). -- Fix the mismatched aggregate function name of quantileExactWeightedInterpolated. The bug was introduced in https://github.com/ClickHouse/ClickHouse/pull/69619. cc @Algunenano. [#71168](https://github.com/ClickHouse/ClickHouse/pull/71168) ([李扬](https://github.com/taiyang-li)). -- Fix a bad_weak_ptr exception with Dynamic in comparison functions. [#71183](https://github.com/ClickHouse/ClickHouse/pull/71183) ([Pavel Kruglov](https://github.com/Avogar)). -- Don't delete a blob when there are nodes using it in ReplicatedMergeTree with zero-copy replication. [#71186](https://github.com/ClickHouse/ClickHouse/pull/71186) ([Antonio Andelic](https://github.com/antonio2368)). -- Fix ignoring format settings in Native format via HTTP and Async Inserts. [#71193](https://github.com/ClickHouse/ClickHouse/pull/71193) ([Pavel Kruglov](https://github.com/Avogar)). -- SELECT queries run with setting `use_query_cache = 1` are no longer rejected if the name of a system table appears as a literal, e.g. `SELECT * FROM users WHERE name = 'system.metrics' SETTINGS use_query_cache = true;` now works. [#71254](https://github.com/ClickHouse/ClickHouse/pull/71254) ([Robert Schulze](https://github.com/rschu1ze)). -- Fix a memory usage increase when `enable_filesystem_cache=1` but the disk in the storage configuration did not have any cache configuration. [#71261](https://github.com/ClickHouse/ClickHouse/pull/71261) ([Kseniia Sumarokova](https://github.com/kssenii)).
-- Fix possible "Cannot read all data" errors during deserialization of a LowCardinality dictionary from a Dynamic column. [#71299](https://github.com/ClickHouse/ClickHouse/pull/71299) ([Pavel Kruglov](https://github.com/Avogar)). -- Fix incomplete cleanup of the parallel output format in the client. [#71304](https://github.com/ClickHouse/ClickHouse/pull/71304) ([Raúl Marín](https://github.com/Algunenano)). -- Added missing unescaping in named collections. Without this fix, clickhouse-server can't start. [#71308](https://github.com/ClickHouse/ClickHouse/pull/71308) ([MikhailBurdukov](https://github.com/MikhailBurdukov)). -- Fix async inserts with empty blocks via the native protocol. [#71312](https://github.com/ClickHouse/ClickHouse/pull/71312) ([Anton Popov](https://github.com/CurtizJ)). -- Fix inconsistent AST formatting when granting wrong wildcard grants [#71309](https://github.com/ClickHouse/ClickHouse/issues/71309). [#71332](https://github.com/ClickHouse/ClickHouse/pull/71332) ([pufit](https://github.com/pufit)). -- Check suspicious and experimental types in JSON type hints. [#71369](https://github.com/ClickHouse/ClickHouse/pull/71369) ([Pavel Kruglov](https://github.com/Avogar)). -- Fix the error `Invalid number of rows in Chunk` with a Variant column. [#71388](https://github.com/ClickHouse/ClickHouse/pull/71388) ([Pavel Kruglov](https://github.com/Avogar)). -- Fix a crash in the `mongodb` table function when passing wrong arguments (e.g. `NULL`). [#71426](https://github.com/ClickHouse/ClickHouse/pull/71426) ([Vladimir Cherkasov](https://github.com/vdimir)). -- Fix a crash with optimize_rewrite_array_exists_to_has. [#71432](https://github.com/ClickHouse/ClickHouse/pull/71432) ([Raúl Marín](https://github.com/Algunenano)). -- Fix a NoSuchKey error during transaction rollback when creating a directory fails for the plain_rewritable disk. [#71439](https://github.com/ClickHouse/ClickHouse/pull/71439) ([Julia Kartseva](https://github.com/jkartseva)). -- Fixed the usage of the setting `max_insert_delayed_streams_for_parallel_write` in inserts. Previously it worked incorrectly, which could lead to high memory usage in inserts that write data into several partitions. [#71474](https://github.com/ClickHouse/ClickHouse/pull/71474) ([Anton Popov](https://github.com/CurtizJ)). -- Fix a possible error `Argument for function must be constant` (old analyzer) in cases where arrayJoin can appear in the `WHERE` condition. Regression after https://github.com/ClickHouse/ClickHouse/pull/65414. [#71476](https://github.com/ClickHouse/ClickHouse/pull/71476) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). -- Prevent a crash in SortCursor with 0 columns (old analyzer). [#71494](https://github.com/ClickHouse/ClickHouse/pull/71494) ([Raúl Marín](https://github.com/Algunenano)). -- Fix Date32 out of range caused by uninitialized ORC data. For more details, refer to https://github.com/apache/incubator-gluten/issues/7823. [#71500](https://github.com/ClickHouse/ClickHouse/pull/71500) ([李扬](https://github.com/taiyang-li)). -- Fix counting the column size in a wide part for Dynamic and JSON types. [#71526](https://github.com/ClickHouse/ClickHouse/pull/71526) ([Pavel Kruglov](https://github.com/Avogar)). -- Analyzer fix for when a query inside a materialized view uses IN with a CTE. Closes [#65598](https://github.com/ClickHouse/ClickHouse/issues/65598). [#71538](https://github.com/ClickHouse/ClickHouse/pull/71538) ([Maksim Kita](https://github.com/kitaisreal)).
-- Return 0 or default char instead of throwing an error in bitShift functions in case of out of bounds. [#71580](https://github.com/ClickHouse/ClickHouse/pull/71580) ([Pablo Marcos](https://github.com/pamarcos)). -- Fix server crashes while using materialized view with certain engines. [#71593](https://github.com/ClickHouse/ClickHouse/pull/71593) ([Pervakov Grigorii](https://github.com/GrigoryPervakov)). -- Array join with a nested data structure, which contains an alias to a constant array was leading to a null pointer dereference. This closes [#71677](https://github.com/ClickHouse/ClickHouse/issues/71677). [#71678](https://github.com/ClickHouse/ClickHouse/pull/71678) ([Alexey Milovidov](https://github.com/alexey-milovidov)). -- Fix LOGICAL_ERROR when doing ALTER with empty tuple. This fixes [#71647](https://github.com/ClickHouse/ClickHouse/issues/71647). [#71679](https://github.com/ClickHouse/ClickHouse/pull/71679) ([Amos Bird](https://github.com/amosbird)). -- Don't transform constant set in predicates over partition columns in case of NOT IN operator. [#71695](https://github.com/ClickHouse/ClickHouse/pull/71695) ([Eduard Karacharov](https://github.com/korowa)). -- Fix CAST from LowCardinality(Nullable) to Dynamic. Previously it could lead to error `Bad cast from type DB::ColumnVector to DB::ColumnNullable`. [#71742](https://github.com/ClickHouse/ClickHouse/pull/71742) ([Pavel Kruglov](https://github.com/Avogar)). -- Fix exception for toDayOfWeek on WHERE condition with primary key of DateTime64 type. [#71849](https://github.com/ClickHouse/ClickHouse/pull/71849) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)). -- Fixed filling of defaults after parsing into sparse columns. [#71854](https://github.com/ClickHouse/ClickHouse/pull/71854) ([Anton Popov](https://github.com/CurtizJ)). -- Fix GROUPING function error when input is ALIAS on distributed table, close [#68602](https://github.com/ClickHouse/ClickHouse/issues/68602). [#71855](https://github.com/ClickHouse/ClickHouse/pull/71855) ([Vladimir Cherkasov](https://github.com/vdimir)). -- Fixed select statements that use `WITH TIES` clause which might not return enough rows. [#71886](https://github.com/ClickHouse/ClickHouse/pull/71886) ([wxybear](https://github.com/wxybear)). -- Fix an exception of TOO_LARGE_ARRAY_SIZE caused when a column of arrayWithConstant evaluation is mistaken to cross the array size limit. [#71894](https://github.com/ClickHouse/ClickHouse/pull/71894) ([Udi](https://github.com/udiz)). -- `clickhouse-benchmark` reported wrong metrics for queries taking longer than one second. [#71898](https://github.com/ClickHouse/ClickHouse/pull/71898) ([Alexey Milovidov](https://github.com/alexey-milovidov)). -- Fix data race between the progress indicator and the progress table in clickhouse-client. This issue is visible when FROM INFILE is used. Intercept keystrokes during INSERT queries to toggle progress table display. [#71901](https://github.com/ClickHouse/ClickHouse/pull/71901) ([Julia Kartseva](https://github.com/jkartseva)). -- Fix serialization of Dynamic values in Pretty JSON formats. [#71923](https://github.com/ClickHouse/ClickHouse/pull/71923) ([Pavel Kruglov](https://github.com/Avogar)). -- Fix rows_processed column in system.s3/azure_queue_log broken in 24.6. Closes [#69975](https://github.com/ClickHouse/ClickHouse/issues/69975). [#71946](https://github.com/ClickHouse/ClickHouse/pull/71946) ([Kseniia Sumarokova](https://github.com/kssenii)). 
-- Fixed a case where the `s3`/`s3Cluster` functions could return an incomplete result or throw an exception. It occurred when a glob pattern was used in the S3 URI (like `pattern/*`) and an empty object existed with the key `pattern/` (such objects are automatically created by the S3 Console). Also, the default value of the setting `s3_skip_empty_files` changed from `false` to `true`. [#71947](https://github.com/ClickHouse/ClickHouse/pull/71947) ([Nikita Taranov](https://github.com/nickitat)). -- Fix a crash in clickhouse-client syntax highlighting. Closes [#71864](https://github.com/ClickHouse/ClickHouse/issues/71864). [#71949](https://github.com/ClickHouse/ClickHouse/pull/71949) ([Nikolay Degterinsky](https://github.com/evillique)). -- Fix the `Illegal type` error for `MergeTree` tables with a binary monotonic function in `ORDER BY` when the first argument is constant. Fixes [#71941](https://github.com/ClickHouse/ClickHouse/issues/71941). [#71966](https://github.com/ClickHouse/ClickHouse/pull/71966) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). -- Allow only SELECT queries in EXPLAIN AST used inside a subquery. Other types of queries led to the logical error 'Bad cast from type DB::ASTCreateQuery to DB::ASTSelectWithUnionQuery' or `Inconsistent AST formatting`. [#71982](https://github.com/ClickHouse/ClickHouse/pull/71982) ([Pavel Kruglov](https://github.com/Avogar)). -- When inserting a record via `clickhouse-client`, the client reads column descriptions from the server, but there was a bug where the descriptions were written in the wrong order; it should be [statistics, ttl, settings]. [#71991](https://github.com/ClickHouse/ClickHouse/pull/71991) ([Han Fei](https://github.com/hanfei1991)). -- Fix formatting of `MOVE PARTITION ... TO TABLE ...` alter commands when `format_alter_commands_with_parentheses` is enabled. [#72080](https://github.com/ClickHouse/ClickHouse/pull/72080) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)). -- Add the inferred format name to the create query in File/S3/URL/HDFS/Azure engines. Previously, the format name was inferred each time the server was restarted, and if the specified data files were removed, it led to errors during server startup. [#72108](https://github.com/ClickHouse/ClickHouse/pull/72108) ([Pavel Kruglov](https://github.com/Avogar)). -- Fix a bug where `min_age_to_force_merge_on_partition_only` was getting stuck repeatedly trying to merge down the same partition that was already merged to a single part, and not merging partitions that had multiple parts. [#72209](https://github.com/ClickHouse/ClickHouse/pull/72209) ([Christoph Wurm](https://github.com/cwurm)). -- Fixed a crash in `SimpleSquashingChunksTransform` that occurred in rare cases when processing sparse columns. [#72226](https://github.com/ClickHouse/ClickHouse/pull/72226) ([Vladimir Cherkasov](https://github.com/vdimir)). -- Fixed a data race in `GraceHashJoin` as a result of which some rows might be missing from the join output. [#72233](https://github.com/ClickHouse/ClickHouse/pull/72233) ([Nikita Taranov](https://github.com/nickitat)). -- Fixed `ALTER DELETE` queries with a materialized `_block_number` column (if the setting `enable_block_number_column` is enabled). [#72261](https://github.com/ClickHouse/ClickHouse/pull/72261) ([Anton Popov](https://github.com/CurtizJ)). -- Fixed a data race when `ColumnDynamic::dumpStructure()` is called concurrently, e.g. in the `ConcurrentHashJoin` constructor. [#72278](https://github.com/ClickHouse/ClickHouse/pull/72278) ([Nikita Taranov](https://github.com/nickitat)).
-- Fix a possible `LOGICAL_ERROR` with duplicate columns in `ORDER BY ... WITH FILL`. [#72387](https://github.com/ClickHouse/ClickHouse/pull/72387) ([Vladimir Cherkasov](https://github.com/vdimir)). -- Fixed mismatched types in several cases after applying `optimize_functions_to_subcolumns`. [#72394](https://github.com/ClickHouse/ClickHouse/pull/72394) ([Anton Popov](https://github.com/CurtizJ)). -- Fix a failure when parsing `BACKUP DATABASE db EXCEPT TABLES db.table` queries. [#72429](https://github.com/ClickHouse/ClickHouse/pull/72429) ([Konstantin Bogdanov](https://github.com/thevar1able)). -- Don't allow creating an empty Variant. [#72454](https://github.com/ClickHouse/ClickHouse/pull/72454) ([Pavel Kruglov](https://github.com/Avogar)). -- Fix invalid formatting of `result_part_path` in `system.merges`. [#72567](https://github.com/ClickHouse/ClickHouse/pull/72567) ([Konstantin Bogdanov](https://github.com/thevar1able)). -- Fix parsing a glob with one element. [#72572](https://github.com/ClickHouse/ClickHouse/pull/72572) ([Konstantin Bogdanov](https://github.com/thevar1able)). -- Fix query generation for the follower server in the case of a distributed query with ARRAY JOIN. Fixes [#69276](https://github.com/ClickHouse/ClickHouse/issues/69276). [#72608](https://github.com/ClickHouse/ClickHouse/pull/72608) ([Dmitry Novik](https://github.com/novikd)). -- Fix a bug where DateTime64 in DateTime64 returned nothing. [#72640](https://github.com/ClickHouse/ClickHouse/pull/72640) ([Yarik Briukhovetskyi](https://github.com/yariks5s)). -- Fix the "No such key" error in S3Queue Unordered mode when the `tracked_files_limit` setting is smaller than the rate at which S3 files appear. [#72738](https://github.com/ClickHouse/ClickHouse/pull/72738) ([Kseniia Sumarokova](https://github.com/kssenii)). -- Dropping the mark cache might take a noticeable amount of time if it is big. If the context mutex is held during this, it blocks many other activities; even a new client connection cannot be established until it is released. Holding this mutex is not actually required for synchronization, it is enough to have a local reference to the cache via a shared pointer. [#72749](https://github.com/ClickHouse/ClickHouse/pull/72749) ([Alexander Gololobov](https://github.com/davenger)). -- The PK cache was heavily underestimating its size on one of the test instances. In particular, LowCardinality columns were not including the dictionary size. The fix is to use column->allocatedBytes() plus some more overhead estimates for the cache entry size. [#72750](https://github.com/ClickHouse/ClickHouse/pull/72750) ([Alexander Gololobov](https://github.com/davenger)). -- Fix an exception thrown in RemoteQueryExecutor when the user does not exist locally. [#72759](https://github.com/ClickHouse/ClickHouse/pull/72759) ([Andrey Zvonov](https://github.com/zvonand)). -- Fixed mutations with a materialized `_block_number` column (if the setting `enable_block_number_column` is enabled). [#72854](https://github.com/ClickHouse/ClickHouse/pull/72854) ([Anton Popov](https://github.com/CurtizJ)). -- Fix backup/restore with a plain rewritable disk when there are empty files in the backup. [#72858](https://github.com/ClickHouse/ClickHouse/pull/72858) ([Kseniia Sumarokova](https://github.com/kssenii)). -- Properly cancel inserts in DistributedAsyncInsertDirectoryQueue. [#72885](https://github.com/ClickHouse/ClickHouse/pull/72885) ([Antonio Andelic](https://github.com/antonio2368)). -- Fixed a crash while parsing incorrect data into sparse columns (can happen with the setting `enable_parsing_to_custom_serialization` enabled).
[#72891](https://github.com/ClickHouse/ClickHouse/pull/72891) ([Anton Popov](https://github.com/CurtizJ)). -- Fix potential crash during backup restore. [#72947](https://github.com/ClickHouse/ClickHouse/pull/72947) ([Kseniia Sumarokova](https://github.com/kssenii)). -- Fixed bug in `parallel_hash` JOIN method that might appear when query has complex condition in the `ON` clause with inequality filters. [#72993](https://github.com/ClickHouse/ClickHouse/pull/72993) ([Nikita Taranov](https://github.com/nickitat)). -- Use default format settings during JSON parsing to avoid broken deserialization. [#73043](https://github.com/ClickHouse/ClickHouse/pull/73043) ([Pavel Kruglov](https://github.com/Avogar)). -- Fix crash in transactions with unsupported storage. [#73045](https://github.com/ClickHouse/ClickHouse/pull/73045) ([Raúl Marín](https://github.com/Algunenano)). -- Check for duplicate JSON keys during Tuple parsing. Previously it could lead to logical error `Invalid number of rows in Chunk` during parsing. [#73082](https://github.com/ClickHouse/ClickHouse/pull/73082) ([Pavel Kruglov](https://github.com/Avogar)). diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/changelogs/changelog-24-5.md b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/changelogs/changelog-24-5.md deleted file mode 100644 index 256c6e4c3be..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/changelogs/changelog-24-5.md +++ /dev/null @@ -1,182 +0,0 @@ ---- -slug: /changelogs/24.5 -title: 'v24.5 Changelog for Cloud' -description: 'Fast release changelog for v24.5' -keywords: ['changelog', 'cloud'] -sidebar_label: 'v24.5' ---- - -# v24.5 Changelog for Cloud - -Relevant changes for ClickHouse Cloud services based on the v24.5 release. - -## Breaking Changes {#breaking-changes} - -* Change the column name from duration_ms to duration_microseconds in the system.zookeeper table to reflect the reality that the duration is in the microsecond resolution. [#60774](https://github.com/ClickHouse/ClickHouse/pull/60774) (Duc Canh Le). - -* Don't allow to set max_parallel_replicas to 0 as it doesn't make sense. Setting it to 0 could lead to unexpected logical errors. Closes #60140. [#61201](https://github.com/ClickHouse/ClickHouse/pull/61201) (Kruglov Pavel). - -* Remove support for INSERT WATCH query (part of the experimental LIVE VIEW feature). [#62382](https://github.com/ClickHouse/ClickHouse/pull/62382) (Alexey Milovidov). - -* Usage of functions neighbor, runningAccumulate, runningDifferenceStartingWithFirstValue, runningDifference deprecated (because it is error-prone). Proper window functions should be used instead. To enable them back, set allow_deprecated_error_prone_window_functions=1. [#63132](https://github.com/ClickHouse/ClickHouse/pull/63132) (Nikita Taranov). - - -## Backward Incompatible Changes {#backward-incompatible-changes} - -* In the new ClickHouse version, the functions geoDistance, greatCircleDistance, and greatCircleAngle will use 64-bit double precision floating point data type for internal calculations and return type if all the arguments are Float64. This closes #58476. In previous versions, the function always used Float32. You can switch to the old behavior by setting geo_distance_returns_float64_on_float64_arguments to false or setting compatibility to 24.2 or earlier. [#61848](https://github.com/ClickHouse/ClickHouse/pull/61848) (Alexey Milovidov). 
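For the geoDistance change above, switching back to the previous Float32 behaviour could look like this sketch; only the setting name comes from the entry, the coordinates are arbitrary:

```sql
-- Opt back into the pre-24.5 Float32 behaviour (setting name from the entry above).
SET geo_distance_returns_float64_on_float64_arguments = 0;

-- Arbitrary illustrative coordinates (longitude, latitude pairs).
SELECT geoDistance(37.62, 55.75, 30.31, 59.94) AS meters;
```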
- -* Queries from system.columns will work faster if there is a large number of columns, but many databases or tables are not granted for SHOW TABLES. Note that in previous versions, if you grant SHOW COLUMNS to individual columns without granting SHOW TABLES to the corresponding tables, the system.columns table will show these columns, but in a new version, it will skip the table entirely. Remove trace log messages "Access granted" and "Access denied" that slowed down queries. [#63439](https://github.com/ClickHouse/ClickHouse/pull/63439) (Alexey Milovidov). - -* Fix crash in largestTriangleThreeBuckets. This changes the behaviour of this function and makes it to ignore NaNs in the series provided. Thus the resultset might differ from previous versions. [#62646](https://github.com/ClickHouse/ClickHouse/pull/62646) (Raúl Marín). - -## New Features {#new-features} - -* The new analyzer is enabled by default on new services. - -* Supports dropping multiple tables at the same time like drop table a,b,c;. [#58705](https://github.com/ClickHouse/ClickHouse/pull/58705) (zhongyuankai). - -* User can now parse CRLF with TSV format using a setting input_format_tsv_crlf_end_of_line. Closes #56257. [#59747](https://github.com/ClickHouse/ClickHouse/pull/59747) (Shaun Struwig). - -* Table engine is grantable now, and it won't affect existing users behavior. [#60117](https://github.com/ClickHouse/ClickHouse/pull/60117) (jsc0218). - -* Adds the Form Format to read/write a single record in the application/x-www-form-urlencoded format. [#60199](https://github.com/ClickHouse/ClickHouse/pull/60199) (Shaun Struwig). - -* Added possibility to compress in CROSS JOIN. [#60459](https://github.com/ClickHouse/ClickHouse/pull/60459) (p1rattttt). - -* New setting input_format_force_null_for_omitted_fields that forces NULL values for omitted fields. [#60887](https://github.com/ClickHouse/ClickHouse/pull/60887) (Constantine Peresypkin). - -* Support join with inequal conditions which involve columns from both left and right table. e.g. `t1.y < t2.y`. To enable, SET allow_experimental_join_condition = 1. [#60920](https://github.com/ClickHouse/ClickHouse/pull/60920) (lgbo). - -* Add a new function, getClientHTTPHeader. This closes #54665. Co-authored with @lingtaolf. [#61820](https://github.com/ClickHouse/ClickHouse/pull/61820) (Alexey Milovidov). - -* For convenience purpose, SELECT * FROM numbers() will work in the same way as SELECT * FROM system.numbers - without a limit. [#61969](https://github.com/ClickHouse/ClickHouse/pull/61969) (YenchangChan). - -* Modifying memory table settings through ALTER MODIFY SETTING is now supported. ALTER TABLE memory MODIFY SETTING min_rows_to_keep = 100, max_rows_to_keep = 1000;. [#62039](https://github.com/ClickHouse/ClickHouse/pull/62039) (zhongyuankai). - -* Analyzer support recursive CTEs. [#62074](https://github.com/ClickHouse/ClickHouse/pull/62074) (Maksim Kita). - -* Earlier our s3 storage and s3 table function didn't support selecting from archive files. I created a solution that allows to iterate over files inside archives in S3. [#62259](https://github.com/ClickHouse/ClickHouse/pull/62259) (Daniil Ivanik). - -* Support for conditional function clamp. [#62377](https://github.com/ClickHouse/ClickHouse/pull/62377) (skyoct). - -* Add npy output format. [#62430](https://github.com/ClickHouse/ClickHouse/pull/62430) (豪肥肥). - -* Analyzer support QUALIFY clause. Closes #47819. [#62619](https://github.com/ClickHouse/ClickHouse/pull/62619) (Maksim Kita). 
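A minimal sketch of the QUALIFY clause supported by the analyzer (see the entry above); the `events` table and its columns are hypothetical:

```sql
-- Keep only the latest event per user by filtering on a window function result.
SELECT
    user_id,
    event_time,
    row_number() OVER (PARTITION BY user_id ORDER BY event_time DESC) AS rn
FROM events
QUALIFY rn = 1;
```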
- -* Added role query parameter to the HTTP interface. It works similarly to SET ROLE x, applying the role before the statement is executed. This allows for overcoming the limitation of the HTTP interface, as multiple statements are not allowed, and it is not possible to send both SET ROLE x and the statement itself at the same time. It is possible to set multiple roles that way, e.g., ?role=x&role=y, which will be an equivalent of SET ROLE x, y. [#62669](https://github.com/ClickHouse/ClickHouse/pull/62669) (Serge Klochkov). - -* Add SYSTEM UNLOAD PRIMARY KEY. [#62738](https://github.com/ClickHouse/ClickHouse/pull/62738) (Pablo Marcos). - -* Added SQL functions generateUUIDv7, generateUUIDv7ThreadMonotonic, generateUUIDv7NonMonotonic (with different monotonicity/performance trade-offs) to generate version 7 UUIDs aka. timestamp-based UUIDs with random component. Also added a new function UUIDToNum to extract bytes from a UUID and a new function UUIDv7ToDateTime to extract timestamp component from a UUID version 7. [#62852](https://github.com/ClickHouse/ClickHouse/pull/62852) (Alexey Petrunyaka). - -* Raw as a synonym for TSVRaw. [#63394](https://github.com/ClickHouse/ClickHouse/pull/63394) (Unalian). - -* Added possibility to do cross join in temporary file if size exceeds limits. [#63432](https://github.com/ClickHouse/ClickHouse/pull/63432) (p1rattttt). - -## Performance Improvements {#performance-improvements} - -* Skip merging of newly created projection blocks during INSERT-s. [#59405](https://github.com/ClickHouse/ClickHouse/pull/59405) (Nikita Taranov). - -* Reduce overhead of the mutations for SELECTs (v2). [#60856](https://github.com/ClickHouse/ClickHouse/pull/60856) (Azat Khuzhin). - -* JOIN filter push down improvements using equivalent sets. [#61216](https://github.com/ClickHouse/ClickHouse/pull/61216) (Maksim Kita). - -* Add a new analyzer pass to optimize in single value. [#61564](https://github.com/ClickHouse/ClickHouse/pull/61564) (LiuNeng). - -* Process string functions XXXUTF8 'asciily' if input strings are all ASCII chars. Inspired by apache/doris#29799. Overall speed up by 1.07x~1.62x. Notice that peak memory usage had been decreased in some cases. [#61632](https://github.com/ClickHouse/ClickHouse/pull/61632) (李扬). - -* Enabled fast Parquet encoder by default (output_format_parquet_use_custom_encoder). [#62088](https://github.com/ClickHouse/ClickHouse/pull/62088) (Michael Kolupaev). - -* Improve JSONEachRowRowInputFormat by skipping all remaining fields when all required fields are read. [#62210](https://github.com/ClickHouse/ClickHouse/pull/62210) (lgbo). - -* Functions splitByChar and splitByRegexp were speed up significantly. [#62392](https://github.com/ClickHouse/ClickHouse/pull/62392) (李扬). - -* Improve trivial insert select from files in file/s3/hdfs/url/... table functions. Add separate max_parsing_threads setting to control the number of threads used in parallel parsing. [#62404](https://github.com/ClickHouse/ClickHouse/pull/62404) (Kruglov Pavel). - -* Support parallel write buffer for AzureBlobStorage managed by setting azure_allow_parallel_part_upload. [#62534](https://github.com/ClickHouse/ClickHouse/pull/62534) (SmitaRKulkarni). - -* Functions to_utc_timestamp and from_utc_timestamp are now about 2x faster. [#62583](https://github.com/ClickHouse/ClickHouse/pull/62583) (KevinyhZou). 
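The UUIDv7 helpers added in the New Features entries above could be combined as in this sketch; the function names come from the entry, the query itself is only illustrative:

```sql
-- Generate a timestamp-based (version 7) UUID and extract the timestamp component of another one.
SELECT
    generateUUIDv7() AS id,
    UUIDv7ToDateTime(generateUUIDv7()) AS generated_at;
```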
- -* Functions parseDateTimeOrNull, parseDateTimeOrZero, parseDateTimeInJodaSyntaxOrNull and parseDateTimeInJodaSyntaxOrZero now run significantly faster (10x - 1000x) when the input contains mostly non-parseable values. [#62634](https://github.com/ClickHouse/ClickHouse/pull/62634) (LiuNeng). - -* Change HostResolver behavior on failure to keep only one record per IP. [#62652](https://github.com/ClickHouse/ClickHouse/pull/62652) (Anton Ivashkin). - -* Add a new configuration prefer_merge_sort_block_bytes to control memory usage and speed up sorting by 2x when merging when there are many columns. [#62904](https://github.com/ClickHouse/ClickHouse/pull/62904) (LiuNeng). - -* QueryPlan optimization that converts OUTER JOIN to INNER JOIN if the filter after the JOIN always filters out default values. The optimization can be controlled with the setting query_plan_convert_outer_join_to_inner_join, enabled by default. [#62907](https://github.com/ClickHouse/ClickHouse/pull/62907) (Maksim Kita). - -* Enable optimize_rewrite_sum_if_to_count_if by default. [#62929](https://github.com/ClickHouse/ClickHouse/pull/62929) (Raúl Marín). - -* Micro-optimizations for the new analyzer. [#63429](https://github.com/ClickHouse/ClickHouse/pull/63429) (Raúl Marín). - -* Index analysis will work if DateTime is compared to DateTime64. This closes #63441. [#63443](https://github.com/ClickHouse/ClickHouse/pull/63443) (Alexey Milovidov). - -* Speed up indices of type set a little (around 1.5 times) by removing garbage. [#64098](https://github.com/ClickHouse/ClickHouse/pull/64098) (Alexey Milovidov). - -## Improvements {#improvements} - -* Remove the optimize_monotonous_functions_in_order_by setting; it is becoming a no-op. [#63004](https://github.com/ClickHouse/ClickHouse/pull/63004) (Raúl Marín). - -* Maps can now have Float32, Float64, Array(T), Map(K,V) and Tuple(T1, T2, ...) as keys. Closes #54537. [#59318](https://github.com/ClickHouse/ClickHouse/pull/59318) (李扬). - -* Add asynchronous WriteBuffer for AzureBlobStorage similar to S3. [#59929](https://github.com/ClickHouse/ClickHouse/pull/59929) (SmitaRKulkarni). - -* Multiline strings with border preservation and column width change. [#59940](https://github.com/ClickHouse/ClickHouse/pull/59940) (Volodyachan). - -* Make RabbitMQ nack broken messages. Closes #45350. [#60312](https://github.com/ClickHouse/ClickHouse/pull/60312) (Kseniia Sumarokova). - -* Add a setting first_day_of_week which affects the first day of the week considered by functions toStartOfInterval(..., INTERVAL ... WEEK). This allows for consistency with function toStartOfWeek which defaults to Sunday as the first day of the week. [#60598](https://github.com/ClickHouse/ClickHouse/pull/60598) (Jordi Villar). - -* Added a persistent virtual column _block_offset which stores the original row number in the block that was assigned at insert. Persistence of the column _block_offset can be enabled by the setting enable_block_offset_column. Added a virtual column _part_data_version which contains either the min block number or the mutation version of the part. The persistent virtual column _block_number is not considered experimental anymore. [#60676](https://github.com/ClickHouse/ClickHouse/pull/60676) (Anton Popov). - -* Functions date_diff and age now calculate their result at nanosecond instead of microsecond precision. They now also offer nanosecond (or nanoseconds or ns) as a possible value for the unit parameter. [#61409](https://github.com/ClickHouse/ClickHouse/pull/61409) (Austin Kothig). - -* Now marks are not loaded for wide parts during merges.
[#61551](https://github.com/ClickHouse/ClickHouse/pull/61551) (Anton Popov). - -* Enable output_format_pretty_row_numbers by default. It is better for usability. [#61791](https://github.com/ClickHouse/ClickHouse/pull/61791) (Alexey Milovidov). - -* The progress bar will work for trivial queries with LIMIT from system.zeros, system.zeros_mt (it already works for system.numbers and system.numbers_mt), and the generateRandom table function. As a bonus, if the total number of records is greater than the max_rows_to_read limit, it will throw an exception earlier. This closes #58183. [#61823](https://github.com/ClickHouse/ClickHouse/pull/61823) (Alexey Milovidov). - -* Add TRUNCATE ALL TABLES. [#61862](https://github.com/ClickHouse/ClickHouse/pull/61862) (豪肥肥). - -* Add a setting input_format_json_throw_on_bad_escape_sequence, disabling it allows saving bad escape sequences in JSON input formats. [#61889](https://github.com/ClickHouse/ClickHouse/pull/61889) (Kruglov Pavel). - -* Fixed grammar from "a" to "the" in the warning message. There is only one Atomic engine, so it should be "to the new Atomic engine" instead of "to a new Atomic engine". [#61952](https://github.com/ClickHouse/ClickHouse/pull/61952) (shabroo). - -* Fix logical-error when undoing quorum insert transaction. [#61953](https://github.com/ClickHouse/ClickHouse/pull/61953) (Han Fei). - -* Automatically infer Nullable column types from Apache Arrow schema. [#61984](https://github.com/ClickHouse/ClickHouse/pull/61984) (Maksim Kita). - -* Allow to cancel parallel merge of aggregate states during aggregation. Example: uniqExact. [#61992](https://github.com/ClickHouse/ClickHouse/pull/61992) (Maksim Kita). - -* Dictionary source with INVALIDATE_QUERY is not reloaded twice on startup. [#62050](https://github.com/ClickHouse/ClickHouse/pull/62050) (vdimir). - -* OPTIMIZE FINAL for ReplicatedMergeTree now will wait for currently active merges to finish and then reattempt to schedule a final merge. This will put it more in line with ordinary MergeTree behaviour. [#62067](https://github.com/ClickHouse/ClickHouse/pull/62067) (Nikita Taranov). - -* While read data from a hive text file, it would use the first line of hive text file to resize of number of input fields, and sometimes the fields number of first line is not matched with the hive table defined , such as the hive table is defined to have 3 columns, like test_tbl(a Int32, b Int32, c Int32), but the first line of text file only has 2 fields, and in this situation, the input fields will be resized to 2, and if the next line of the text file has 3 fields, then the third field can not be read but set a default value 0, which is not right. [#62086](https://github.com/ClickHouse/ClickHouse/pull/62086) (KevinyhZou). - -* The syntax highlighting while typing in the client will work on the syntax level (previously, it worked on the lexer level). [#62123](https://github.com/ClickHouse/ClickHouse/pull/62123) (Alexey Milovidov). - -* Fix an issue where when a redundant = 1 or = 0 is added after a boolean expression involving the primary key, the primary index is not used. For example, both `SELECT * FROM WHERE IN () = 1` and `SELECT * FROM
WHERE NOT IN () = 0` will perform a full table scan even though the primary index could be used. [#62142](https://github.com/ClickHouse/ClickHouse/pull/62142) (josh-hildred). - -* Added setting lightweight_deletes_sync (default value: 2, meaning wait for all replicas synchronously). It is similar to the setting mutations_sync but affects only the behaviour of lightweight deletes. [#62195](https://github.com/ClickHouse/ClickHouse/pull/62195) (Anton Popov). - -* Distinguish booleans and integers while parsing values for custom settings: SET custom_a = true; SET custom_b = 1;. [#62206](https://github.com/ClickHouse/ClickHouse/pull/62206) (Vitaly Baranov). - -* Support S3 access through AWS Private Link Interface endpoints. Closes #60021, #31074 and #53761. [#62208](https://github.com/ClickHouse/ClickHouse/pull/62208) (Arthur Passos). - -* The client has to send the header 'Keep-Alive: timeout=X' to the server. If a client receives a response from the server with that header, the client has to use the value from the server. Also, it is better for a client not to use a connection which is nearly expired, in order to avoid a connection close race. [#62249](https://github.com/ClickHouse/ClickHouse/pull/62249) (Sema Checherinda). - -* Added nanosecond, microsecond, and millisecond units for date_trunc. [#62335](https://github.com/ClickHouse/ClickHouse/pull/62335) (Misz606). - -* The query cache no longer caches results of queries against system tables (system.*, information_schema.*, INFORMATION_SCHEMA.*). [#62376](https://github.com/ClickHouse/ClickHouse/pull/62376) (Robert Schulze). - -* MOVE PARTITION TO TABLE query can be delayed or can throw TOO_MANY_PARTS exception to avoid exceeding limits on the part count. The same settings and limits are applied as for the INSERT query (see max_parts_in_total, parts_to_delay_insert, parts_to_throw_insert, inactive_parts_to_throw_insert, inactive_parts_to_delay_insert, max_avg_part_size_for_too_many_parts, min_delay_to_insert_ms and max_delay_to_insert settings). [#62420](https://github.com/ClickHouse/ClickHouse/pull/62420) (Sergei Trifonov). - -* Make transform always return the first match. [#62518](https://github.com/ClickHouse/ClickHouse/pull/62518) (Raúl Marín). - -* Avoid evaluating table DEFAULT expressions while executing RESTORE. [#62601](https://github.com/ClickHouse/ClickHouse/pull/62601) (Vitaly Baranov). - -* Allow quota key with different auth scheme in HTTP requests. [#62842](https://github.com/ClickHouse/ClickHouse/pull/62842) (Kseniia Sumarokova). - -* Close the session if the user's valid_until is reached. [#63046](https://github.com/ClickHouse/ClickHouse/pull/63046) (Konstantin Bogdanov). diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/changelogs/changelog-24-6.md b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/changelogs/changelog-24-6.md deleted file mode 100644 index 3dc8d747ea5..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/changelogs/changelog-24-6.md +++ /dev/null @@ -1,140 +0,0 @@ ---- -slug: /changelogs/24.6 -title: 'v24.6 Changelog for Cloud' -description: 'Fast release changelog for v24.6' -keywords: ['changelog', 'cloud'] -sidebar_label: 'v24.6' ---- - -# v24.6 Changelog for Cloud - -Relevant changes for ClickHouse Cloud services based on the v24.6 release. - -## Backward Incompatible Change {#backward-incompatible-change} -* Rework parallel processing in `Ordered` mode of storage `S3Queue`. This PR is backward incompatible for Ordered mode if you used settings `s3queue_processing_threads_num` or `s3queue_total_shards_num`.
Setting `s3queue_total_shards_num` is deleted, previously it was allowed to use only under `s3queue_allow_experimental_sharded_mode`, which is now deprecated. A new setting is added - `s3queue_buckets`. [#64349](https://github.com/ClickHouse/ClickHouse/pull/64349) ([Kseniia Sumarokova](https://github.com/kssenii)). -* New functions `snowflakeIDToDateTime`, `snowflakeIDToDateTime64`, `dateTimeToSnowflakeID`, and `dateTime64ToSnowflakeID` were added. Unlike the existing functions `snowflakeToDateTime`, `snowflakeToDateTime64`, `dateTimeToSnowflake`, and `dateTime64ToSnowflake`, the new functions are compatible with function `generateSnowflakeID`, i.e. they accept the snowflake IDs generated by `generateSnowflakeID` and produce snowflake IDs of the same type as `generateSnowflakeID` (i.e. `UInt64`). Furthermore, the new functions default to the UNIX epoch (aka. 1970-01-01), just like `generateSnowflakeID`. If necessary, a different epoch, e.g. Twitter's/X's epoch 2010-11-04 aka. 1288834974657 msec since UNIX epoch, can be passed. The old conversion functions are deprecated and will be removed after a transition period: to use them regardless, enable setting `allow_deprecated_snowflake_conversion_functions`. [#64948](https://github.com/ClickHouse/ClickHouse/pull/64948) ([Robert Schulze](https://github.com/rschu1ze)). - -## New Feature {#new-feature} - -* Support empty tuples. [#55061](https://github.com/ClickHouse/ClickHouse/pull/55061) ([Amos Bird](https://github.com/amosbird)). -* Add Hilbert Curve encode and decode functions. [#60156](https://github.com/ClickHouse/ClickHouse/pull/60156) ([Artem Mustafin](https://github.com/Artemmm91)). -* Add support for index analysis over `hilbertEncode`. [#64662](https://github.com/ClickHouse/ClickHouse/pull/64662) ([Artem Mustafin](https://github.com/Artemmm91)). -* Added support for reading `LINESTRING` geometry in the WKT format using function `readWKTLineString`. [#62519](https://github.com/ClickHouse/ClickHouse/pull/62519) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)). -* Added new SQL functions `generateSnowflakeID` for generating Twitter-style Snowflake IDs. [#63577](https://github.com/ClickHouse/ClickHouse/pull/63577) ([Danila Puzov](https://github.com/kazalika)). -* Add support for comparing `IPv4` and `IPv6` types using the `=` operator. [#64292](https://github.com/ClickHouse/ClickHouse/pull/64292) ([Francisco J. Jurado Moreno](https://github.com/Beetelbrox)). -* Support decimal arguments in binary math functions (pow, atan2, max2, min2, hypot). [#64582](https://github.com/ClickHouse/ClickHouse/pull/64582) ([Mikhail Gorshkov](https://github.com/mgorshkov)). -* Added SQL functions `parseReadableSize` (along with `OrNull` and `OrZero` variants). [#64742](https://github.com/ClickHouse/ClickHouse/pull/64742) ([Francisco J. Jurado Moreno](https://github.com/Beetelbrox)). -* Add `_time` virtual column to file alike storages (s3/file/hdfs/url/azureBlobStorage). [#64947](https://github.com/ClickHouse/ClickHouse/pull/64947) ([Ilya Golshtein](https://github.com/ilejn)). -* Introduced new functions `base64URLEncode`, `base64URLDecode` and `tryBase64URLDecode`. [#64991](https://github.com/ClickHouse/ClickHouse/pull/64991) ([Mikhail Gorshkov](https://github.com/mgorshkov)). -* Add new function `editDistanceUTF8`, which calculates the [edit distance](https://en.wikipedia.org/wiki/Edit_distance) between two UTF8 strings. [#65269](https://github.com/ClickHouse/ClickHouse/pull/65269) ([LiuNeng](https://github.com/liuneng1994)). 
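As a quick, hedged illustration of a few of the new 24.6 functions listed above (function names come from the entries themselves; the argument values are arbitrary examples and no exact output is claimed):

```sql
-- Sketch only: exercises three of the new 24.6 functions named above.
SELECT
    editDistanceUTF8('ClickHouse', 'ClickHome')        AS edit_dist,  -- edit distance between two UTF8 strings
    parseReadableSize('1.5 MiB')                        AS bytes,      -- human-readable size string -> byte count
    base64URLEncode('https://clickhouse.com/?a=1&b=2')  AS encoded;    -- URL-safe Base64 variant
```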
-* Add `http_response_headers` configuration to support custom response headers in custom HTTP handlers. [#63562](https://github.com/ClickHouse/ClickHouse/pull/63562) ([Grigorii](https://github.com/GSokol)). -* Added a new table function `loop` to support returning query results in an infinite loop. [#63452](https://github.com/ClickHouse/ClickHouse/pull/63452) ([Sariel](https://github.com/sarielwxm)). This is useful for testing. -* Introduced two additional columns in the `system.query_log`: `used_privileges` and `missing_privileges`. `used_privileges` is populated with the privileges that were checked during query execution, and `missing_privileges` contains required privileges that are missing. [#64597](https://github.com/ClickHouse/ClickHouse/pull/64597) ([Alexey Katsman](https://github.com/alexkats)). -* Added a setting `output_format_pretty_display_footer_column_names` which when enabled displays column names at the end of the table for long tables (50 rows by default), with the threshold value for minimum number of rows controlled by `output_format_pretty_display_footer_column_names_min_rows`. [#65144](https://github.com/ClickHouse/ClickHouse/pull/65144) ([Shaun Struwig](https://github.com/Blargian)). - -## Performance Improvement {#performance-improvement} - -* Fix performance regression in cross join introduced in #60459 (24.5). #65243 (Nikita Taranov). -* Improve io_uring resubmits visibility. Rename profile event IOUringSQEsResubmits -> IOUringSQEsResubmitsAsync and add a new one IOUringSQEsResubmitsSync. #63699 (Tomer Shafir). -* Introduce assertions to verify all functions are called with columns of the right size. #63723 (Raúl Marín). -* Add the ability to reshuffle rows during insert to optimize for size without violating the order set by `PRIMARY KEY`. It's controlled by the setting `optimize_row_order` (off by default). [#63578](https://github.com/ClickHouse/ClickHouse/pull/63578) ([Igor Markelov](https://github.com/ElderlyPassionFruit)). -* Add a native parquet reader, which can read parquet binary to ClickHouse Columns directly. It's controlled by the setting `input_format_parquet_use_native_reader` (disabled by default). [#60361](https://github.com/ClickHouse/ClickHouse/pull/60361) ([ZhiHong Zhang](https://github.com/copperybean)). -* Support partial trivial count optimization when the query filter is able to select exact ranges from merge tree tables. [#60463](https://github.com/ClickHouse/ClickHouse/pull/60463) ([Amos Bird](https://github.com/amosbird)). -* Reduce max memory usage of multi-threaded `INSERT`s by collecting chunks of multiple threads in a single transform. [#61047](https://github.com/ClickHouse/ClickHouse/pull/61047) ([Yarik Briukhovetskyi](https://github.com/yariks5s)). -* Reduce the memory usage when using Azure object storage by using fixed memory allocation, avoiding the allocation of an extra buffer. [#63160](https://github.com/ClickHouse/ClickHouse/pull/63160) ([SmitaRKulkarni](https://github.com/SmitaRKulkarni)). -* Reduce the number of virtual function calls in `ColumnNullable::size`. [#60556](https://github.com/ClickHouse/ClickHouse/pull/60556) ([HappenLee](https://github.com/HappenLee)). -* Speedup `splitByRegexp` when the regular expression argument is a single-character. [#62696](https://github.com/ClickHouse/ClickHouse/pull/62696) ([Robert Schulze](https://github.com/rschu1ze)). -* Speed up aggregation by 8-bit and 16-bit keys by keeping track of the min and max keys used. This allows to reduce the number of cells that need to be verified. 
[#62746](https://github.com/ClickHouse/ClickHouse/pull/62746) ([Jiebin Sun](https://github.com/jiebinn)). -* Optimize operator IN when the left hand side is `LowCardinality` and the right is a set of constants. [#64060](https://github.com/ClickHouse/ClickHouse/pull/64060) ([Zhiguo Zhou](https://github.com/ZhiguoZh)). -* Use a thread pool to initialize and destroy hash tables inside `ConcurrentHashJoin`. [#64241](https://github.com/ClickHouse/ClickHouse/pull/64241) ([Nikita Taranov](https://github.com/nickitat)). -* Optimized vertical merges in tables with sparse columns. [#64311](https://github.com/ClickHouse/ClickHouse/pull/64311) ([Anton Popov](https://github.com/CurtizJ)). -* Enabled prefetches of data from remote filesystem during vertical merges. It improves latency of vertical merges in tables with data stored on remote filesystem. [#64314](https://github.com/ClickHouse/ClickHouse/pull/64314) ([Anton Popov](https://github.com/CurtizJ)). -* Reduce redundant calls to `isDefault` of `ColumnSparse::filter` to improve performance. [#64426](https://github.com/ClickHouse/ClickHouse/pull/64426) ([Jiebin Sun](https://github.com/jiebinn)). -* Speedup `find_super_nodes` and `find_big_family` keeper-client commands by making multiple asynchronous getChildren requests. [#64628](https://github.com/ClickHouse/ClickHouse/pull/64628) ([Alexander Gololobov](https://github.com/davenger)). -* Improve function `least`/`greatest` for nullable numberic type arguments. [#64668](https://github.com/ClickHouse/ClickHouse/pull/64668) ([KevinyhZou](https://github.com/KevinyhZou)). -* Allow merging two consequent filtering steps of a query plan. This improves filter-push-down optimization if the filter condition can be pushed down from the parent step. [#64760](https://github.com/ClickHouse/ClickHouse/pull/64760) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). -* Remove bad optimization in the vertical final implementation and re-enable vertical final algorithm by default. [#64783](https://github.com/ClickHouse/ClickHouse/pull/64783) ([Duc Canh Le](https://github.com/canhld94)). -* Remove ALIAS nodes from the filter expression. This slightly improves performance for queries with `PREWHERE` (with the new analyzer). [#64793](https://github.com/ClickHouse/ClickHouse/pull/64793) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). -* Re-enable OpenSSL session caching. [#65111](https://github.com/ClickHouse/ClickHouse/pull/65111) ([Robert Schulze](https://github.com/rschu1ze)). -* Added settings to disable materialization of skip indexes and statistics on inserts (`materialize_skip_indexes_on_insert` and `materialize_statistics_on_insert`). [#64391](https://github.com/ClickHouse/ClickHouse/pull/64391) ([Anton Popov](https://github.com/CurtizJ)). -* Use the allocated memory size to calculate the row group size and reduce the peak memory of the parquet writer in the single-threaded mode. [#64424](https://github.com/ClickHouse/ClickHouse/pull/64424) ([LiuNeng](https://github.com/liuneng1994)). -* Improve the iterator of sparse column to reduce call of `size`. [#64497](https://github.com/ClickHouse/ClickHouse/pull/64497) ([Jiebin Sun](https://github.com/jiebinn)). -* Update condition to use server-side copy for backups to Azure blob storage. [#64518](https://github.com/ClickHouse/ClickHouse/pull/64518) ([SmitaRKulkarni](https://github.com/SmitaRKulkarni)). -* Optimized memory usage of vertical merges for tables with high number of skip indexes. 
[#64580](https://github.com/ClickHouse/ClickHouse/pull/64580) ([Anton Popov](https://github.com/CurtizJ)). - -## Improvement {#improvement} - -* Returned back the behaviour of how ClickHouse works and interprets Tuples in CSV format. This change effectively reverts ClickHouse/ClickHouse#60994 and makes it available only under a few settings: output_format_csv_serialize_tuple_into_separate_columns, input_format_csv_deserialize_separate_columns_into_tuple and input_format_csv_try_infer_strings_from_quoted_tuples. #65170 (Nikita Mikhaylov). -* `SHOW CREATE TABLE` executed on top of system tables will now show the super handy comment unique for each table which will explain why this table is needed. [#63788](https://github.com/ClickHouse/ClickHouse/pull/63788) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)). -* The second argument (scale) of functions `round()`, `roundBankers()`, `floor()`, `ceil()` and `trunc()` can now be non-const. [#64798](https://github.com/ClickHouse/ClickHouse/pull/64798) ([Mikhail Gorshkov](https://github.com/mgorshkov)). -* Avoid possible deadlock during MergeTree index analysis when scheduling threads in a saturated service. [#59427](https://github.com/ClickHouse/ClickHouse/pull/59427) ([Sean Haynes](https://github.com/seandhaynes)). -* Several minor corner case fixes to S3 proxy support & tunneling. [#63427](https://github.com/ClickHouse/ClickHouse/pull/63427) ([Arthur Passos](https://github.com/arthurpassos)). -* Add metrics to track the number of directories created and removed by the `plain_rewritable` metadata storage, and the number of entries in the local-to-remote in-memory map. [#64175](https://github.com/ClickHouse/ClickHouse/pull/64175) ([Julia Kartseva](https://github.com/jkartseva)). -* The query cache now considers identical queries with different settings as different. This increases robustness in cases where different settings (e.g. `limit` or `additional_table_filters`) would affect the query result. [#64205](https://github.com/ClickHouse/ClickHouse/pull/64205) ([Robert Schulze](https://github.com/rschu1ze)). -* Support the non standard error code `QpsLimitExceeded` in object storage as a retryable error. [#64225](https://github.com/ClickHouse/ClickHouse/pull/64225) ([Sema Checherinda](https://github.com/CheSema)). -* Added a new setting `input_format_parquet_prefer_block_bytes` to control the average output block bytes, and modified the default value of `input_format_parquet_max_block_size` to 65409. [#64427](https://github.com/ClickHouse/ClickHouse/pull/64427) ([LiuNeng](https://github.com/liuneng1994)). -* Settings from the user's config don't affect merges and mutations for `MergeTree` on top of object storage. [#64456](https://github.com/ClickHouse/ClickHouse/pull/64456) ([alesapin](https://github.com/alesapin)). -* Support the non standard error code `TotalQpsLimitExceeded` in object storage as a retryable error. [#64520](https://github.com/ClickHouse/ClickHouse/pull/64520) ([Sema Checherinda](https://github.com/CheSema)). -* Updated Advanced Dashboard for both open-source and ClickHouse Cloud versions to include a chart for 'Maximum concurrent network connections'. [#64610](https://github.com/ClickHouse/ClickHouse/pull/64610) ([Thom O'Connor](https://github.com/thomoco)). -* Improve progress report on `zeros_mt` and `generateRandom`. [#64804](https://github.com/ClickHouse/ClickHouse/pull/64804) ([Raúl Marín](https://github.com/Algunenano)). 
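For the non-constant `scale` argument of `round()` mentioned above, a minimal sketch of what now becomes possible (the query and values are made up purely for illustration):

```sql
-- Sketch: the scale argument of round() may now come from a column rather than a constant.
SELECT
    number / 7                    AS value,
    number % 3                    AS scale,
    round(number / 7, number % 3) AS rounded
FROM system.numbers
LIMIT 5;
```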
-* Add an asynchronous metric `jemalloc.profile.active` to show whether sampling is currently active. This is an activation mechanism in addition to prof.active; both must be active for the calling thread to sample. [#64842](https://github.com/ClickHouse/ClickHouse/pull/64842) ([Unalian](https://github.com/Unalian)). -* Remove mark of `allow_experimental_join_condition` as important. This mark may have prevented distributed queries in a mixed versions cluster from being executed successfully. [#65008](https://github.com/ClickHouse/ClickHouse/pull/65008) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)). -* Added server Asynchronous metrics `DiskGetObjectThrottler*` and `DiskPutObjectThrottler*` reflecting the requests-per-second rate limit defined with the `s3_max_get_rps` and `s3_max_put_rps` disk settings and the currently available number of requests that can be sent without hitting the throttling limit on the disk. Metrics are defined for every disk that has a configured limit. [#65050](https://github.com/ClickHouse/ClickHouse/pull/65050) ([Sergei Trifonov](https://github.com/serxa)). -* Add a validation when creating a user with `bcrypt_hash`. [#65242](https://github.com/ClickHouse/ClickHouse/pull/65242) ([Raúl Marín](https://github.com/Algunenano)). -* Add profile events for number of rows read during/after `PREWHERE`. [#64198](https://github.com/ClickHouse/ClickHouse/pull/64198) ([Nikita Taranov](https://github.com/nickitat)). -* Print query in `EXPLAIN PLAN` with parallel replicas. [#64298](https://github.com/ClickHouse/ClickHouse/pull/64298) ([vdimir](https://github.com/vdimir)). -* Rename `allow_deprecated_functions` to `allow_deprecated_error_prone_window_functions`. [#64358](https://github.com/ClickHouse/ClickHouse/pull/64358) ([Raúl Marín](https://github.com/Algunenano)). -* Respect `max_read_buffer_size` setting for file descriptors as well in the `file` table function. [#64532](https://github.com/ClickHouse/ClickHouse/pull/64532) ([Azat Khuzhin](https://github.com/azat)). -* Disable transactions for unsupported storages even for materialized views. [#64918](https://github.com/ClickHouse/ClickHouse/pull/64918) ([alesapin](https://github.com/alesapin)). -* Forbid `QUALIFY` clause in the old analyzer. The old analyzer ignored `QUALIFY`, so it could lead to unexpected data removal in mutations. [#65356](https://github.com/ClickHouse/ClickHouse/pull/65356) ([Dmitry Novik](https://github.com/novikd)). - -## Bug Fix (user-visible misbehavior in an official stable release) {#bug-fix-user-visible-misbehavior-in-an-official-stable-release} -* Fixed 'set' skip index not working with IN and indexHint(). #62083 (Michael Kolupaev). -* Fix queries with FINAL giving a wrong result when the table does not use adaptive granularity. #62432 (Duc Canh Le). -* Support executing function during assignment of parameterized view value. #63502 (SmitaRKulkarni). -* Fixed parquet memory tracking. #63584 (Michael Kolupaev). -* Fix a rare case with missing data in the result of a distributed query. #63691 (vdimir). -* Fixed reading of columns of type Tuple(Map(LowCardinality(String), String), ...). #63956 (Anton Popov). -* Fix resolution of the unqualified COLUMNS matcher. Preserve the input columns order and forbid usage of unknown identifiers. #63962 (Dmitry Novik). -* Fix a Cyclic aliases error for cyclic aliases of different types (expression and function). #63993 (Nikolai Kochetov). -* This fix will use a proper redefined context with the correct definer for each individual view in the query pipeline. #64079 (pufit).
-* Fix analyzer: "Not found column" error is fixed when using INTERPOLATE. #64096 (Yakov Olkhovskiy). -* Prevent LOGICAL_ERROR on CREATE TABLE as MaterializedView. #64174 (Raúl Marín). -* The query cache now considers two identical queries against different databases as different. The previous behavior could be used to bypass missing privileges to read from a table. #64199 (Robert Schulze). -* Fix possible abort on uncaught exception in ~WriteBufferFromFileDescriptor in StatusFile. #64206 (Kruglov Pavel). -* Fix duplicate alias error for distributed queries with ARRAY JOIN. #64226 (Nikolai Kochetov). -* Fix unexpected accurateCast from string to integer. #64255 (wudidapaopao). -* Fixed CNF simplification, in case any OR group contains mutually exclusive atoms. #64256 (Eduard Karacharov). -* Fix Query Tree size validation. #64377 (Dmitry Novik). -* Fix Logical error: Bad cast for Buffer table with PREWHERE. #64388 (Nikolai Kochetov). -* Fixed CREATE TABLE AS queries for tables with default expressions. #64455 (Anton Popov). -* Fixed optimize_read_in_order behaviour for ORDER BY ... NULLS FIRST / LAST on tables with nullable keys. #64483 (Eduard Karacharov). -* Fix the Expression nodes list expected 1 projection names and Unknown expression or identifier errors for queries with aliases to GLOBAL IN.. #64517 (Nikolai Kochetov). -* Fix an error Cannot find column in distributed queries with constant CTE in the GROUP BY key. #64519 (Nikolai Kochetov). -* Fix the output of function formatDateTimeInJodaSyntax when a formatter generates an uneven number of characters and the last character is 0. For example, SELECT formatDateTimeInJodaSyntax(toDate('2012-05-29'), 'D') now correctly returns 150 instead of previously 15. #64614 (LiuNeng). -* Do not rewrite aggregation if -If combinator is already used. #64638 (Dmitry Novik). -* Fix type inference for float (in case of small buffer, i.e. --max_read_buffer_size 1). #64641 (Azat Khuzhin). -* Fix bug which could lead to non-working TTLs with expressions. #64694 (alesapin). -* Fix removing the WHERE and PREWHERE expressions, which are always true (for the new analyzer). #64695 (Nikolai Kochetov). -* Fixed excessive part elimination by token-based text indexes (ngrambf , full_text) when filtering by result of startsWith, endsWith, match, multiSearchAny. #64720 (Eduard Karacharov). -* Fixes incorrect behaviour of ANSI CSI escaping in the UTF8::computeWidth function. #64756 (Shaun Struwig). -* Fix a case of incorrect removal of ORDER BY / LIMIT BY across subqueries. #64766 (Raúl Marín). -* Fix (experimental) unequal join with subqueries for sets which are in the mixed join conditions. #64775 (lgbo). -* Fix crash in a local cache over plain_rewritable disk. #64778 (Julia Kartseva). -* Fix Cannot find column in distributed query with ARRAY JOIN by Nested column. Fixes #64755. #64801 (Nikolai Kochetov). -* Fix memory leak in slru cache policy. #64803 (Kseniia Sumarokova). -* Fixed possible incorrect memory tracking in several kinds of queries: queries that read any data from S3, queries via http protocol, asynchronous inserts. #64844 (Anton Popov). -* Fix the Block structure mismatch error for queries reading with PREWHERE from the materialized view when the materialized view has columns of different types than the source table. Fixes #64611. #64855 (Nikolai Kochetov). -* Fix rare crash when table has TTL with subquery + database replicated + parallel replicas + analyzer. It's really rare, but please don't use TTLs with subqueries. #64858 (alesapin). 
-* Fix ALTER MODIFY COMMENT query that was broken for parameterized VIEWs in ClickHouse/ClickHouse#54211. #65031 (Nikolay Degterinsky). -* Fix host_id in DatabaseReplicated when cluster_secure_connection parameter is enabled. Previously all the connections within the cluster created by DatabaseReplicated were not secure, even if the parameter was enabled. #65054 (Nikolay Degterinsky). -* Fix the Not-ready Set error after the PREWHERE optimization for StorageMerge. #65057 (Nikolai Kochetov). -* Avoid writing to finalized buffer in File-like storages. #65063 (Kruglov Pavel). -* Fix possible infinite query duration in case of cyclic aliases. Fixes #64849. #65081 (Nikolai Kochetov). -* Fix the Unknown expression identifier error for remote queries with INTERPOLATE (alias) (new analyzer). Fixes #64636. #65090 (Nikolai Kochetov). -* Fix pushing arithmetic operations out of aggregation. In the new analyzer, optimization was applied only once. #65104 (Dmitry Novik). -* Fix aggregate function name rewriting in the new analyzer. #65110 (Dmitry Novik). -* Respond with 5xx instead of 200 OK in case of receive timeout while reading (parts of) the request body from the client socket. #65118 (Julian Maicher). -* Fix possible crash for hedged requests. #65206 (Azat Khuzhin). -* Fix the bug in Hashed and Hashed_Array dictionary short circuit evaluation, which may read an uninitialized number, leading to various errors. #65256 (jsc0218). -* This PR ensures that the type of the constant (the IN operator's second parameter) is always visible during the IN operator's type conversion process. Otherwise, losing type information may cause some conversions to fail, such as the conversion from DateTime to Date. Fixes (#64487). #65315 (pn). diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/changelogs/changelog-24-8.md b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/changelogs/changelog-24-8.md deleted file mode 100644 index 29cabc28e51..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/changelogs/changelog-24-8.md +++ /dev/null @@ -1,66 +0,0 @@ ---- -slug: /changelogs/24.8 -title: 'v24.8 Changelog for Cloud' -description: 'Fast release changelog for v24.8' -keywords: ['changelog', 'cloud'] -sidebar_label: 'v24.8' ---- - -Relevant changes for ClickHouse Cloud services based on the v24.8 release. - -## Backward Incompatible Change {#backward-incompatible-change} - -- Change binary serialization of Variant data type: add compact mode to avoid writing the same discriminator multiple times for granules with a single variant or with only NULL values. Add MergeTree setting use_compact_variant_discriminators_serialization that is enabled by default. Note that Variant type is still experimental and backward-incompatible change in serialization should not impact you unless you have been working with support to get this feature enabled earlier. [#62774](https://github.com/ClickHouse/ClickHouse/pull/62774) (Kruglov Pavel). - -- Forbid CREATE MATERIALIZED VIEW ... ENGINE Replicated*MergeTree POPULATE AS SELECT ... with Replicated databases. This specific PR is only applicable to users still using ReplicatedMergeTree. [#63963](https://github.com/ClickHouse/ClickHouse/pull/63963) (vdimir). - -- Metric KeeperOutstandingRequets was renamed to KeeperOutstandingRequests. This fixes a typo reported in [#66179](https://github.com/ClickHouse/ClickHouse/issues/66179). [#66206](https://github.com/ClickHouse/ClickHouse/pull/66206) (Robert Schulze).
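If you monitor the renamed Keeper metric mentioned above, the new name can be checked directly (a hedged sketch; the metric is only present on servers running ClickHouse Keeper):

```sql
-- Sketch: look up the renamed metric by its new name.
SELECT metric, value, description
FROM system.metrics
WHERE metric = 'KeeperOutstandingRequests';
```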
- -- clickhouse-client and clickhouse-local now default to multi-query mode (instead of single-query mode). As an example, clickhouse-client -q "SELECT 1; SELECT 2" now works, whereas users previously had to add --multiquery (or -n). The --multiquery/-n switch became obsolete. INSERT queries in multi-query statements are treated specially based on their FORMAT clause: If the FORMAT is VALUES (the most common case), the end of the INSERT statement is represented by a trailing semicolon ; at the end of the query. For all other FORMATs (e.g. CSV or JSONEachRow), the end of the INSERT statement is represented by two newlines \n\n at the end of the query. [#63898](https://github.com/ClickHouse/ClickHouse/pull/63898) (wxybear). - -- In previous versions, it was possible to use an alternative syntax for LowCardinality data types by appending WithDictionary to the name of the data type. It was an initial working implementation, and it was never documented or exposed to the public. Now, it is deprecated. If you have used this syntax, you have to ALTER your tables and rename the data types to LowCardinality. [#66842](https://github.com/ClickHouse/ClickHouse/pull/66842) (Alexey Milovidov). - -- Fix logical errors with storage Buffer used with a distributed destination table. It's a backward incompatible change: queries using Buffer with a distributed destination table may stop working if the table appears more than once in the query (e.g., in a self-join). [#67015](https://github.com/ClickHouse/ClickHouse/pull/67015) (vdimir). - -- In previous versions, calling functions for random distributions based on the Gamma function (such as Chi-Squared, Student, Fisher) with negative arguments close to zero led to a long computation or an infinite loop. In the new version, calling these functions with zero or negative arguments will produce an exception. This closes [#67297](https://github.com/ClickHouse/ClickHouse/issues/67297). [#67326](https://github.com/ClickHouse/ClickHouse/pull/67326) (Alexey Milovidov). - -- In previous versions, arrayWithConstant could be slow if asked to generate very large arrays. In the new version, it is limited to 1 GB per array. This closes [#32754](https://github.com/ClickHouse/ClickHouse/issues/32754). [#67741](https://github.com/ClickHouse/ClickHouse/pull/67741) (Alexey Milovidov). - -- Fix REPLACE modifier formatting (forbid omitting brackets). [#67774](https://github.com/ClickHouse/ClickHouse/pull/67774) (Azat Khuzhin). - - -## New Feature {#new-feature} - -- Extend function tuple to construct named tuples in a query. Introduce function tupleNames to extract names from tuples. [#54881](https://github.com/ClickHouse/ClickHouse/pull/54881) (Amos Bird). - -- ASOF JOIN support for the full_sorting_join algorithm. Closes [#54493](https://github.com/ClickHouse/ClickHouse/issues/54493). [#55051](https://github.com/ClickHouse/ClickHouse/pull/55051) (vdimir). - -- A new table function, fuzzQuery, was added. This function allows you to modify a given query string with random variations. Example: SELECT query FROM fuzzQuery('SELECT 1');. [#62103](https://github.com/ClickHouse/ClickHouse/pull/62103) (pufit). - -- Add new window function percent_rank. [#62747](https://github.com/ClickHouse/ClickHouse/pull/62747) (lgbo). - -- Support JWT authentication in clickhouse-client. [#62829](https://github.com/ClickHouse/ClickHouse/pull/62829) (Konstantin Bogdanov). - -- Add SQL functions changeYear, changeMonth, changeDay, changeHour, changeMinute, changeSecond.
For example, SELECT changeMonth(toDate('2024-06-14'), 7) returns date 2024-07-14. [#63186](https://github.com/ClickHouse/ClickHouse/pull/63186) (cucumber95). - -- Add system.error_log which contains history of error values from table system.errors, periodically flushed to disk. [#65381](https://github.com/ClickHouse/ClickHouse/pull/65381) (Pablo Marcos). - -- Add aggregate function groupConcat. About the same as arrayStringConcat( groupArray(column), ',') Can receive 2 parameters: a string delimiter and the number of elements to be processed. [#65451](https://github.com/ClickHouse/ClickHouse/pull/65451) (Yarik Briukhovetskyi). - -- Add AzureQueue storage. [#65458](https://github.com/ClickHouse/ClickHouse/pull/65458) (Kseniia Sumarokova). - -- Add a new setting to disable/enable writing page index into parquet files. [#65475](https://github.com/ClickHouse/ClickHouse/pull/65475) (lgbo). - -- Automatically append a wildcard * to the end of a directory path with table function file. [#66019](https://github.com/ClickHouse/ClickHouse/pull/66019) (Zhidong (David) Guo). - -- Add --memory-usage option to client in non interactive mode. [#66393](https://github.com/ClickHouse/ClickHouse/pull/66393) (vdimir). - -- Add _etag virtual column for S3 table engine. Fixes [#65312](https://github.com/ClickHouse/ClickHouse/issues/65312). [#65386](https://github.com/ClickHouse/ClickHouse/pull/65386) (skyoct) - -- This pull request introduces Hive-style partitioning for different engines (File, URL, S3, AzureBlobStorage, HDFS). Hive-style partitioning organizes data into partitioned sub-directories, making it efficient to query and manage large datasets. Currently, it only creates virtual columns with the appropriate name and data. The follow-up PR will introduce the appropriate data filtering (performance speedup). [#65997](https://github.com/ClickHouse/ClickHouse/pull/65997) (Yarik Briukhovetskyi). - -- Add function printf for spark compatibility. [#66257](https://github.com/ClickHouse/ClickHouse/pull/66257) (李扬). - -- Added support for reading MULTILINESTRING geometry in WKT format using function readWKTLineString. [#67647](https://github.com/ClickHouse/ClickHouse/pull/67647) (Jacob Reckhard). - -- Added a tagging (namespace) mechanism for the query cache. The same queries with different tags are considered different by the query cache. Example: SELECT 1 SETTINGS use_query_cache = 1, query_cache_tag = 'abc' and SELECT 1 SETTINGS use_query_cache = 1, query_cache_tag = 'def' now create different query cache entries. [#68235](https://github.com/ClickHouse/ClickHouse/pull/68235)(sakulali). diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/changelogs/changelog-25_1-25_4.md b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/changelogs/changelog-25_1-25_4.md deleted file mode 100644 index 038dd45e061..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/changelogs/changelog-25_1-25_4.md +++ /dev/null @@ -1,646 +0,0 @@ ---- -slug: /changelogs/25.4 -title: 'v25.4 Changelog for Cloud' -description: 'Changelog for v25.4' -keywords: ['changelog', 'cloud'] -sidebar_label: 'v25.4' ---- - -## Backward Incompatible Changes {#backward-incompatible-changes} - -* Parquet output format converts Date and DateTime columns to date/time types supported by Parquet, instead of writing them as raw numbers. DateTime becomes DateTime64(3) (was: UInt32); setting `output_format_parquet_datetime_as_uint32` brings back the old behavior. Date becomes Date32 (was: UInt16). 
[#70950](https://github.com/ClickHouse/ClickHouse/pull/70950) ([Michael Kolupaev](https://github.com/al13n321)). -* Don't allow non-comparable types (like JSON/Object/AggregateFunction) in ORDER BY and comparison functions `less/greater/equal/etc` by default. [#73276](https://github.com/ClickHouse/ClickHouse/pull/73276) ([Pavel Kruglov](https://github.com/Avogar)). -* `JSONEachRowWithProgress` will write the progress whenever the progress happens. In previous versions, the progress was shown only after each block of the result, which made it useless. Change the way how the progress is displayed: it will not show zero values. Keep in mind that the progress is sent even if it happens frequently. It can generate a significant volume of traffic. Keep in mind that the progress is not flushed when the output is compressed. This closes [#70800](https://github.com/ClickHouse/ClickHouse/issues/70800). [#73834](https://github.com/ClickHouse/ClickHouse/pull/73834) ([Alexey Milovidov](https://github.com/alexey-milovidov)). -* The `mysql` dictionary source no longer does `SHOW TABLE STATUS` query, because it does not provide any value for InnoDB tables, nor for any recent MySQL version. This closes [#72636](https://github.com/ClickHouse/ClickHouse/issues/72636). This change is backward compatible, but put in this category, so you have a chance to notice it. [#73914](https://github.com/ClickHouse/ClickHouse/pull/73914) ([Alexey Milovidov](https://github.com/alexey-milovidov)). -* `Merge` tables will unify the structure of underlying tables by using a union of their columns and deriving common types. This closes [#64864](https://github.com/ClickHouse/ClickHouse/issues/64864). This closes [#35307](https://github.com/ClickHouse/ClickHouse/issues/35307). In certain cases, this change could be backward incompatible. One example is when there is no common type between tables, but conversion to the type of the first table is still possible, such as in the case of UInt64 and Int64 or any numeric type and String. If you want to return to the old behavior, set `merge_table_max_tables_to_look_for_schema_inference` to `1` or set `compatibility` to `24.12` or earlier. [#73956](https://github.com/ClickHouse/ClickHouse/pull/73956) ([Alexey Milovidov](https://github.com/alexey-milovidov)). -* `CHECK TABLE` queries now require a separate `CHECK` grant. In previous versions, it was enough to have the `SHOW TABLES` grant to run these queries. But a `CHECK TABLE` query can be heavy, and usual query complexity limits for `SELECT` queries don't apply to it. It led to a potential for DoS. [#74471](https://github.com/ClickHouse/ClickHouse/pull/74471) ([Alexey Milovidov](https://github.com/alexey-milovidov)). -* Check that all columns in a materialized view match the target table if `allow_materialized_view_with_bad_select` is `false`. [#74481](https://github.com/ClickHouse/ClickHouse/pull/74481) ([Christoph Wurm](https://github.com/cwurm)). -* Function `h3ToGeo()` now returns the results in the order `(lat, lon)` (which is the standard order for geometric functions). Users who wish to retain the legacy result order `(lon, lat)` can set the setting `h3togeo_lon_lat_result_order = true`. [#74719](https://github.com/ClickHouse/ClickHouse/pull/74719) ([Manish Gill](https://github.com/mgill25)). -* Add `JSONCompactEachRowWithProgress` and `JSONCompactStringsEachRowWithProgress` formats. Continuation of [#69989](https://github.com/ClickHouse/ClickHouse/issues/69989).
The `JSONCompactWithNames` and `JSONCompactWithNamesAndTypes` no longer output "totals" - apparently, it was a mistake in the implementation. [#75037](https://github.com/ClickHouse/ClickHouse/pull/75037) ([Alexey Milovidov](https://github.com/alexey-milovidov)). -* Change `format_alter_operations_with_parentheses` default to true to make alter commands list unambiguous (see https://github.com/ClickHouse/ClickHouse/pull/59532). This breaks replication with clusters prior to 24.3. If you are upgrading a cluster using older releases, turn off the setting in the server config or upgrade to 24.3 first. [#75302](https://github.com/ClickHouse/ClickHouse/pull/75302) ([Raúl Marín](https://github.com/Algunenano)). -* Disallow truncate database for replicated databases. [#76651](https://github.com/ClickHouse/ClickHouse/pull/76651) ([Bharat Nallan](https://github.com/bharatnc)). -* Disable parallel replicas by default when analyzer is disabled regardless `compatibility` setting. It's still possible to change this behavior by explicitly setting `parallel_replicas_only_with_analyzer` to `false`. [#77115](https://github.com/ClickHouse/ClickHouse/pull/77115) ([Igor Nikonov](https://github.com/devcrafter)). -* It's no longer possible to use `NaN` or `inf` for float values as settings. [#77546](https://github.com/ClickHouse/ClickHouse/pull/77546) ([Yarik Briukhovetskyi](https://github.com/yariks5s)). -* Fixes cases where `dateTrunc` is used with negative date/datetime arguments. [#77622](https://github.com/ClickHouse/ClickHouse/pull/77622) ([Yarik Briukhovetskyi](https://github.com/yariks5s)). -* The legacy MongoDB integration has been removed. Server setting `use_legacy_mongodb_integration` became obsolete and now does nothing. [#77895](https://github.com/ClickHouse/ClickHouse/pull/77895) ([Robert Schulze](https://github.com/rschu1ze)). -* Enhance SummingMergeTree validation to skip aggregation for columns used in partition or sort keys. [#78022](https://github.com/ClickHouse/ClickHouse/pull/78022) ([Pervakov Grigorii](https://github.com/GrigoryPervakov)). - -## New Features {#new-features} - -* Added an in-memory cache for deserialized skipping index granules. This should make repeated queries that use skipping indexes faster. The size of the new cache is controlled by server settings `skipping_index_cache_size` and `skipping_index_cache_max_entries`. The original motivation for the cache were vector similarity indexes which became a lot faster now. [#70102](https://github.com/ClickHouse/ClickHouse/pull/70102) ([Robert Schulze](https://github.com/rschu1ze)). -* A new implementation of the Userspace Page Cache, which allows caching data in the in-process memory instead of relying on the OS page cache. It is useful when the data is stored on a remote virtual filesystem without backing with the local filesystem cache. [#70509](https://github.com/ClickHouse/ClickHouse/pull/70509) ([Michael Kolupaev](https://github.com/al13n321)). -* Add setting to query Iceberg tables as of a specific timestamp. [#71072](https://github.com/ClickHouse/ClickHouse/pull/71072) ([Brett Hoerner](https://github.com/bretthoerner)). -* Implement Iceberg tables partition pruning for time-related transform partition operations in Iceberg. [#72044](https://github.com/ClickHouse/ClickHouse/pull/72044) ([Daniil Ivanik](https://github.com/divanik)). 
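For the `h3ToGeo()` result-order change listed in the backward incompatible section above, a hedged sketch (the H3 cell index used here is an arbitrary example value):

```sql
-- Sketch: h3ToGeo() now returns (lat, lon); the setting below restores the legacy (lon, lat) order.
SELECT h3ToGeo(644325524701193974) AS lat_lon;

SELECT h3ToGeo(644325524701193974) AS lon_lat
SETTINGS h3togeo_lon_lat_result_order = true;
```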
-* Add the ability to create min-max (skipping) indices by default for columns managed by MergeTree using settings `enable_minmax_index_for_all_numeric_columns` (for numeric columns) and `enable_minmax_index_for_all_string_columns` (for string columns). For now, both settings are disabled, so there is no behavior change yet. [#72090](https://github.com/ClickHouse/ClickHouse/pull/72090) ([Smita Kulkarni](https://github.com/SmitaRKulkarni)). -* Added aggregation function sequenceMatchEvents which return timestamps of matched events for longest chain of events in pattern. [#72349](https://github.com/ClickHouse/ClickHouse/pull/72349) ([UnamedRus](https://github.com/UnamedRus)). -* `SELECT` and `VIEW` statements now support aliases, e.g. `SELECT b FROM (SELECT number, number*2 FROM numbers(2)) AS x (a, b);`. This enables TPC-H query 15 to run without modifications. [#72480](https://github.com/ClickHouse/ClickHouse/pull/72480) ([Yarik Briukhovetskyi](https://github.com/yariks5s)). -* Added a new setting `enable_adaptive_memory_spill_scheduler` that allows multiple grace JOINs in the same query to monitor their combined memory footprint and trigger spilling into an external storage adaptively to prevent MEMORY_LIMIT_EXCEEDED. [#72728](https://github.com/ClickHouse/ClickHouse/pull/72728) ([lgbo](https://github.com/lgbo-ustc)). -* Added function `arrayNormalizedGini`. [#72823](https://github.com/ClickHouse/ClickHouse/pull/72823) ([flynn](https://github.com/ucasfl)). -* Support low cardinality decimal data types, fix [#72256](https://github.com/ClickHouse/ClickHouse/issues/72256). [#72833](https://github.com/ClickHouse/ClickHouse/pull/72833) ([zhanglistar](https://github.com/zhanglistar)). -* When `min_age_to_force_merge_seconds` and `min_age_to_force_merge_on_partition_only` are both enabled, the part merging will ignore the max bytes limit. [#73656](https://github.com/ClickHouse/ClickHouse/pull/73656) ([Kai Zhu](https://github.com/nauu)). -* Support reading HALF_FLOAT values from Apache Arrow/Parquet/ORC (they are read into Float32). This closes [#72960](https://github.com/ClickHouse/ClickHouse/issues/72960). Keep in mind that IEEE-754 half float is not the same as BFloat16. Closes [#73835](https://github.com/ClickHouse/ClickHouse/issues/73835). [#73836](https://github.com/ClickHouse/ClickHouse/pull/73836) ([Alexey Milovidov](https://github.com/alexey-milovidov)). -* The `system.trace_log` table will contain two new columns, `symbols` and `lines` containing symbolized stack trace. It allows for easy collection and export of profile information. This is controlled by the server configuration value `symbolize` inside `trace_log` and is enabled by default. [#73896](https://github.com/ClickHouse/ClickHouse/pull/73896) ([Alexey Milovidov](https://github.com/alexey-milovidov)). -* Add a new function, `generateSerialID`, which can be used to generate auto-incremental numbers in tables. Continuation of [#64310](https://github.com/ClickHouse/ClickHouse/issues/64310) by [kazalika](https://github.com/kazalika). This closes [#62485](https://github.com/ClickHouse/ClickHouse/issues/62485). [#73950](https://github.com/ClickHouse/ClickHouse/pull/73950) ([Alexey Milovidov](https://github.com/alexey-milovidov)). -* Add syntax `query1 PARALLEL WITH query2 PARALLEL WITH query3 ... PARALLEL WITH queryN` That means subqueries `{query1, query2, ... queryN}` are allowed to run in parallel with each other (and it's preferable). 
[#73983](https://github.com/ClickHouse/ClickHouse/pull/73983) ([Vitaly Baranov](https://github.com/vitlibar)). -* Now, Play UI has a progress bar during query runtime. It allows cancelling queries. It displays the total number of records and the extended information about the speed. The table can be rendered incrementally as soon as data arrives. Enable HTTP compression. Rendering of the table became faster. The table header became sticky. It allows selecting cells and navigating them by arrow keys. Fix the issue when the outline of the selected cell makes it smaller. Cells no longer expand on mouse hover but only on selection. The moment to stop rendering the incoming data is decided on the client rather than on the server side. Highlight digit groups for numbers. The overall design was refreshed and became bolder. It checks if the server is reachable and the correctness of credentials and displays the server version and uptime. The cloud icon is contoured in every font, even in Safari. Big integers inside nested data types will be rendered better. It will display inf/nan correctly. It will display data types when the mouse is over a column header. [#74204](https://github.com/ClickHouse/ClickHouse/pull/74204) ([Alexey Milovidov](https://github.com/alexey-milovidov)). -* Add the ability to create min-max (skipping) indices by default for columns managed by MergeTree using settings `add_minmax_index_for_numeric_columns` (for numeric columns) and `add_minmax_index_for_string_columns` (for string columns). For now, both settings are disabled, so there is no behavior change yet. [#74266](https://github.com/ClickHouse/ClickHouse/pull/74266) ([Smita Kulkarni](https://github.com/SmitaRKulkarni)). -* Add `script_query_number` and `script_line_number` fields to `system.query_log`, to the ClientInfo in the native protocol, and to server logs. This closes [#67542](https://github.com/ClickHouse/ClickHouse/issues/67542). Credits to [pinsvin00](https://github.com/pinsvin00) for kicking off this feature earlier in [#68133](https://github.com/ClickHouse/ClickHouse/issues/68133). [#74477](https://github.com/ClickHouse/ClickHouse/pull/74477) ([Alexey Milovidov](https://github.com/alexey-milovidov)). -* Add minus operator support for DateTime64, to allow subtraction between DateTime64 values, as well as DateTime. [#74482](https://github.com/ClickHouse/ClickHouse/pull/74482) ([Li Yin](https://github.com/liyinsg)). -* Support DeltaLake table engine for AzureBlobStorage. Fixes [#68043](https://github.com/ClickHouse/ClickHouse/issues/68043). [#74541](https://github.com/ClickHouse/ClickHouse/pull/74541) ([Smita Kulkarni](https://github.com/SmitaRKulkarni)). -* Add bind_host setting to set the source IP address for clickhouse client connections. [#74741](https://github.com/ClickHouse/ClickHouse/pull/74741) ([Todd Yocum](https://github.com/toddyocum)). -* Added an ability to apply non-finished (not materialized by background process) mutations during the execution of `SELECT` queries immediately after submitting. It can be enabled by setting `apply_mutations_on_fly`. [#74877](https://github.com/ClickHouse/ClickHouse/pull/74877) ([Anton Popov](https://github.com/CurtizJ)). -* Fixed some previously unexpected cases when `toStartOfInterval` datetime arguments are negative. Done by implementing a new function called toStartOfIntervalAllowNegative, which does pretty much the same but returns only Date32/DateTime64. 
[#74933](https://github.com/ClickHouse/ClickHouse/pull/74933) ([Yarik Briukhovetskyi](https://github.com/yariks5s)). -* A new function initialQueryStartTime has been added. It returns the start time of the current query. The value is the same across all shards during a distributed query. [#75087](https://github.com/ClickHouse/ClickHouse/pull/75087) ([Roman Lomonosov](https://github.com/lomik)). -* Introduce parametrized_view_parameters in system.tables. Closes https://github.com/clickhouse/clickhouse/issues/66756. [#75112](https://github.com/ClickHouse/ClickHouse/pull/75112) ([NamNguyenHoai](https://github.com/NamHoaiNguyen)). -* Allow altering a database comment. Closes [#73351](https://github.com/ClickHouse/ClickHouse/issues/73351) ### Documentation entry for user-facing changes. [#75622](https://github.com/ClickHouse/ClickHouse/pull/75622) ([NamNguyenHoai](https://github.com/NamHoaiNguyen)). -* Add ability to ATTACH tables without database layer (avoids UUID hack). [#75788](https://github.com/ClickHouse/ClickHouse/pull/75788) ([Azat Khuzhin](https://github.com/azat)). -* Added `concurrent_threads_scheduler` server setting that governs how CPU slots are distributed among concurrent queries. Could be set to `round_robin` (previous behavior) or `fair_round_robin` to address the issue of unfair CPU distribution between INSERTs and SELECTs. [#75949](https://github.com/ClickHouse/ClickHouse/pull/75949) ([Sergei Trifonov](https://github.com/serxa)). -* Restore QPL codec which has been removed in v24.10 due to licensing issues. [#76021](https://github.com/ClickHouse/ClickHouse/pull/76021) ([Konstantin Bogdanov](https://github.com/thevar1able)). -* Added function `arraySymmetricDifference`. It returns all elements from multiple array arguments which do not occur in all arguments. Example: `SELECT arraySymmetricDifference([1, 2], [2, 3])` returns `[1, 3]`. (issue [#61673](https://github.com/ClickHouse/ClickHouse/issues/61673)). [#76231](https://github.com/ClickHouse/ClickHouse/pull/76231) ([Filipp Abapolov](https://github.com/pheepa)). -* Add `estimatecompressionratio` aggregate function- see [#70801](https://github.com/ClickHouse/ClickHouse/issues/70801). [#76661](https://github.com/ClickHouse/ClickHouse/pull/76661) ([Tariq Almawash](https://github.com/talmawash)). -* `FilterTransformPassedRows` and `FilterTransformPassedBytes` profile events will show the number of rows and number of bytes filtered during the query execution. [#76662](https://github.com/ClickHouse/ClickHouse/pull/76662) ([Onkar Deshpande](https://github.com/onkar)). -* Added the keccak256 hash function, commonly used in blockchain implementations, especially in EVM-based systems. [#76669](https://github.com/ClickHouse/ClickHouse/pull/76669) ([Arnaud Briche](https://github.com/arnaudbriche)). -* Scram SHA256 & update postgres wire auth. [#76839](https://github.com/ClickHouse/ClickHouse/pull/76839) ([scanhex12](https://github.com/scanhex12)). -* The functionality adds the ability to define a list of headers that are forwarded from the headers of the client request to the external http authenticator. [#77054](https://github.com/ClickHouse/ClickHouse/pull/77054) ([inv2004](https://github.com/inv2004)). -* Support `IcebergMetadataFilesCache`, which will store manifest files/list and metadata.json in one cache. [#77156](https://github.com/ClickHouse/ClickHouse/pull/77156) ([Han Fei](https://github.com/hanfei1991)). -* Add functions `arrayLevenshteinDistance`, `arrayLevenshteinDistanceWeighted`, and `arraySimilarity`. 
[#77187](https://github.com/ClickHouse/ClickHouse/pull/77187) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). -* Add three new functions: `icebergTruncate` (according to the specification: https://iceberg.apache.org/spec/#truncate-transform-details), `toYearNumSinceEpoch` and `toMonthNumSinceEpoch`. Support `truncate` transform in partition pruning for `Iceberg` engine. [#77403](https://github.com/ClickHouse/ClickHouse/pull/77403) ([alesapin](https://github.com/alesapin)). -* Allow a user to query the state of an Iceberg table as it existed at a previous point in time. [#77439](https://github.com/ClickHouse/ClickHouse/pull/77439) ([Daniil Ivanik](https://github.com/divanik)). -* Added CPU slot scheduling for workloads, see https://clickhouse.com/docs/operations/workload-scheduling#cpu_scheduling for details. [#77595](https://github.com/ClickHouse/ClickHouse/pull/77595) ([Sergei Trifonov](https://github.com/serxa)). -* The `hasAll()` function can now take advantage of the tokenbf_v1, ngrambf_v1 full-text skipping indices. [#77662](https://github.com/ClickHouse/ClickHouse/pull/77662) ([UnamedRus](https://github.com/UnamedRus)). -* `JSON` data type is production-ready. See https://jsonbench.com/. `Dynamic` and `Variant` data types are production-ready. [#77785](https://github.com/ClickHouse/ClickHouse/pull/77785) ([Alexey Milovidov](https://github.com/alexey-milovidov)). -* Added an in-memory cache for deserialized vector similarity indexes. This should make repeated approximate nearest neighbor (ANN) search queries faster. The size of the new cache is controlled by server settings `vector_similarity_index_cache_size` and `vector_similarity_index_cache_max_entries`. This feature supersedes the skipping index cache feature of earlier releases. [#77905](https://github.com/ClickHouse/ClickHouse/pull/77905) ([Shankar Iyer](https://github.com/shankar-iyer)). -* Added functions sparseGrams and sparseGramsHashes, along with UTF8 versions. Author: [scanhex12](https://github.com/scanhex12). [#78176](https://github.com/ClickHouse/ClickHouse/pull/78176) ([Pervakov Grigorii](https://github.com/GrigoryPervakov)). -* Introduce `toInterval` function. This function accepts 2 arguments (value and unit), and converts the value to a specific `Interval` type. [#78723](https://github.com/ClickHouse/ClickHouse/pull/78723) ([Andrew Davis](https://github.com/pulpdrew)). - -## Experimental features {#experimental-features} - -* Allow automatic cleanup merges of entire partitions after a configurable timeout with a new setting `enable_replacing_merge_with_cleanup_for_min_age_to_force_merge`. [#76440](https://github.com/ClickHouse/ClickHouse/pull/76440) ([Christoph Wurm](https://github.com/cwurm)). -* Add support [for Unity Catalog](https://www.databricks.com/product/unity-catalog) for DeltaLake tables on top of AWS S3 and local filesystem. [#76988](https://github.com/ClickHouse/ClickHouse/pull/76988) ([alesapin](https://github.com/alesapin)). -* Introduce experimental integration with AWS Glue service catalog for Iceberg tables. [#77257](https://github.com/ClickHouse/ClickHouse/pull/77257) ([alesapin](https://github.com/alesapin)). - -## Performance improvements {#performance-improvements} - -* Optimize performance with lazy projection to avoid reading unused columns. [#55518](https://github.com/ClickHouse/ClickHouse/pull/55518) ([Xiaozhe Yu](https://github.com/wudidapaopao)). -* Start to compare rows from most likely unequal columns first.
[#63780](https://github.com/ClickHouse/ClickHouse/pull/63780) ([UnamedRus](https://github.com/UnamedRus)). -* Optimize RowBinary input format. Closes [#63805](https://github.com/ClickHouse/ClickHouse/issues/63805). [#65059](https://github.com/ClickHouse/ClickHouse/pull/65059) ([Pavel Kruglov](https://github.com/Avogar)). -* Speedup string deserialization by some low-level optimisation. [#65948](https://github.com/ClickHouse/ClickHouse/pull/65948) ([Nikita Taranov](https://github.com/nickitat)). -* Apply `preserve_most` attribute at some places in code. [#67778](https://github.com/ClickHouse/ClickHouse/pull/67778) ([Nikita Taranov](https://github.com/nickitat)). -* Implement query condition cache to improve query performance using repeated conditions. The range of the portion of data that does not meet the condition is remembered as a temporary index in memory. Subsequent queries will use this index. Closes [#67768](https://github.com/ClickHouse/ClickHouse/issues/67768). [#69236](https://github.com/ClickHouse/ClickHouse/pull/69236) ([zhongyuankai](https://github.com/zhongyuankai)). -* Support async io prefetch for `NativeORCBlockInputFormat`, which improves overall performance by hiding remote io latency. Speedup ratio could reach 1.47x in my test case. [#70534](https://github.com/ClickHouse/ClickHouse/pull/70534) ([李扬](https://github.com/taiyang-li)). -* Improve grace hash join performance by rearranging the right join table by keys. [#72237](https://github.com/ClickHouse/ClickHouse/pull/72237) ([kevinyhzou](https://github.com/KevinyhZou)). -* Reintroduce respecting `ttl_only_drop_parts` on `materialize ttl`; only read the necessary columns to recalculate TTL and drop parts by replacing them with an empty one. [#72751](https://github.com/ClickHouse/ClickHouse/pull/72751) ([Andrey Zvonov](https://github.com/zvonand)). -* Allow `arrayROCAUC` and `arrayAUCPR` to compute a partial area of the whole curve, so that the calculation can be parallelized over huge datasets. [#72904](https://github.com/ClickHouse/ClickHouse/pull/72904) ([Emmanuel](https://github.com/emmanuelsdias)). -* Avoid spawning too many idle threads. [#72920](https://github.com/ClickHouse/ClickHouse/pull/72920) ([Guo Wangyang](https://github.com/guowangy)). -* Splitting of left table blocks by hash was removed from the probe phase of the `parallel_hash` JOIN algorithm. [#73089](https://github.com/ClickHouse/ClickHouse/pull/73089) ([Nikita Taranov](https://github.com/nickitat)). -* Don't list blob storage keys if we only have curly brackets expansion in the table function. Closes [#73333](https://github.com/ClickHouse/ClickHouse/issues/73333). [#73518](https://github.com/ClickHouse/ClickHouse/pull/73518) ([Konstantin Bogdanov](https://github.com/thevar1able)). -* Replace Int256 and UInt256 with clang builtin i256 in arithmetic calculation according to tests in [#70502](https://github.com/ClickHouse/ClickHouse/issues/70502). [#73658](https://github.com/ClickHouse/ClickHouse/pull/73658) ([李扬](https://github.com/taiyang-li)). -* Adds a fast path for functions when all argument types are numeric. Fix performance issues in https://github.com/ClickHouse/ClickHouse/pull/72258. [#73820](https://github.com/ClickHouse/ClickHouse/pull/73820) ([李扬](https://github.com/taiyang-li)). -* Do not apply `maskedExecute` on non-function columns, improve the performance of short circuit execution. [#73965](https://github.com/ClickHouse/ClickHouse/pull/73965) ([lgbo](https://github.com/lgbo-ustc)).
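As a small illustration of the curly-bracket expansion mentioned a few items above (the bucket, path, and file names here are hypothetical):

```sql
-- Sketch: with brace expansion only (no wildcards), object keys no longer need to be listed.
SELECT count()
FROM s3('https://example-bucket.s3.amazonaws.com/data/part_{1..3}.parquet');
```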
-* Disable header detection for Kafka/NATS/RabbitMQ/FileLog to improve performance. [#74006](https://github.com/ClickHouse/ClickHouse/pull/74006) ([Azat Khuzhin](https://github.com/azat)). -* Use log wrappers by value and don't allocate them on the heap. [#74034](https://github.com/ClickHouse/ClickHouse/pull/74034) ([Mikhail Artemenko](https://github.com/Michicosun)). -* Execute a pipeline with a higher degree of parallelism after aggregation with grouping sets. [#74082](https://github.com/ClickHouse/ClickHouse/pull/74082) ([Nikita Taranov](https://github.com/nickitat)). -* Reduce the critical section in `MergeTreeReadPool`. [#74202](https://github.com/ClickHouse/ClickHouse/pull/74202) ([Guo Wangyang](https://github.com/guowangy)). -* Optimized function `indexHint`. Now, columns that are only used as arguments of function `indexHint` are not read from the table (see the sketch at the end of this section). [#74314](https://github.com/ClickHouse/ClickHouse/pull/74314) ([Anton Popov](https://github.com/CurtizJ)). -* Parallel replicas performance improvement: deserialization of packets not related to the parallel replicas protocol now always happens in a pipeline thread on the query initiator. Before, it could happen in the thread responsible for pipeline scheduling, which could make the initiator less responsive and delay pipeline execution. [#74398](https://github.com/ClickHouse/ClickHouse/pull/74398) ([Igor Nikonov](https://github.com/devcrafter)). -* Fixed calculation of size in memory for `LowCardinality` columns. [#74688](https://github.com/ClickHouse/ClickHouse/pull/74688) ([Nikita Taranov](https://github.com/nickitat)). -* Improve the performance of reading whole JSON columns in Wide parts from S3 by adding prefetches for subcolumn prefix deserialization, a cache of deserialized prefixes, and parallel deserialization of subcolumn prefixes. This speeds up reading of a JSON column from S3 about 4 times in a query like `SELECT data FROM table` and about 10 times in a query like `SELECT data FROM table LIMIT 10`. [#74827](https://github.com/ClickHouse/ClickHouse/pull/74827) ([Pavel Kruglov](https://github.com/Avogar)). -* Preallocate memory used by async inserts to improve performance. [#74945](https://github.com/ClickHouse/ClickHouse/pull/74945) ([Ilya Golshtein](https://github.com/ilejn)). -* Fixed double pre-allocation in `ConcurrentHashJoin` in case join sides are swapped by the optimizer. [#75149](https://github.com/ClickHouse/ClickHouse/pull/75149) ([Nikita Taranov](https://github.com/nickitat)). -* Fixed unnecessary contention in `parallel_hash` when `max_rows_in_join = max_bytes_in_join = 0`. [#75155](https://github.com/ClickHouse/ClickHouse/pull/75155) ([Nikita Taranov](https://github.com/nickitat)). -* Slight improvement in some join scenarios: precalculate the number of output rows and reserve memory for them. [#75376](https://github.com/ClickHouse/ClickHouse/pull/75376) ([Alexander Gololobov](https://github.com/davenger)). -* `plain_rewritable` metadata files are small and do not need a large default buffer. Use a write buffer sized appropriately to fit the given path, improving memory utilization for a large number of active parts. [#75758](https://github.com/ClickHouse/ClickHouse/pull/75758) ([Julia Kartseva](https://github.com/jkartseva)). -* In some cases (e.g., an empty array column) data parts can contain empty files. We can skip writing empty blobs to ObjectStorage and only store metadata for such files when the table resides on a disk with separated metadata and object storages.
[#75860](https://github.com/ClickHouse/ClickHouse/pull/75860) ([Alexander Gololobov](https://github.com/davenger)). -* It was discovered that concurrency control could lead to unfair CPU distribution between INSERTs and SELECTs: all CPU slots could be allocated unconditionally (without competition) to INSERTs with `max_threads` = 1, while SELECTs with high `max_threads` values suffered from poor performance due to using only a single thread. [#75941](https://github.com/ClickHouse/ClickHouse/pull/75941) ([Sergei Trifonov](https://github.com/serxa)). -* Trivial optimization in `wrapInNullable` to avoid an unnecessary null map allocation. [#76489](https://github.com/ClickHouse/ClickHouse/pull/76489) ([李扬](https://github.com/taiyang-li)). -* Improve min/max performance for Decimal32/Decimal64/DateTime64. [#76570](https://github.com/ClickHouse/ClickHouse/pull/76570) ([李扬](https://github.com/taiyang-li)). -* Actively evict data from the cache on parts removal. Do not let the cache grow to the maximum size if the amount of data is less. [#76641](https://github.com/ClickHouse/ClickHouse/pull/76641) ([Alexey Milovidov](https://github.com/alexey-milovidov)). -* Query compilation (setting `compile_expressions`) now considers the machine type. This speeds up such queries significantly. [#76753](https://github.com/ClickHouse/ClickHouse/pull/76753) ([Robert Schulze](https://github.com/rschu1ze)). -* Optimize arraySort. [#76850](https://github.com/ClickHouse/ClickHouse/pull/76850) ([李扬](https://github.com/taiyang-li)). -* Speed up building the JOIN result by de-virtualizing calls to `col->insertFrom()`. [#77350](https://github.com/ClickHouse/ClickHouse/pull/77350) ([Alexander Gololobov](https://github.com/davenger)). -* Merge marks of the same part and write them to the query condition cache at one time to reduce lock contention. [#77377](https://github.com/ClickHouse/ClickHouse/pull/77377) ([zhongyuankai](https://github.com/zhongyuankai)). -* Optimize ORDER BY on a single nullable or low-cardinality column. [#77789](https://github.com/ClickHouse/ClickHouse/pull/77789) ([李扬](https://github.com/taiyang-li)). -* Disable `filesystem_cache_prefer_bigger_buffer_size` when the cache is used passively, such as for merges. [#77898](https://github.com/ClickHouse/ClickHouse/pull/77898) ([Kseniia Sumarokova](https://github.com/kssenii)). -* Implement trivial count optimization for Iceberg. Now queries with `count()` and without any filters should be faster. Closes [#77639](https://github.com/ClickHouse/ClickHouse/issues/77639). [#78090](https://github.com/ClickHouse/ClickHouse/pull/78090) ([alesapin](https://github.com/alesapin)). -* Support Iceberg data pruning based on lower_bound and upper_bound values for columns. Fixes [#77638](https://github.com/ClickHouse/ClickHouse/issues/77638). [#78242](https://github.com/ClickHouse/ClickHouse/pull/78242) ([alesapin](https://github.com/alesapin)). -* Optimize memory usage for NativeReader. [#78442](https://github.com/ClickHouse/ClickHouse/pull/78442) ([Azat Khuzhin](https://github.com/azat)). -* Trivial optimization: do not rewrite `count(if())` to `countIf` if a `CAST` is required. Closes [#78564](https://github.com/ClickHouse/ClickHouse/issues/78564). [#78565](https://github.com/ClickHouse/ClickHouse/pull/78565) ([李扬](https://github.com/taiyang-li)).
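The `indexHint` change above ([#74314]) is easiest to see in a query sketch; the `hits` table and its `EventDate` key below are hypothetical.

```sql
-- EventDate is only used inside indexHint(), so it still drives index analysis,
-- but after this change the column itself is no longer read from the table.
SELECT count()
FROM hits
WHERE indexHint(EventDate >= '2024-01-01');
```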
- -## Improvements {#improvements} - -* Decrease the amount of Keeper requests by eliminating the use of single `get` requests, which could have caused a significant load on Keeper with the increased number of replicas, in places where `multiRead` is available. [#56862](https://github.com/ClickHouse/ClickHouse/pull/56862) ([Nikolay Degterinsky](https://github.com/evillique)). -* Add support for SSL authentication with named collections for MySQL. Closes [#59111](https://github.com/ClickHouse/ClickHouse/issues/59111). [#59452](https://github.com/ClickHouse/ClickHouse/pull/59452) ([Nikolay Degterinsky](https://github.com/evillique)). -* Improve new analyzer infrastructure performance via storing `ColumnPtr` instead of `Field` in the `ConstantNode`. Related to [#62245](https://github.com/ClickHouse/ClickHouse/issues/62245). [#63198](https://github.com/ClickHouse/ClickHouse/pull/63198) ([Dmitry Novik](https://github.com/novikd)). -* Reject queries when the server is overloaded. The decision is made based on the ratio of wait time (`OSCPUWaitMicroseconds`) to busy time (`OSCPUVirtualTimeMicroseconds`). The query is dropped with some probability, when this ratio is between `min_os_cpu_wait_time_ratio_to_throw` and `max_os_cpu_wait_time_ratio_to_throw` (those are query level settings). [#63206](https://github.com/ClickHouse/ClickHouse/pull/63206) ([Alexey Katsman](https://github.com/alexkats)). -* Drop blocks as early as possible to reduce the memory requirements. [#65647](https://github.com/ClickHouse/ClickHouse/pull/65647) ([lgbo](https://github.com/lgbo-ustc)). -* `processors_profile_log` table now has default configuration with TTL of 30 days. [#66139](https://github.com/ClickHouse/ClickHouse/pull/66139) ([Ilya Yatsishin](https://github.com/qoega)). -* Allow creating of a `bloom_filter` index on columns with datatype DateTime64. [#66416](https://github.com/ClickHouse/ClickHouse/pull/66416) ([Yutong Xiao](https://github.com/YutSean)). -* Introduce latency buckets and use them to track first byte read/write and connect times for S3 requests. That way we can later use gathered data to calculate approximate percentiles and adapt timeouts. [#69783](https://github.com/ClickHouse/ClickHouse/pull/69783) ([Alexey Katsman](https://github.com/alexkats)). -* Queries passed to `Executable` storage are no longer limited to single threaded execution. [#70084](https://github.com/ClickHouse/ClickHouse/pull/70084) ([yawnt](https://github.com/yawnt)). -* Added HTTP headers to OpenTelemetry span logs table for enhanced traceability. [#70516](https://github.com/ClickHouse/ClickHouse/pull/70516) ([jonymohajanGmail](https://github.com/jonymohajanGmail)). -* Support writing of orc files by custom time zone, not always by `GMT` time zone. [#70615](https://github.com/ClickHouse/ClickHouse/pull/70615) ([kevinyhzou](https://github.com/KevinyhZou)). -* Replace table functions with their `-Cluster` alternatives if parallel replicas are enabled. Fixes [#65024](https://github.com/ClickHouse/ClickHouse/issues/65024). [#70659](https://github.com/ClickHouse/ClickHouse/pull/70659) ([Konstantin Bogdanov](https://github.com/thevar1able)). -* Respect IO scheduling settings when writing backups across clouds. [#71093](https://github.com/ClickHouse/ClickHouse/pull/71093) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)). -* Reestablish connection to MySQL and Postgres dictionary replicas in the background so it wouldn't delay requests to corresponding dictionaries. 
[#71101](https://github.com/ClickHouse/ClickHouse/pull/71101) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)). -* Add metric alias name to system.asynchronous_metrics. [#71164](https://github.com/ClickHouse/ClickHouse/pull/71164) ([megao](https://github.com/jetgm)). -* Refreshes of refreshable materialized views now appear in `system.query_log`. [#71333](https://github.com/ClickHouse/ClickHouse/pull/71333) ([Michael Kolupaev](https://github.com/al13n321)). -* Evaluate parquet bloom filters and min/max indexes together. This is necessary to properly support conditions like `x = 3 or x > 5` where the data is [1, 2, 4, 5]. [#71383](https://github.com/ClickHouse/ClickHouse/pull/71383) ([Arthur Passos](https://github.com/arthurpassos)). -* Interactive metrics improvements. Fix metrics from parallel replicas not being fully displayed. Display the metrics in order of the most recent update, then lexicographically by name. Do not display stale metrics. [#71631](https://github.com/ClickHouse/ClickHouse/pull/71631) ([Julia Kartseva](https://github.com/jkartseva)). -* Historically, the query `ALTER TABLE MOVE PARTITION TO TABLE` checked `SELECT` and `ALTER DELETE` rights instead of the dedicated `ALTER_MOVE_PARTITION` access type. This change makes use of that access type. For compatibility, the permission will also be granted implicitly if `SELECT` and `ALTER DELETE` are granted, but this behavior will be removed in future releases. Closes [#16403](https://github.com/ClickHouse/ClickHouse/issues/16403). [#71632](https://github.com/ClickHouse/ClickHouse/pull/71632) ([pufit](https://github.com/pufit)). -* Enable setting `use_hive_partitioning` by default. [#71636](https://github.com/ClickHouse/ClickHouse/pull/71636) ([Yarik Briukhovetskyi](https://github.com/yariks5s)). -* Throw an exception when trying to materialize a column in the sort key instead of allowing it to break the sort order. Does not solve [#71777](https://github.com/ClickHouse/ClickHouse/issues/71777), though. [#71891](https://github.com/ClickHouse/ClickHouse/pull/71891) ([Peter Nguyen](https://github.com/petern48)). -* Allow a more general join planning algorithm when the hash join algorithm is enabled. [#71926](https://github.com/ClickHouse/ClickHouse/pull/71926) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)). -* Hide secrets in `EXPLAIN QUERY TREE`. [#72025](https://github.com/ClickHouse/ClickHouse/pull/72025) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)). -* Allow use of a configurable disk to store the metadata files of databases and tables. The disk name can be set via the `database_disk.disk` config parameter. [#72027](https://github.com/ClickHouse/ClickHouse/pull/72027) ([Tuan Pham Anh](https://github.com/tuanpach)). -* Support parquet integer logical types in the native reader. [#72105](https://github.com/ClickHouse/ClickHouse/pull/72105) ([Arthur Passos](https://github.com/arthurpassos)). -* Make JSON output format pretty by default. Add a new setting `output_format_json_pretty_print` to control it and enable it by default (see the example below). [#72148](https://github.com/ClickHouse/ClickHouse/pull/72148) ([Pavel Kruglov](https://github.com/Avogar)). -* Interactively request credentials in the browser if the default user requires a password. In previous versions, the server returned HTTP 403; now, it returns HTTP 401. [#72198](https://github.com/ClickHouse/ClickHouse/pull/72198) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
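A small example for the pretty-by-default JSON output mentioned above ([#72148]); setting the new option to 0 restores the previous compact output.

```sql
-- Pretty-printed JSON is now the default; this shows how to opt out.
SELECT number, toString(number) AS s
FROM system.numbers
LIMIT 2
SETTINGS output_format_json_pretty_print = 0
FORMAT JSON;
```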
-* Convert access types `CREATE_USER`, `ALTER_USER`, `DROP_USER`, `CREATE_ROLE`, `ALTER_ROLE`, `DROP_ROLE` from global to parameterized. This means users can now grant access management permissions with finer granularity. [#72246](https://github.com/ClickHouse/ClickHouse/pull/72246) ([pufit](https://github.com/pufit)). -* Allow shard names in cluster configuration. [#72276](https://github.com/ClickHouse/ClickHouse/pull/72276) ([MikhailBurdukov](https://github.com/MikhailBurdukov)). -* Support CAST and ALTER between JSON types with different parameters. [#72303](https://github.com/ClickHouse/ClickHouse/pull/72303) ([Pavel Kruglov](https://github.com/Avogar)). -* Add the `latest_fail_error_code_name` column to `system.mutations`. This column is needed to introduce a new metric on stuck mutations and use it to build graphs of the errors encountered in the cloud, as well as, optionally, adding a new less-noisy alert. [#72398](https://github.com/ClickHouse/ClickHouse/pull/72398) ([Miсhael Stetsyuk](https://github.com/mstetsyuk)). -* Reduce the amount of allocation when attaching partitions. [#72583](https://github.com/ClickHouse/ClickHouse/pull/72583) ([Konstantin Morozov](https://github.com/k-morozov)). -* Make the `max_bytes_before_external_sort` limit depend on total query memory consumption (previously it was the number of bytes in the sorting block for one sorting thread; now it has the same meaning as `max_bytes_before_external_group_by` - a total limit for the whole query memory across all threads). Also added one more setting to control the on-disk block size - `min_external_sort_block_bytes`. [#72598](https://github.com/ClickHouse/ClickHouse/pull/72598) ([Azat Khuzhin](https://github.com/azat)). -* Ignore memory restrictions in the trace collector. [#72606](https://github.com/ClickHouse/ClickHouse/pull/72606) ([Azat Khuzhin](https://github.com/azat)). -* Support subcolumns in the MergeTree sorting key and skip indexes. [#72644](https://github.com/ClickHouse/ClickHouse/pull/72644) ([Pavel Kruglov](https://github.com/Avogar)). -* Add server settings `dictionaries_lazy_load` and `wait_dictionaries_load_at_startup` to `system.server_settings`. [#72664](https://github.com/ClickHouse/ClickHouse/pull/72664) ([Christoph Wurm](https://github.com/cwurm)). -* Add setting `max_backup_bandwidth` to the list of settings that can be specified as part of `BACKUP`/`RESTORE` queries. [#72665](https://github.com/ClickHouse/ClickHouse/pull/72665) ([Christoph Wurm](https://github.com/cwurm)). -* Parallel replicas used historical information about replica availability to improve replica selection but did not update the replica's error count when the connection was unavailable. Now the error count is updated when a replica is unavailable. [#72666](https://github.com/ClickHouse/ClickHouse/pull/72666) ([zoomxi](https://github.com/zoomxi)). -* Reduce the log level for appearing replicated parts in the ReplicatedMergeTree engine to help minimize the volume of logs generated in a replicated cluster. [#72876](https://github.com/ClickHouse/ClickHouse/pull/72876) ([mor-akamai](https://github.com/morkalfon)). -* A lot of new features will require better code encapsulation (in what relates to Iceberg metadata) and better code abstractions. [#72941](https://github.com/ClickHouse/ClickHouse/pull/72941) ([Daniil Ivanik](https://github.com/divanik)). -* Support equality comparison for values of the JSON column type (see the sketch below). [#72991](https://github.com/ClickHouse/ClickHouse/pull/72991) ([Pavel Kruglov](https://github.com/Avogar)).
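A sketch of the JSON equality comparison from the last entry above; the literals are arbitrary and the behaviour shown is only what the entry claims (equality of identical JSON values).

```sql
-- Compare two values of the JSON type; expected to return 1 after this change.
SELECT CAST('{"a": 1, "b": "x"}', 'JSON') = CAST('{"a": 1, "b": "x"}', 'JSON') AS equal;
```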
-* Improve formatting of identifiers with JSON subcolumns to avoid unnecessary back quotes. [#73085](https://github.com/ClickHouse/ClickHouse/pull/73085) ([Pavel Kruglov](https://github.com/Avogar)). -* Log `PREWHERE` conditions with `Test` level. [#73116](https://github.com/ClickHouse/ClickHouse/pull/73116) ([Vladimir Cherkasov](https://github.com/vdimir)). -* Support SETTINGS with implicit ENGINE and mixing engine and query settings. [#73120](https://github.com/ClickHouse/ClickHouse/pull/73120) ([Raúl Marín](https://github.com/Algunenano)). -* Write parts with level 1 if `optimize_on_insert` is enabled. This allows several optimizations of queries with `FINAL` to be used for freshly written parts. [#73132](https://github.com/ClickHouse/ClickHouse/pull/73132) ([Anton Popov](https://github.com/CurtizJ)). -* For a query like, `WHERE a[...]`, and 3. also in the configuration file, via per-connection settings `[...]`. [#74168](https://github.com/ClickHouse/ClickHouse/pull/74168) ([Christoph Wurm](https://github.com/cwurm)). -* Change the prometheus remote write response success status from 200/OK to 204/NoContent. [#74170](https://github.com/ClickHouse/ClickHouse/pull/74170) ([Michael Dempsey](https://github.com/bluestealth)). -* Expose X-ClickHouse HTTP headers to JavaScript in the browser. This makes writing applications more convenient. [#74180](https://github.com/ClickHouse/ClickHouse/pull/74180) ([Alexey Milovidov](https://github.com/alexey-milovidov)). -* The `JSONEachRowWithProgress` format will include events with metadata, as well as totals and extremes. It also includes `rows_before_limit_at_least` and `rows_before_aggregation`. The format prints the exception properly if it arrives after partial results. The progress now includes elapsed nanoseconds. One final progress event is emitted at the end. The progress during query runtime will be printed no more frequently than the value of the `interactive_delay` setting (see the example below). [#74181](https://github.com/ClickHouse/ClickHouse/pull/74181) ([Alexey Milovidov](https://github.com/alexey-milovidov)). -* The hourglass will rotate smoothly in the Play UI. [#74182](https://github.com/ClickHouse/ClickHouse/pull/74182) ([Alexey Milovidov](https://github.com/alexey-milovidov)). -* Even if the HTTP response is compressed, send packets as soon as they arrive. This allows the browser to receive progress packets and compressed data. [#74201](https://github.com/ClickHouse/ClickHouse/pull/74201) ([Alexey Milovidov](https://github.com/alexey-milovidov)). -* Add the ability to reload `max_remote_read_network_bandwidth_for_server` and `max_remote_write_network_bandwidth_for_server` on the fly without restarting the server. [#74206](https://github.com/ClickHouse/ClickHouse/pull/74206) ([Kai Zhu](https://github.com/nauu)). -* Autodetect secure connection based on connecting to port 9440 in ClickHouse Client. [#74212](https://github.com/ClickHouse/ClickHouse/pull/74212) ([Christoph Wurm](https://github.com/cwurm)). -* Authenticate users with username only for http_handlers (previously it required the user to provide the password as well). [#74221](https://github.com/ClickHouse/ClickHouse/pull/74221) ([Azat Khuzhin](https://github.com/azat)). -* Support for the alternative query languages PRQL and KQL was marked experimental. To use them, specify settings `allow_experimental_prql_dialect = 1` and `allow_experimental_kusto_dialect = 1`. [#74224](https://github.com/ClickHouse/ClickHouse/pull/74224) ([Robert Schulze](https://github.com/rschu1ze)).
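The richer `JSONEachRowWithProgress` output described above can be observed with a trivial query; the exact set of emitted events depends on the query (totals and extremes only appear when present).

```sql
-- Emits progress events, row events and one final progress event;
-- progress output is throttled by the interactive_delay setting.
SELECT number FROM system.numbers LIMIT 3 FORMAT JSONEachRowWithProgress;
```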
-* Support returning the default Enum type in more aggregate functions. [#74272](https://github.com/ClickHouse/ClickHouse/pull/74272) ([Raúl Marín](https://github.com/Algunenano)). -* In `OPTIMIZE TABLE`, it is now possible to specify keyword `FORCE` as an alternative to existing keyword `FINAL`. [#74342](https://github.com/ClickHouse/ClickHouse/pull/74342) ([Robert Schulze](https://github.com/rschu1ze)). -* Added a merge tree setting `materialize_skip_indexes_on_merge` which suppresses the creation of skip indexes during merge. This allows users to control explicitly (via `ALTER TABLE [..] MATERIALIZE INDEX [...]`) when skip indexes are created. This can be useful if skip indexes are expensive to build (e.g. vector similarity indexes). [#74401](https://github.com/ClickHouse/ClickHouse/pull/74401) ([Robert Schulze](https://github.com/rschu1ze)). -* Support subcolumns in default and materialized expressions. [#74403](https://github.com/ClickHouse/ClickHouse/pull/74403) ([Pavel Kruglov](https://github.com/Avogar)). -* Optimize keeper requests in Storage(S3/Azure)Queue. [#74410](https://github.com/ClickHouse/ClickHouse/pull/74410) ([Kseniia Sumarokova](https://github.com/kssenii)). -* Add the IsServerShuttingDown metric, which is needed to trigger an alert when the server shutdown takes too much time. [#74429](https://github.com/ClickHouse/ClickHouse/pull/74429) ([Miсhael Stetsyuk](https://github.com/mstetsyuk)). -* Added iceberg tables names to EXPLAIN. [#74485](https://github.com/ClickHouse/ClickHouse/pull/74485) ([alekseev-maksim](https://github.com/alekseev-maksim)). -* Use up to `1000` parallel replicas by default. [#74504](https://github.com/ClickHouse/ClickHouse/pull/74504) ([Konstantin Bogdanov](https://github.com/thevar1able)). -* Provide a better error message when using RECURSIVE CTE with the old analyzer. [#74523](https://github.com/ClickHouse/ClickHouse/pull/74523) ([Raúl Marín](https://github.com/Algunenano)). -* Optimize keeper requests in Storage(S3/Azure)Queue. [#74538](https://github.com/ClickHouse/ClickHouse/pull/74538) ([Kseniia Sumarokova](https://github.com/kssenii)). -* Improve HTTP session reuse when reading from s3 disk ([#72401](https://github.com/ClickHouse/ClickHouse/issues/72401)). [#74548](https://github.com/ClickHouse/ClickHouse/pull/74548) ([Julian Maicher](https://github.com/jmaicher)). -* Show extended error messages in `system.errors`. [#74574](https://github.com/ClickHouse/ClickHouse/pull/74574) ([Vitaly Baranov](https://github.com/vitlibar)). -* Enabled a backoff logic for all types of replicated tasks. It will provide the ability to reduce CPU usage, memory usage, and log file sizes. Added new settings `max_postpone_time_for_failed_replicated_fetches_ms`, `max_postpone_time_for_failed_replicated_merges_ms` and `max_postpone_time_for_failed_replicated_tasks_ms` which are similar to `max_postpone_time_for_failed_mutations_ms`. [#74576](https://github.com/ClickHouse/ClickHouse/pull/74576) ([MikhailBurdukov](https://github.com/MikhailBurdukov)). -* More accurate accounting for `max_joined_block_size_rows` setting for `parallel_hash` JOIN algorithm. Helps to avoid increased memory consumption compared to `hash` algorithm. [#74630](https://github.com/ClickHouse/ClickHouse/pull/74630) ([Nikita Taranov](https://github.com/nickitat)). -* Added `dfs.client.use.datanode.hostname` libhdfs3 config option support. [#74635](https://github.com/ClickHouse/ClickHouse/pull/74635) ([Mikhail Tiukavkin](https://github.com/freshertm)). 
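For the `OPTIMIZE TABLE` entry above ([#74342]), the new `FORCE` keyword is simply an alternative spelling of `FINAL`; `my_table` is a hypothetical table name.

```sql
-- Both statements request the same full merge after this release.
OPTIMIZE TABLE my_table FINAL;
OPTIMIZE TABLE my_table FORCE;
```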
-* Fixes Invalid: Codec 'snappy' doesn't support setting a compression level. [#74659](https://github.com/ClickHouse/ClickHouse/pull/74659) ([Arthur Passos](https://github.com/arthurpassos)). -* Allow using password for client communication with clickhouse-keeper. This feature is not very useful if you specify proper SSL configuration for server and client, but still can be useful for some cases. Password cannot be longer than 16 characters. It's not connected with Keeper Auth model. [#74673](https://github.com/ClickHouse/ClickHouse/pull/74673) ([alesapin](https://github.com/alesapin)). -* Allow using blob paths to calculate checksums while making a backup. [#74729](https://github.com/ClickHouse/ClickHouse/pull/74729) ([Vitaly Baranov](https://github.com/vitlibar)). -* Use dynamic sharding for JOIN if the JOIN key is a prefix of PK for both parts. This optimization is enabled with `query_plan_join_shard_by_pk_ranges` setting (disabled by default). [#74733](https://github.com/ClickHouse/ClickHouse/pull/74733) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). -* Add error code for config reloader. [#74746](https://github.com/ClickHouse/ClickHouse/pull/74746) ([Garrett Thomas](https://github.com/garrettthomaskth)). -* Added support for IPv6 addresses in MySQL and PostgreSQL table functions and engines. [#74796](https://github.com/ClickHouse/ClickHouse/pull/74796) ([Mikhail Koviazin](https://github.com/mkmkme)). -* Parameters for the codec Gorilla will now always be saved in the table metadata in .sql file. This closes: [#70072](https://github.com/ClickHouse/ClickHouse/issues/70072). [#74814](https://github.com/ClickHouse/ClickHouse/pull/74814) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)). -* Implement short circuit optimization for `divideDecimal`. Fixes [#74280](https://github.com/ClickHouse/ClickHouse/issues/74280). [#74843](https://github.com/ClickHouse/ClickHouse/pull/74843) ([Kevin Mingtarja](https://github.com/kevinmingtarja)). -* Improve performance of larger multi requests in Keeper. [#74849](https://github.com/ClickHouse/ClickHouse/pull/74849) ([Antonio Andelic](https://github.com/antonio2368)). -* Now users can be specified inside the startup scripts. [#74894](https://github.com/ClickHouse/ClickHouse/pull/74894) ([pufit](https://github.com/pufit)). -* Fetch parts in parallel in ALTER TABLE FETCH PARTITION (thread pool size is controlled with `max_fetch_partition_thread_pool_size`). [#74978](https://github.com/ClickHouse/ClickHouse/pull/74978) ([Azat Khuzhin](https://github.com/azat)). -* Added a query ID column to `system.query_cache` (issue [#68205](https://github.com/ClickHouse/ClickHouse/issues/68205)). [#74982](https://github.com/ClickHouse/ClickHouse/pull/74982) ([NamNguyenHoai](https://github.com/NamHoaiNguyen)). -* Enabled SSH protocol back. Fixed some critical vulnerabilities so that it is no longer possible to use custom pager or specify `server-logs-file`. Disabled the ability to pass client options through the environment variables by default (it is still possible via `ssh-server.enable_client_options_passing` in config.xml). Supported progress table, query cancellation, completion, profile events progress, stdin and `send_logs_level` option. This closes: [#74340](https://github.com/ClickHouse/ClickHouse/issues/74340). [#74989](https://github.com/ClickHouse/ClickHouse/pull/74989) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)). -* Fix formatting of exceptions using a custom format if they appear during query interpretation. 
In previous versions, exceptions were formatted using the default format rather than the format specified in the query. This closes [#55422](https://github.com/ClickHouse/ClickHouse/issues/55422). [#74994](https://github.com/ClickHouse/ClickHouse/pull/74994) ([Alexey Milovidov](https://github.com/alexey-milovidov)). -* Implemented parsing enhancements (Sequence ID parsing: Added functionality to parse sequence identifiers in manifest files AND Avro metadata parsing: Redesigned the Avro metadata parser to be easily extendable for future enhancements). [#75010](https://github.com/ClickHouse/ClickHouse/pull/75010) ([Daniil Ivanik](https://github.com/divanik)). -* It is allowed to cancel `ALTER TABLE ... FREEZE ...` queries with `KILL QUERY` and timeout(`max_execution_time`). [#75016](https://github.com/ClickHouse/ClickHouse/pull/75016) ([Kirill](https://github.com/kirillgarbar)). -* Add support for `groupUniqArrayArrayMap` as `SimpleAggregateFunction`. [#75034](https://github.com/ClickHouse/ClickHouse/pull/75034) ([Miel Donkers](https://github.com/mdonkers)). -* Support prepared statements in postgres wire protocol. [#75035](https://github.com/ClickHouse/ClickHouse/pull/75035) ([scanhex12](https://github.com/scanhex12)). -* Hide catalog credential settings in database engine `Iceberg`. Closes [#74559](https://github.com/ClickHouse/ClickHouse/issues/74559). [#75080](https://github.com/ClickHouse/ClickHouse/pull/75080) ([Kseniia Sumarokova](https://github.com/kssenii)). -* Added a few missing features into BuzzHouse: `ILIKE` and `REGEXP` operators, `<=>` and `IS NOT DISTINCT FROM`. [#75168](https://github.com/ClickHouse/ClickHouse/pull/75168) ([Pedro Ferreira](https://github.com/PedroTadim)). -* The setting `min_chunk_bytes_for_parallel_parsing` cannot be zero anymore. This fixes: [#71110](https://github.com/ClickHouse/ClickHouse/issues/71110). [#75239](https://github.com/ClickHouse/ClickHouse/pull/75239) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)). -* `intExp2` / `intExp10`: Define undefined behaviour: return 0 for too small argument, `18446744073709551615` for too big argument, throw exception if `nan`. [#75312](https://github.com/ClickHouse/ClickHouse/pull/75312) ([Vitaly Baranov](https://github.com/vitlibar)). -* Support `s3.endpoint` natively from catalog config in `DatabaseIceberg`. Closes [#74558](https://github.com/ClickHouse/ClickHouse/issues/74558). [#75375](https://github.com/ClickHouse/ClickHouse/pull/75375) ([Kseniia Sumarokova](https://github.com/kssenii)). -* Don't fail silently if user executing `SYSTEM DROP REPLICA` doesn't have enough permissions. [#75377](https://github.com/ClickHouse/ClickHouse/pull/75377) ([Bharat Nallan](https://github.com/bharatnc)). -* Add a ProfileEvent about the number of times any of system logs has failed to flush. [#75466](https://github.com/ClickHouse/ClickHouse/pull/75466) ([Alexey Milovidov](https://github.com/alexey-milovidov)). -* Add check and logging for decrypting and decompressing. [#75471](https://github.com/ClickHouse/ClickHouse/pull/75471) ([Vitaly Baranov](https://github.com/vitlibar)). -* Added support for the micro sign (U+00B5) in the `parseTimeDelta` function. 
Now both the micro sign (U+00B5) and the Greek letter mu (U+03BC) are recognized as valid representations for microseconds, aligning ClickHouse's behavior with Go’s implementation ([see time.go](https://github.com/golang/go/blob/ad7b46ee4ac1cee5095d64b01e8cf7fcda8bee5e/src/time/time.go#L983C19-L983C20) and [time/format.go](https://github.com/golang/go/blob/ad7b46ee4ac1cee5095d64b01e8cf7fcda8bee5e/src/time/format.go#L1608-L1609)). [#75472](https://github.com/ClickHouse/ClickHouse/pull/75472) ([Vitaly Orlov](https://github.com/orloffv)). -* Replace server setting (`send_settings_to_client`) with client setting (`apply_settings_from_server`) that controls whether client-side code (e.g. parsing INSERT data and formatting query output) should use settings from server's `users.xml` and user profile. Otherwise only settings from client command line, session, and the query are used. Note that this only applies to native client (not e.g. HTTP), and doesn't apply to most of query processing (which happens on the server). [#75478](https://github.com/ClickHouse/ClickHouse/pull/75478) ([Michael Kolupaev](https://github.com/al13n321)). -* Keeper improvement: disable digest calculation when committing to in-memory storage for better performance. It can be enabled with `keeper_server.digest_enabled_on_commit` config. Digest is still calculated when preprocessing requests. [#75490](https://github.com/ClickHouse/ClickHouse/pull/75490) ([Antonio Andelic](https://github.com/antonio2368)). -* Push down filter expression from JOIN ON when possible. [#75536](https://github.com/ClickHouse/ClickHouse/pull/75536) ([Vladimir Cherkasov](https://github.com/vdimir)). -* Better error messages at syntax errors. Previously, if the query was too large, and the token whose length exceeds the limit is a very large string literal, the message about the reason was lost in the middle of two examples of this very long token. Fix the issue when a query with UTF-8 was cut incorrectly in the error message. Fix excessive quoting of query fragments. This closes [#75473](https://github.com/ClickHouse/ClickHouse/issues/75473). [#75561](https://github.com/ClickHouse/ClickHouse/pull/75561) ([Alexey Milovidov](https://github.com/alexey-milovidov)). -* Add profile events in storage `S3(Azure)Queue`. [#75618](https://github.com/ClickHouse/ClickHouse/pull/75618) ([Kseniia Sumarokova](https://github.com/kssenii)). -* Disable sending settings from server to client (`send_settings_to_client=false`) for compatibility (This feature will be re-implemented as client setting later for better usability). [#75648](https://github.com/ClickHouse/ClickHouse/pull/75648) ([Michael Kolupaev](https://github.com/al13n321)). -* Add a config `memory_worker_correct_memory_tracker` to enable correction of internal memory tracker with information from different source read in the background thread periodically. [#75714](https://github.com/ClickHouse/ClickHouse/pull/75714) ([Antonio Andelic](https://github.com/antonio2368)). -* Use Analyzer in PrometheusRemoteReadProtocol. [#75729](https://github.com/ClickHouse/ClickHouse/pull/75729) ([Dmitry Novik](https://github.com/novikd)). -* We have support for gauge/counter metric types. However, they are insufficient for some metrics (e.g., the response times of requests to the keeper), so support for the histogram metric type is needed. The interface closely mirrors the Prometheus client, where you simply call `observe(value)` to increment the counter in the bucket corresponding to the value. 
The histogram metrics are exposed via system.histogram_metrics. [#75736](https://github.com/ClickHouse/ClickHouse/pull/75736) ([Miсhael Stetsyuk](https://github.com/mstetsyuk)). -* Add column `normalized_query_hash` into `system.processes`. Note: while it can be easily calculated on the fly with the `normalizedQueryHash` function, this is needed to prepare for subsequent changes. [#75756](https://github.com/ClickHouse/ClickHouse/pull/75756) ([Alexey Milovidov](https://github.com/alexey-milovidov)). -* Querying `system.tables` will not throw even if there is a `Merge` table created over a database that no longer exists. Remove the `getTotalRows` method from `Hive` tables, because we don't allow it to do complex work. [#75772](https://github.com/ClickHouse/ClickHouse/pull/75772) ([Alexey Milovidov](https://github.com/alexey-milovidov)). -* Web UI now has interactive database navigation. [#75777](https://github.com/ClickHouse/ClickHouse/pull/75777) ([Alexey Milovidov](https://github.com/alexey-milovidov)). -* Allow to combine read-only and read-write disks in storage policy (as multiple volumes, or multiple disks). This allows to read data from the entire volume, while inserts will prefer the writable disk (i.e. Copy-on-Write storage policy). [#75862](https://github.com/ClickHouse/ClickHouse/pull/75862) ([Azat Khuzhin](https://github.com/azat)). -* Remove trace_id from default ORDER BY for system.opentelemetry_span_log. [#75907](https://github.com/ClickHouse/ClickHouse/pull/75907) ([Azat Khuzhin](https://github.com/azat)). -* Encryption (XML attribute `encrypted_by`) can now be applied to any configuration file (config.xml, users.xml, nested configuration files). Previously, it worked only for the top-level config.xml file. [#75911](https://github.com/ClickHouse/ClickHouse/pull/75911) ([Mikhail Gorshkov](https://github.com/mgorshkov)). -* Store start_time/end_time for Backups with microseconds. [#75929](https://github.com/ClickHouse/ClickHouse/pull/75929) ([Aleksandr Musorin](https://github.com/AVMusorin)). -* Add `MemoryTrackingUncorrected` metric showing value of internal global memory tracker which is not corrected by RSS. [#75935](https://github.com/ClickHouse/ClickHouse/pull/75935) ([Antonio Andelic](https://github.com/antonio2368)). -* Calculate columns and indices sizes lazily in MergeTree. [#75938](https://github.com/ClickHouse/ClickHouse/pull/75938) ([Pavel Kruglov](https://github.com/Avogar)). -* Convert join to in subquery if output column is tied to the left table, need a uniqueness step at first, so disabled by default until the step is added later. [#75942](https://github.com/ClickHouse/ClickHouse/pull/75942) ([Shichao Jin](https://github.com/jsc0218)). -* Added a server setting `throw_on_unknown_workload` that allows to choose behavior on query with `workload` setting set to unknown value: either allow unlimited access (default) or throw a `RESOURCE_ACCESS_DENIED` error. It is useful to force all queries to use workload scheduling. [#75999](https://github.com/ClickHouse/ClickHouse/pull/75999) ([Sergei Trifonov](https://github.com/serxa)). -* Make the new, experimental Kafka table engine fully respect Keeper feature flags. [#76004](https://github.com/ClickHouse/ClickHouse/pull/76004) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)). -* Don't rewrite subcolumns to getSubcolumn in ARRAY JOIN if not necessary. [#76018](https://github.com/ClickHouse/ClickHouse/pull/76018) ([Pavel Kruglov](https://github.com/Avogar)). -* Retry coordination errors when loading tables. 
[#76020](https://github.com/ClickHouse/ClickHouse/pull/76020) ([Alexander Tokmakov](https://github.com/tavplubix)). -* Improve the `system.warnings` table and add some dynamic warning messages that can be added, updated or removed. [#76029](https://github.com/ClickHouse/ClickHouse/pull/76029) ([Bharat Nallan](https://github.com/bharatnc)). -* Support flushing individual logs in SYSTEM FLUSH LOGS. [#76132](https://github.com/ClickHouse/ClickHouse/pull/76132) ([Raúl Marín](https://github.com/Algunenano)). -* Improved the `/binary` server's page. Using the Hilbert curve instead of the Morton curve. Display 512 MB worth of addresses in the square, which fills the square better (in previous versions, addresses fill only half of the square). Color addresses closer to the library name rather than the function name. Allow scrolling a bit more outside of the area. [#76192](https://github.com/ClickHouse/ClickHouse/pull/76192) ([Alexey Milovidov](https://github.com/alexey-milovidov)). -* This PR makes it impossible to run a query `ALTER USER user1 ADD PROFILES a, DROP ALL PROFILES` because all `DROP` operations should come first in the order. [#76242](https://github.com/ClickHouse/ClickHouse/pull/76242) ([pufit](https://github.com/pufit)). -* Various enhancements for SYNC REPLICA (better error messages, better tests, sanity checks). [#76307](https://github.com/ClickHouse/ClickHouse/pull/76307) ([Azat Khuzhin](https://github.com/azat)). -* Retry ON CLUSTER queries in case of TOO_MANY_SIMULTANEOUS_QUERIES. [#76352](https://github.com/ClickHouse/ClickHouse/pull/76352) ([Patrick Galbraith](https://github.com/CaptTofu)). -* Changed the default value of `output_format_pretty_max_rows` from 10000 to 1000. I think it is better for usability. [#76407](https://github.com/ClickHouse/ClickHouse/pull/76407) ([Alexey Milovidov](https://github.com/alexey-milovidov)). -* Support for a refresh in readonly MergeTree tables. [#76467](https://github.com/ClickHouse/ClickHouse/pull/76467) ([Alexey Milovidov](https://github.com/alexey-milovidov)). -* Use correct fallback when multipart copy to S3 fails during backup with Access Denied. Multi part copy can generate Access Denied error when backup is done between buckets that have different credentials. [#76515](https://github.com/ClickHouse/ClickHouse/pull/76515) ([Antonio Andelic](https://github.com/antonio2368)). -* Faster ClickHouse Servers shutdown (get rid of 2.5sec delay). [#76550](https://github.com/ClickHouse/ClickHouse/pull/76550) ([Azat Khuzhin](https://github.com/azat)). -* Add query_id to system.errors. Related ticket [#75815](https://github.com/ClickHouse/ClickHouse/issues/75815). [#76581](https://github.com/ClickHouse/ClickHouse/pull/76581) ([Vladimir Baikov](https://github.com/bkvvldmr)). -* Upgraded librdkafka to version 2.8.0 and improved the shutdown sequence for Kafka tables, reducing delays during table drops and server restarts. The `engine=Kafka` no longer explicitly leaves the consumer group when a table is dropped. Instead, the consumer remains in the group until it is automatically removed after `session_timeout_ms` (default: 45 seconds) of inactivity. [#76621](https://github.com/ClickHouse/ClickHouse/pull/76621) ([filimonov](https://github.com/filimonov)). -* Fix validation of s3 request settings. [#76658](https://github.com/ClickHouse/ClickHouse/pull/76658) ([Vitaly Baranov](https://github.com/vitlibar)). -* Avoid excess allocation in readbufferfroms3 and other remote reading buffers, reduce their memory consumption in half. 
[#76692](https://github.com/ClickHouse/ClickHouse/pull/76692) ([Sema Checherinda](https://github.com/CheSema)). -* Support JSON type and subcolumns reading from View. [#76903](https://github.com/ClickHouse/ClickHouse/pull/76903) ([Pavel Kruglov](https://github.com/Avogar)). -* Add support for converting UInt128 to IPv6. This allows the `bitAnd` operation and arithmetic on IPv6, and it allows the result of a `bitAnd` operation on IPv6 to be converted back to IPv6 as well. Closes [#76752](https://github.com/ClickHouse/ClickHouse/issues/76752). See: https://github.com/ClickHouse/ClickHouse/pull/57707. [#76928](https://github.com/ClickHouse/ClickHouse/pull/76928) ([Muzammil Abdul Rehman](https://github.com/muzammilar)). -* System tables like `server_settings` or `settings` have a `default` value column, which is convenient. Only `merge_tree_settings` and `replicated_merge_tree_settings` did not have that column enabled. [#76942](https://github.com/ClickHouse/ClickHouse/pull/76942) ([Diego Nieto](https://github.com/lesandie)). -* Don't parse special Bool values in text formats inside Variant type by default. It can be enabled using the setting `allow_special_bool_values_inside_variant`. [#76974](https://github.com/ClickHouse/ClickHouse/pull/76974) ([Pavel Kruglov](https://github.com/Avogar)). -* Support a configurable per-task waiting time for low-priority queries at the session level and at the server level. [#77013](https://github.com/ClickHouse/ClickHouse/pull/77013) ([VicoWu](https://github.com/VicoWu)). -* Added `ProfileEvents::QueryPreempted`, which has the same logic as `CurrentMetrics::QueryPreempted`. [#77015](https://github.com/ClickHouse/ClickHouse/pull/77015) ([VicoWu](https://github.com/VicoWu)). -* Previously, Replicated databases might print credentials specified in a query to the logs. This behaviour is fixed. This closes: [#77123](https://github.com/ClickHouse/ClickHouse/issues/77123). [#77133](https://github.com/ClickHouse/ClickHouse/pull/77133) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)). -* Bump zstd from 1.5.5 to 1.5.7, which has pretty [good performance improvements](https://github.com/facebook/zstd/releases/tag/v1.5.7). [#77137](https://github.com/ClickHouse/ClickHouse/pull/77137) ([Pradeep Chhetri](https://github.com/chhetripradeep)). -* Allow ALTER TABLE DROP PARTITION for the plain_rewritable disk. [#77138](https://github.com/ClickHouse/ClickHouse/pull/77138) ([Julia Kartseva](https://github.com/jkartseva)). -* Add the ability to randomly sleep up to 500ms, independent of part sizes, before merges/mutations execution in case of zero-copy replication. [#77165](https://github.com/ClickHouse/ClickHouse/pull/77165) ([Alexey Katsman](https://github.com/alexkats)). -* Support atomic rename when `TRUNCATE` is used with `INTO OUTFILE`. Resolves [#70323](https://github.com/ClickHouse/ClickHouse/issues/70323). [#77181](https://github.com/ClickHouse/ClickHouse/pull/77181) ([Onkar Deshpande](https://github.com/onkar)). -* Use FixedString for PostgreSQL's CHARACTER, CHAR and BPCHAR. [#77304](https://github.com/ClickHouse/ClickHouse/pull/77304) ([Pablo Marcos](https://github.com/pamarcos)). -* Allow explicitly specifying the metadata file to read for Iceberg with the storage/table function setting `iceberg_metadata_file_path`. Fixes [#47412](https://github.com/ClickHouse/ClickHouse/issues/47412). [#77318](https://github.com/ClickHouse/ClickHouse/pull/77318) ([alesapin](https://github.com/alesapin)). -* Support using a remote disk for databases to store metadata files.
[#77365](https://github.com/ClickHouse/ClickHouse/pull/77365) ([Tuan Pham Anh](https://github.com/tuanpach)). -* Implement comparison for values of the JSON data type. Now JSON objects can be compared similarly to Maps. [#77397](https://github.com/ClickHouse/ClickHouse/pull/77397) ([Pavel Kruglov](https://github.com/Avogar)). -* Change reverted. [#77399](https://github.com/ClickHouse/ClickHouse/pull/77399) ([Yarik Briukhovetskyi](https://github.com/yariks5s)). -* Backup/restore setting `allow_s3_native_copy` now supports three possible values: `False` - S3 native copy will not be used; `True` (old default) - ClickHouse will try S3 native copy first and, if it fails, fall back to the reading+writing approach; `'auto'` (new default) - ClickHouse will compare the source and destination credentials first. If they are the same, ClickHouse will try S3 native copy and may then fall back to the reading+writing approach; if they are different, ClickHouse will go directly to the reading+writing approach (see the sketch below). [#77401](https://github.com/ClickHouse/ClickHouse/pull/77401) ([Vitaly Baranov](https://github.com/vitlibar)). -* Support ALTER TABLE ... ATTACH|DETACH|MOVE|REPLACE PARTITION for the plain_rewritable disk. [#77406](https://github.com/ClickHouse/ClickHouse/pull/77406) ([Julia Kartseva](https://github.com/jkartseva)). -* Skipping index cache is reverted. [#77447](https://github.com/ClickHouse/ClickHouse/pull/77447) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)). -* Reduce memory usage during prefetches of JSON columns in Wide parts. [#77640](https://github.com/ClickHouse/ClickHouse/pull/77640) ([Pavel Kruglov](https://github.com/Avogar)). -* Support AWS session token and environment credentials usage in delta kernel for the DeltaLake table engine. [#77661](https://github.com/ClickHouse/ClickHouse/pull/77661) ([Kseniia Sumarokova](https://github.com/kssenii)). -* Support query parameters inside the `additional_table_filters` setting; queries that use parameters in such filters now succeed. [#77680](https://github.com/ClickHouse/ClickHouse/pull/77680) ([wxybear](https://github.com/wxybear)). -* User-defined functions (UDFs) can now be marked as deterministic via a new tag in their XML definition. Also, the query cache now checks if UDFs called within a query are deterministic. If this is the case, it caches the query result. (Issue [#59988](https://github.com/ClickHouse/ClickHouse/issues/59988)). [#77769](https://github.com/ClickHouse/ClickHouse/pull/77769) ([Jimmy Aguilar Mena](https://github.com/Ergus)). -* Added validation of Buffer table engine parameters. [#77840](https://github.com/ClickHouse/ClickHouse/pull/77840) ([Pervakov Grigorii](https://github.com/GrigoryPervakov)). -* Add config `enable_hdfs_pread` to enable or disable hdfs pread. [#77885](https://github.com/ClickHouse/ClickHouse/pull/77885) ([kevinyhzou](https://github.com/KevinyhZou)). -* Add profile events for the number of ZooKeeper 'multi' read and write requests. [#77888](https://github.com/ClickHouse/ClickHouse/pull/77888) ([JackyWoo](https://github.com/JackyWoo)). -* Allow creating and inserting into temporary tables when `disable_insertion_and_mutation` is on. [#77901](https://github.com/ClickHouse/ClickHouse/pull/77901) ([Xu Jia](https://github.com/XuJia0210)). -* Decrease `max_insert_delayed_streams_for_parallel_write` (to 100). [#77919](https://github.com/ClickHouse/ClickHouse/pull/77919) ([Azat Khuzhin](https://github.com/azat)).
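A hedged sketch of the new `'auto'` mode of `allow_s3_native_copy` described above; the table name, bucket URL and credentials are placeholders.

```sql
-- With 'auto', ClickHouse compares source and destination credentials and only
-- attempts S3 native copy when they match; otherwise it reads and rewrites the data.
BACKUP TABLE db.events
TO S3('https://my-bucket.s3.amazonaws.com/backups/events/', '<access-key-id>', '<secret-key>')
SETTINGS allow_s3_native_copy = 'auto';
```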
-* Add ability to configure number of columns that merges can flush in parallel using `max_merge_delayed_streams_for_parallel_write` (this should reduce memory usage for vertical merges to S3 about 25x times). [#77922](https://github.com/ClickHouse/ClickHouse/pull/77922) ([Azat Khuzhin](https://github.com/azat)). -* Fix year parsing in joda syntax like 'yyy'. [#77973](https://github.com/ClickHouse/ClickHouse/pull/77973) ([李扬](https://github.com/taiyang-li)). -* Attaching parts of MergeTree tables will be performed in their block order, which is important for special merging algorithms, such as ReplacingMergeTree. This closes [#71009](https://github.com/ClickHouse/ClickHouse/issues/71009). [#77976](https://github.com/ClickHouse/ClickHouse/pull/77976) ([Alexey Milovidov](https://github.com/alexey-milovidov)). -* Query masking rules are now able to throw a LOGICAL_ERROR in case if the match happened. This will help to check if pre-defined password is leaking anywhere in logs. [#78094](https://github.com/ClickHouse/ClickHouse/pull/78094) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)). -* Added column `index_length_column` to `information_schema.tables` for better compatibility with MySQL. [#78119](https://github.com/ClickHouse/ClickHouse/pull/78119) ([Paweł Zakrzewski](https://github.com/KrzaQ)). -* Introduce two new metrics: `TotalMergeFailures` and `NonAbortedMergeFailures`. These metrics are needed to detect the cases where too many merges fail within a short period. [#78150](https://github.com/ClickHouse/ClickHouse/pull/78150) ([Miсhael Stetsyuk](https://github.com/mstetsyuk)). -* Fix incorrect S3 uri parsing when key is not specified on path style. [#78185](https://github.com/ClickHouse/ClickHouse/pull/78185) ([Arthur Passos](https://github.com/arthurpassos)). -* Fix incorrect values of `BlockActiveTime`, `BlockDiscardTime`, `BlockWriteTime`, `BlockQueueTime`, and `BlockReadTime` asynchronous metrics (before the change 1 second was incorrectly reported as 0.001). [#78211](https://github.com/ClickHouse/ClickHouse/pull/78211) ([filimonov](https://github.com/filimonov)). -* Respect `loading_retries` limit for errors during push to materialized view for StorageS3(Azure)Queue. Before that such errors were retried indefinitely. [#78313](https://github.com/ClickHouse/ClickHouse/pull/78313) ([Kseniia Sumarokova](https://github.com/kssenii)). -* In StorageDeltaLake with delta-kernel-rs implementation, fix performance and progress bar. [#78368](https://github.com/ClickHouse/ClickHouse/pull/78368) ([Kseniia Sumarokova](https://github.com/kssenii)). -* Vector similarity index could over-allocate main memory by up to 2x. This fix reworks the memory allocation strategy, reducing the memory consumption and improving the effectiveness of the vector similarity index cache. (issue [#78056](https://github.com/ClickHouse/ClickHouse/issues/78056)). [#78394](https://github.com/ClickHouse/ClickHouse/pull/78394) ([Shankar Iyer](https://github.com/shankar-iyer)). -* Introduce a setting `schema_type` for `system.metric_log` table with schema type. There are three allowed schemas: `wide` -- current schema, each metric/event in a separate column (most effective for reads of separate columns), `transposed` -- similar to `system.asynchronous_metric_log`, metrics/events are stored as rows, and the most interesting `transposed_with_wide_view` -- create underlying table with `transposed` schema, but also introduce a view with `wide` schema which translates queries to underlying table. 
In `transposed_with_wide_view` subsecond resolution for view is not supported, `event_time_microseconds` is just an alias for backward compatibility. [#78412](https://github.com/ClickHouse/ClickHouse/pull/78412) ([alesapin](https://github.com/alesapin)). -* Support `include`, `from_env`, `from_zk` for runtime disks. Closes [#78177](https://github.com/ClickHouse/ClickHouse/issues/78177). [#78470](https://github.com/ClickHouse/ClickHouse/pull/78470) ([Kseniia Sumarokova](https://github.com/kssenii)). -* Add several convenient ways to resolve root metadata.json file in an iceberg table function and engine. Closes [#78455](https://github.com/ClickHouse/ClickHouse/issues/78455). [#78475](https://github.com/ClickHouse/ClickHouse/pull/78475) ([Daniil Ivanik](https://github.com/divanik)). -* Support partition pruning in delta lake. [#78486](https://github.com/ClickHouse/ClickHouse/pull/78486) ([Kseniia Sumarokova](https://github.com/kssenii)). -* Support password based auth in SSH protocol in ClickHouse. [#78586](https://github.com/ClickHouse/ClickHouse/pull/78586) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)). -* Add a dynamic warning to the `system.warnings` table for long running mutations. [#78658](https://github.com/ClickHouse/ClickHouse/pull/78658) ([Bharat Nallan](https://github.com/bharatnc)). -* Drop connections if the CPU is massively overloaded. The decision is made based on the ratio of wait time (`OSCPUWaitMicroseconds`) to busy time (`OSCPUVirtualTimeMicroseconds`). The query is dropped with some probability, when this ratio is between `min_os_cpu_wait_time_ratio_to_drop_connection` and `max_os_cpu_wait_time_ratio_to_drop_connection`. [#78778](https://github.com/ClickHouse/ClickHouse/pull/78778) ([Alexey Katsman](https://github.com/alexkats)). -* Allow empty value on hive partitioning. [#78816](https://github.com/ClickHouse/ClickHouse/pull/78816) ([Arthur Passos](https://github.com/arthurpassos)). -* Fix `IN` clause type coercion for `BFloat16` (i.e. `SELECT toBFloat16(1) IN [1, 2, 3];` now returns `1`). Closes [#78754](https://github.com/ClickHouse/ClickHouse/issues/78754). [#78839](https://github.com/ClickHouse/ClickHouse/pull/78839) ([Raufs Dunamalijevs](https://github.com/rienath)). -* Do not check parts on other disks for MergeTree if disk= is set. [#78855](https://github.com/ClickHouse/ClickHouse/pull/78855) ([Azat Khuzhin](https://github.com/azat)). -* Make data types in `used_data_type_families` in `system.query_log` canonical. [#78972](https://github.com/ClickHouse/ClickHouse/pull/78972) ([Kseniia Sumarokova](https://github.com/kssenii)). - -## Bug Fix (user-visible misbehavior in an official stable release) {#bug-fix} - -* Fix cannot create SEQUENTIAL node with keeper-client. [#64177](https://github.com/ClickHouse/ClickHouse/pull/64177) ([Duc Canh Le](https://github.com/canhld94)). -* Fix identifier resolution from parent scopes. Allow the use of aliases to expressions in the WITH clause. Fixes [#58994](https://github.com/ClickHouse/ClickHouse/issues/58994). Fixes [#62946](https://github.com/ClickHouse/ClickHouse/issues/62946). Fixes [#63239](https://github.com/ClickHouse/ClickHouse/issues/63239). Fixes [#65233](https://github.com/ClickHouse/ClickHouse/issues/65233). Fixes [#71659](https://github.com/ClickHouse/ClickHouse/issues/71659). Fixes [#71828](https://github.com/ClickHouse/ClickHouse/issues/71828). Fixes [#68749](https://github.com/ClickHouse/ClickHouse/issues/68749). 
[#66143](https://github.com/ClickHouse/ClickHouse/pull/66143) ([Dmitry Novik](https://github.com/novikd)). -* Fix incorrect character counting in PositionImpl::vectorVector. [#71003](https://github.com/ClickHouse/ClickHouse/pull/71003) ([思维](https://github.com/heymind)). -* Fix negate function monotonicity. In previous versions, the query `select * from a where -x = -42;` where `x` is the primary key, can return a wrong result. [#71440](https://github.com/ClickHouse/ClickHouse/pull/71440) ([Michael Kolupaev](https://github.com/al13n321)). -* `RESTORE` operations for access entities required more permission than necessary because of unhandled partial revokes. This PR fixes the issue. Closes [#71853](https://github.com/ClickHouse/ClickHouse/issues/71853). [#71958](https://github.com/ClickHouse/ClickHouse/pull/71958) ([pufit](https://github.com/pufit)). -* Avoid pause after `ALTER TABLE REPLACE/MOVE PARTITION FROM/TO TABLE`. Retrieve correct settings for background task scheduling. [#72024](https://github.com/ClickHouse/ClickHouse/pull/72024) ([Aleksei Filatov](https://github.com/aalexfvk)). -* Fix empty tuple handling in arrayIntersect. This fixes [#72578](https://github.com/ClickHouse/ClickHouse/issues/72578). [#72581](https://github.com/ClickHouse/ClickHouse/pull/72581) ([Amos Bird](https://github.com/amosbird)). -* Fix handling of empty tuples in some input and output formats (e.g. Parquet, Arrow). [#72616](https://github.com/ClickHouse/ClickHouse/pull/72616) ([Michael Kolupaev](https://github.com/al13n321)). -* Column-level GRANT SELECT/INSERT statements on wildcard databases/tables now throw an error. [#72646](https://github.com/ClickHouse/ClickHouse/pull/72646) ([Johann Gan](https://github.com/johanngan)). -* Fix the situation when a user can't run `REVOKE ALL ON *.*` because of implicit grants in the target access entity. [#72872](https://github.com/ClickHouse/ClickHouse/pull/72872) ([pufit](https://github.com/pufit)). -* Fix stuck while processing pending batch for async distributed INSERT (due to i.e. `No such file or directory`). [#72939](https://github.com/ClickHouse/ClickHouse/pull/72939) ([Azat Khuzhin](https://github.com/azat)). -* Add support for Azure SAS Tokens. [#72959](https://github.com/ClickHouse/ClickHouse/pull/72959) ([Azat Khuzhin](https://github.com/azat)). -* Fix positive timezone formatting of formatDateTime scalar function. [#73091](https://github.com/ClickHouse/ClickHouse/pull/73091) ([ollidraese](https://github.com/ollidraese)). -* Fix to correctly reflect source port when connection made through PROXYv1 and `auth_use_forwarded_address` is set - previously proxy port was incorrectly used. Add `currentQueryID()` function. [#73095](https://github.com/ClickHouse/ClickHouse/pull/73095) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)). -* Propagate format settings to NativeWriter in TCPHandler, so settings like `output_format_native_write_json_as_string` are applied correctly. [#73179](https://github.com/ClickHouse/ClickHouse/pull/73179) ([Pavel Kruglov](https://github.com/Avogar)). -* Fix reading JSON sub-object subcolumns with incorrect prefix. [#73182](https://github.com/ClickHouse/ClickHouse/pull/73182) ([Pavel Kruglov](https://github.com/Avogar)). -* Fix crash in StorageObjectStorageQueue. [#73274](https://github.com/ClickHouse/ClickHouse/pull/73274) ([Kseniia Sumarokova](https://github.com/kssenii)). -* Fix rare crash in refreshable materialized view during server shutdown. 
[#73323](https://github.com/ClickHouse/ClickHouse/pull/73323) ([Michael Kolupaev](https://github.com/al13n321)). -* The `%f` placeholder of function `formatDateTime` now unconditionally generates six (sub-second) digits. This makes the behavior compatible with MySQL's `DATE_FORMAT` function. The previous behavior can be restored using the setting `formatdatetime_f_prints_scale_number_of_digits = 1`. [#73324](https://github.com/ClickHouse/ClickHouse/pull/73324) ([ollidraese](https://github.com/ollidraese)). -* Improved datetime conversion during index analysis by enforcing saturating behavior for implicit Date to DateTime conversions. This resolves potential index analysis inaccuracies caused by datetime range limitations. This fixes [#73307](https://github.com/ClickHouse/ClickHouse/issues/73307). It also fixes explicit `toDateTime` conversion when `date_time_overflow_behavior = 'ignore'`, which is the default value. [#73326](https://github.com/ClickHouse/ClickHouse/pull/73326) ([Amos Bird](https://github.com/amosbird)). -* Fixed filtering by the `_etag` column while reading from the `s3` storage and table function. [#73353](https://github.com/ClickHouse/ClickHouse/pull/73353) ([Anton Popov](https://github.com/CurtizJ)). -* Fix the `Not-ready Set is passed as the second argument for function 'in'` error when `IN (subquery)` is used in a `JOIN ON` expression with the old analyzer. [#73382](https://github.com/ClickHouse/ClickHouse/pull/73382) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). -* Fix preparing for squashing for Dynamic and JSON columns. Previously, in some cases, new types could be inserted into the shared variant/shared data even when the limit on types/paths was not reached. [#73388](https://github.com/ClickHouse/ClickHouse/pull/73388) ([Pavel Kruglov](https://github.com/Avogar)). -* Check for corrupted sizes during binary decoding of types to avoid overly large allocations. [#73390](https://github.com/ClickHouse/ClickHouse/pull/73390) ([Pavel Kruglov](https://github.com/Avogar)). -* Fixed a logical error when reading from a single-replica cluster with parallel replicas enabled. [#73403](https://github.com/ClickHouse/ClickHouse/pull/73403) ([Michael Kolupaev](https://github.com/al13n321)). -* Fix ObjectStorageQueue with ZooKeeper and older Keeper. [#73420](https://github.com/ClickHouse/ClickHouse/pull/73420) ([Antonio Andelic](https://github.com/antonio2368)). -* Implements a fix needed to enable Hive partitioning by default. [#73479](https://github.com/ClickHouse/ClickHouse/pull/73479) ([Yarik Briukhovetskyi](https://github.com/yariks5s)). -* Fix a data race when creating a vector similarity index. [#73517](https://github.com/ClickHouse/ClickHouse/pull/73517) ([Antonio Andelic](https://github.com/antonio2368)). -* Fixes a segfault when the source of a dictionary contains a function with wrong data. [#73535](https://github.com/ClickHouse/ClickHouse/pull/73535) ([Yarik Briukhovetskyi](https://github.com/yariks5s)). -* Fix retries on a failed insert in storage S3(Azure)Queue. Closes [#70951](https://github.com/ClickHouse/ClickHouse/issues/70951). [#73546](https://github.com/ClickHouse/ClickHouse/pull/73546) ([Kseniia Sumarokova](https://github.com/kssenii)). -* Fixed an error in function `tupleElement` which could appear in some cases for tuples with `LowCardinality` elements and the enabled setting `optimize_functions_to_subcolumns`. [#73548](https://github.com/ClickHouse/ClickHouse/pull/73548) ([Anton Popov](https://github.com/CurtizJ)). -* Fix parsing of an enum glob followed by a range one.
Fixes [#73473](https://github.com/ClickHouse/ClickHouse/issues/73473). [#73569](https://github.com/ClickHouse/ClickHouse/pull/73569) ([Konstantin Bogdanov](https://github.com/thevar1able)). -* Fixed `parallel_replicas_for_non_replicated_merge_tree` being ignored in subqueries for non-replicated tables. [#73584](https://github.com/ClickHouse/ClickHouse/pull/73584) ([Igor Nikonov](https://github.com/devcrafter)). -* Fix for `std::logical_error` thrown when a task cannot be scheduled. Found in stress tests. Example stacktrace: `2024.12.19 02:05:46.171833 [ 18190 ] {01f0daba-d3cc-4898-9e0e-c2c263306427} : Logical error: 'std::exception. Code: 1001, type: std::__1::future_error, e.what() = The associated promise has been destructed prior to the associated state becoming ready. (version 25.1.1.18724), Stack trace:.` [#73629](https://github.com/ClickHouse/ClickHouse/pull/73629) ([Alexander Gololobov](https://github.com/davenger)). -* Do not interpret queries in `EXPLAIN SYNTAX` to avoid logical errors with an incorrect processing stage for distributed queries. Fixes [#65205](https://github.com/ClickHouse/ClickHouse/issues/65205). [#73634](https://github.com/ClickHouse/ClickHouse/pull/73634) ([Dmitry Novik](https://github.com/novikd)). -* Fix possible data inconsistency in Dynamic column. Fixes a possible logical error `Nested columns sizes are inconsistent with local_discriminators column size`. [#73644](https://github.com/ClickHouse/ClickHouse/pull/73644) ([Pavel Kruglov](https://github.com/Avogar)). -* Fixed `NOT_FOUND_COLUMN_IN_BLOCK` in queries with `FINAL` and `SAMPLE`. Fixed an incorrect result in selects with `FINAL` from `CollapsingMergeTree` with enabled optimizations of `FINAL`. [#73682](https://github.com/ClickHouse/ClickHouse/pull/73682) ([Anton Popov](https://github.com/CurtizJ)). -* Fix crash in LIMIT BY COLUMNS. [#73686](https://github.com/ClickHouse/ClickHouse/pull/73686) ([Raúl Marín](https://github.com/Algunenano)). -* Fix a bug where, when a normal projection is forced to be used and the query is exactly the same as the projection definition, the projection is still not selected and an error is raised instead. [#73700](https://github.com/ClickHouse/ClickHouse/pull/73700) ([Shichao Jin](https://github.com/jsc0218)). -* Fix deserialization of Dynamic/Object structure. It could lead to CANNOT_READ_ALL_DATA exceptions. [#73767](https://github.com/ClickHouse/ClickHouse/pull/73767) ([Pavel Kruglov](https://github.com/Avogar)). -* Skip `metadata_version.txt` while restoring parts from a backup. [#73768](https://github.com/ClickHouse/ClickHouse/pull/73768) ([Vitaly Baranov](https://github.com/vitlibar)). -* Fix [#73737](https://github.com/ClickHouse/ClickHouse/issues/73737). [#73775](https://github.com/ClickHouse/ClickHouse/pull/73775) ([zhanglistar](https://github.com/zhanglistar)). -* Fixes [#72078](https://github.com/ClickHouse/ClickHouse/issues/72078) (S3 Express support was broken). [#73777](https://github.com/ClickHouse/ClickHouse/pull/73777) ([Sameer Tamsekar](https://github.com/stamsekar)). -* Allow merging of rows with invalid sign column values in CollapsingMergeTree tables. [#73864](https://github.com/ClickHouse/ClickHouse/pull/73864) ([Christoph Wurm](https://github.com/cwurm)).
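For the CollapsingMergeTree entry directly above, here is a minimal, hedged sketch of the scenario it addresses; the table name and values are illustrative only, and the exact pre-fix error message may differ:

```sql
CREATE TABLE collapsing_demo
(
    key  UInt64,
    sign Int8
)
ENGINE = CollapsingMergeTree(sign)
ORDER BY key;

-- sign = 0 is not a valid value for a CollapsingMergeTree sign column
INSERT INTO collapsing_demo VALUES (1, 1), (1, -1), (2, 0);

-- Before the fix, merging parts that contain the invalid sign value could fail;
-- with the fix, such rows no longer break the merge.
OPTIMIZE TABLE collapsing_demo FINAL;
```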
-* Fix the following error ``` Row 1: ────── hostname: c-test-wy-37-server-nlkyjyb-0.c-test-wy-37-server-headless.ns-test-wy-37.svc.cluster.local type: ExceptionWhileProcessing event_date: 2024-12-23 event_time: 2024-12-23 16:21:19 event_time_microseconds: 2024-12-23 16:21:19.824624 query_start_time: 2024-12-23 16:21:19 query_start_time_microseconds: 2024-12-23 16:21:19.747142 query_duration_ms: 77 read_rows: 1 read_bytes: 134 written_rows: 0 written_bytes: 0 result_rows: 0 result_bytes: 0 memory_usage: 7824 current_database: default query: CREATE DATABASE db0 formatted_query: normalized_query_hash: 7820917191074023511 -- 7.82 quintillion query_kind: Create databases: ['db0'] tables: [] columns: [] partitions: [] projections: [] views: [] exception_code: 170 exception: Code: 170. DB::Exception: Bad get: has Null, requested Int64: While executing DDLOnClusterQueryStatus. (BAD_GET) (version 25.1.1.19134 (official build)) stack_trace: 0. ./build_docker/./src/Common/Exception.cpp:107: DB::Exception::Exception(DB::Exception::MessageMasked&&, int, bool) @ 0x000000000da5e53b 1. DB::Exception::Exception(PreformattedMessage&&, int) @ 0x00000000088aca4c 2. DB::Exception::Exception>, std::basic_string_view>>(int, FormatStringHelperImpl>>::type, std::type_identity>>::type>, std::basic_string_view>&&, std::basic_string_view>&&) @ 0x00000000088bae8b 3. auto& DB::Field::safeGet() & @ 0x0000000008a3c748 4. ./src/Core/Field.h:484: DB::ColumnVector::insert(DB::Field const&) @ 0x0000000012e44c0f 5. ./build_docker/./src/Interpreters/DDLOnClusterQueryStatusSource.cpp:53: DB::DDLOnClusterQueryStatusSource::generateChunkWithUnfinishedHosts() const @ 0x0000000012a40214 6. ./build_docker/./src/Interpreters/DDLOnClusterQueryStatusSource.cpp:104: DB::DDLOnClusterQueryStatusSource::handleTimeoutExceeded() @ 0x0000000012a41640 7. ./build_docker/./src/Interpreters/DDLOnClusterQueryStatusSource.cpp:109: DB::DDLOnClusterQueryStatusSource::stopWaitingOfflineHosts() @ 0x0000000012a41be9 8. ./build_docker/./src/Interpreters/DistributedQueryStatusSource.cpp:182: DB::DistributedQueryStatusSource::generate() @ 0x0000000011feb3bf 9. ./build_docker/./src/Processors/ISource.cpp:139: DB::ISource::tryGenerate() @ 0x0000000014148f5b 10. ./build_docker/./src/Processors/ISource.cpp:108: DB::ISource::work() @ 0x0000000014148c47 11. ./build_docker/./src/Processors/Executors/ExecutionThreadContext.cpp:49: DB::ExecutionThreadContext::executeTask() @ 0x0000000014164fc7 12. ./build_docker/./src/Processors/Executors/PipelineExecutor.cpp:290: DB::PipelineExecutor::executeStepImpl(unsigned long, std::atomic*) @ 0x00000000141577e5 ```. [#73876](https://github.com/ClickHouse/ClickHouse/pull/73876) ([Tuan Pham Anh](https://github.com/tuanpach)). -* Fixes occasional failure to compare `map()` types due to possibility to create `Map` lacking explicit naming ('keys','values') of its nested tuple. [#73878](https://github.com/ClickHouse/ClickHouse/pull/73878) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)). -* Ignore window functions during GROUP BY ALL clause resolution. Fix [#73501](https://github.com/ClickHouse/ClickHouse/issues/73501). [#73916](https://github.com/ClickHouse/ClickHouse/pull/73916) ([Dmitry Novik](https://github.com/novikd)). -* Propogate Native format settings properly for client-server communication. [#73924](https://github.com/ClickHouse/ClickHouse/pull/73924) ([Pavel Kruglov](https://github.com/Avogar)). -* Fix implicit privileges (worked as wildcard before). 
[#73932](https://github.com/ClickHouse/ClickHouse/pull/73932) ([Azat Khuzhin](https://github.com/azat)). -* Fix high memory usage during nested Maps creation. [#73982](https://github.com/ClickHouse/ClickHouse/pull/73982) ([Pavel Kruglov](https://github.com/Avogar)). -* Fix parsing nested JSON with empty keys. [#73993](https://github.com/ClickHouse/ClickHouse/pull/73993) ([Pavel Kruglov](https://github.com/Avogar)). -* Fix: alias can be not added to the projection if it is referenced by another alias and selected in inverse order. [#74033](https://github.com/ClickHouse/ClickHouse/pull/74033) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)). -* A disk using the plain_rewritable metadata can be shared among multiple server instances. It is expected for one instance to read a metadata object while another modifies it. Object not found errors are ignored during plain_rewritable initialization with Azure storage, similar to the behavior implemented for S3. [#74059](https://github.com/ClickHouse/ClickHouse/pull/74059) ([Julia Kartseva](https://github.com/jkartseva)). -* Fix behaviour of `any` and `anyLast` with enum types and empty table. [#74061](https://github.com/ClickHouse/ClickHouse/pull/74061) ([Joanna Hulboj](https://github.com/jh0x)). -* Fixes case when the user specifies keyword arguments in the kafka table engine. [#74064](https://github.com/ClickHouse/ClickHouse/pull/74064) ([Yarik Briukhovetskyi](https://github.com/yariks5s)). -* Fix altering Storage `S3Queue` settings with "s3queue_" prefix to without and vice versa. [#74075](https://github.com/ClickHouse/ClickHouse/pull/74075) ([Kseniia Sumarokova](https://github.com/kssenii)). -* Add a setting `allow_push_predicate_ast_for_distributed_subqueries`. This adds AST-based predicate push-down for distributed queries with the analyzer. This is a temporary solution that we use until distributed queries with query plan serialization are supported. Closes [#66878](https://github.com/ClickHouse/ClickHouse/issues/66878) [#69472](https://github.com/ClickHouse/ClickHouse/issues/69472) [#65638](https://github.com/ClickHouse/ClickHouse/issues/65638) [#68030](https://github.com/ClickHouse/ClickHouse/issues/68030) [#73718](https://github.com/ClickHouse/ClickHouse/issues/73718). [#74085](https://github.com/ClickHouse/ClickHouse/pull/74085) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). -* Fixes issue when after [#73095](https://github.com/ClickHouse/ClickHouse/issues/73095) port can be present in the forwarded_for field, which leads to inability to resolve host name with port included. [#74116](https://github.com/ClickHouse/ClickHouse/pull/74116) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)). -* Fixed incorrect formatting of `ALTER TABLE (DROP STATISTICS ...) (DROP STATISTICS ...)`. [#74126](https://github.com/ClickHouse/ClickHouse/pull/74126) ([Han Fei](https://github.com/hanfei1991)). -* Fix for issue [#66112](https://github.com/ClickHouse/ClickHouse/issues/66112). [#74128](https://github.com/ClickHouse/ClickHouse/pull/74128) ([Anton Ivashkin](https://github.com/ianton-ru)). -* It is no longer possible to use `Loop` as a table engine in `CREATE TABLE`. This combination was previously causing segfaults. [#74137](https://github.com/ClickHouse/ClickHouse/pull/74137) ([Yarik Briukhovetskyi](https://github.com/yariks5s)). -* Fix security issue to prevent SQL injection in postgresql and sqlite table functions. [#74144](https://github.com/ClickHouse/ClickHouse/pull/74144) ([Pablo Marcos](https://github.com/pamarcos)). 
-* Fix crash when reading a subcolumn from a compressed Memory engine table. Fixes [#74009](https://github.com/ClickHouse/ClickHouse/issues/74009). [#74161](https://github.com/ClickHouse/ClickHouse/pull/74161) ([Nikita Taranov](https://github.com/nickitat)). -* Fixed an infinite loop occurring with queries to `system.detached_tables`. [#74190](https://github.com/ClickHouse/ClickHouse/pull/74190) ([Konstantin Morozov](https://github.com/k-morozov)). -* Fix a logical error in S3Queue when marking a file as failed. [#74216](https://github.com/ClickHouse/ClickHouse/pull/74216) ([Kseniia Sumarokova](https://github.com/kssenii)). -* Check for unsupported types for some storages. [#74218](https://github.com/ClickHouse/ClickHouse/pull/74218) ([Pavel Kruglov](https://github.com/Avogar)). -* Fix crash with query `INSERT INTO SELECT` over the PostgreSQL interface on macOS (issue [#72938](https://github.com/ClickHouse/ClickHouse/issues/72938)). [#74231](https://github.com/ClickHouse/ClickHouse/pull/74231) ([Artem Yurov](https://github.com/ArtemYurov)). -* Fix native copy settings (`allow_s3_native_copy`/`allow_azure_native_copy`) for `RESTORE` from a base backup; see the usage sketch below. [#74286](https://github.com/ClickHouse/ClickHouse/pull/74286) ([Azat Khuzhin](https://github.com/azat)). -* Fixed an issue occurring when the number of detached tables in a database is a multiple of `max_block_size`. [#74289](https://github.com/ClickHouse/ClickHouse/pull/74289) ([Konstantin Morozov](https://github.com/k-morozov)). -* Fix copying via ObjectStorage (i.e. S3) when the source and destination credentials differ. [#74331](https://github.com/ClickHouse/ClickHouse/pull/74331) ([Azat Khuzhin](https://github.com/azat)). -* Fixed uninitialized `max_log_ptr` in the replicated database. [#74336](https://github.com/ClickHouse/ClickHouse/pull/74336) ([Konstantin Morozov](https://github.com/k-morozov)). -* Fix detection of "use the Rewrite method in the JSON API" for native copy on GCS. [#74338](https://github.com/ClickHouse/ClickHouse/pull/74338) ([Azat Khuzhin](https://github.com/azat)). -* Fix crash when inserting an interval (issue [#74299](https://github.com/ClickHouse/ClickHouse/issues/74299)). [#74478](https://github.com/ClickHouse/ClickHouse/pull/74478) ([NamNguyenHoai](https://github.com/NamHoaiNguyen)). -* Fix incorrect projection analysis when `count(nullable)` is used in aggregate projections. This fixes [#74495](https://github.com/ClickHouse/ClickHouse/issues/74495). This PR also adds some logs around projection analysis to clarify why a projection is used or why not. [#74498](https://github.com/ClickHouse/ClickHouse/pull/74498) ([Amos Bird](https://github.com/amosbird)). -* Fix incorrect calculation of `BackgroundMergesAndMutationsPoolSize` (it was 2x the real value). [#74509](https://github.com/ClickHouse/ClickHouse/pull/74509) ([alesapin](https://github.com/alesapin)). -* Fix a bug with leaking Keeper watches when Cluster Discovery is enabled. [#74521](https://github.com/ClickHouse/ClickHouse/pull/74521) ([RinChanNOW](https://github.com/RinChanNOWWW)). -* Fix formatting of constant JSON literals. Previously it could lead to syntax errors when sending the query to another server. [#74533](https://github.com/ClickHouse/ClickHouse/pull/74533) ([Pavel Kruglov](https://github.com/Avogar)). -* Fix a memory alignment issue reported by UBSan [#74512](https://github.com/ClickHouse/ClickHouse/issues/74512). [#74534](https://github.com/ClickHouse/ClickHouse/pull/74534) ([Arthur Passos](https://github.com/arthurpassos)).
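A hedged usage sketch for the native-copy `RESTORE` entry above; the backup location, credentials, and table name are placeholders rather than values taken from the changelog:

```sql
-- Restore a table from a backup stored in S3 while explicitly allowing
-- native (server-side) copy of the data parts.
RESTORE TABLE db.tbl
FROM S3('https://my-bucket.s3.amazonaws.com/backups/incremental', '<key-id>', '<secret>')
SETTINGS allow_s3_native_copy = 1;
```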
-* Fix KeeperMap concurrent cleanup during table creation. [#74568](https://github.com/ClickHouse/ClickHouse/pull/74568) ([Antonio Andelic](https://github.com/antonio2368)). -* Do not remove unused projection columns in subqueries in the presence of `EXCEPT` or `INTERSECT` to preserve the correct query result. Fixes [#73930](https://github.com/ClickHouse/ClickHouse/issues/73930). Fixes [#66465](https://github.com/ClickHouse/ClickHouse/issues/66465). [#74577](https://github.com/ClickHouse/ClickHouse/pull/74577) ([Dmitry Novik](https://github.com/novikd)). -* Fix broken create query when using constant partition expressions with implicit projections enabled. This fixes [#74596](https://github.com/ClickHouse/ClickHouse/issues/74596) . [#74634](https://github.com/ClickHouse/ClickHouse/pull/74634) ([Amos Bird](https://github.com/amosbird)). -* Fixed `INSERT SELECT` queries between tables with `Tuple` columns and enabled sparse serialization. [#74698](https://github.com/ClickHouse/ClickHouse/pull/74698) ([Anton Popov](https://github.com/CurtizJ)). -* Function `right` works incorrectly for const negative offset. [#74701](https://github.com/ClickHouse/ClickHouse/pull/74701) ([Daniil Ivanik](https://github.com/divanik)). -* Fix insertion of gzip-ed data sometimes fails due to flawed decompression on client side. [#74707](https://github.com/ClickHouse/ClickHouse/pull/74707) ([siyuan](https://github.com/linkwk7)). -* Avoid leaving connection in broken state after INSERT finishes with exception. [#74740](https://github.com/ClickHouse/ClickHouse/pull/74740) ([Azat Khuzhin](https://github.com/azat)). -* Avoid reusing connections that had been left in the intermediate state. [#74749](https://github.com/ClickHouse/ClickHouse/pull/74749) ([Azat Khuzhin](https://github.com/azat)). -* Partial revokes with wildcard grants could remove more privileges than expected. Closes [#74263](https://github.com/ClickHouse/ClickHouse/issues/74263). [#74751](https://github.com/ClickHouse/ClickHouse/pull/74751) ([pufit](https://github.com/pufit)). -* Fix crash during JSON type declaration parsing when type name is not uppercase. [#74784](https://github.com/ClickHouse/ClickHouse/pull/74784) ([Pavel Kruglov](https://github.com/Avogar)). -* Keeper fix: fix reading log entries from disk. [#74785](https://github.com/ClickHouse/ClickHouse/pull/74785) ([Antonio Andelic](https://github.com/antonio2368)). -* Fixed checking grants for SYSTEM REFRESH/START/STOP VIEW, now it's not required to have this grant on `*.*` to execute a query for a specific view, only grant for this view are required. [#74789](https://github.com/ClickHouse/ClickHouse/pull/74789) ([Alexander Tokmakov](https://github.com/tavplubix)). -* The `hasColumnInTable` function doesn't account for alias columns. Fix it to also work for alias columns. [#74841](https://github.com/ClickHouse/ClickHouse/pull/74841) ([Bharat Nallan](https://github.com/bharatnc)). -* Keeper: fix logical_error when the connection had been terminated before establishing. [#74844](https://github.com/ClickHouse/ClickHouse/pull/74844) ([Michael Kolupaev](https://github.com/al13n321)). -* Fix a behavior when the server couldn't startup when there's a table using `AzureBlobStorage`. Tables are loaded without any requests to Azure. [#74880](https://github.com/ClickHouse/ClickHouse/pull/74880) ([Alexey Katsman](https://github.com/alexkats)). -* Fix missing `used_privileges` and `missing_privileges` fields in `query_log` for BACKUP and RESTORE operations. 
[#74887](https://github.com/ClickHouse/ClickHouse/pull/74887) ([Alexey Katsman](https://github.com/alexkats)). -* Fix a FILE_DOESNT_EXIST error occurring during data parts merge for a table with an empty column in Azure Blob Storage. [#74892](https://github.com/ClickHouse/ClickHouse/pull/74892) ([Julia Kartseva](https://github.com/jkartseva)). -* Fix projection column name when joining temporary tables, closes [#68872](https://github.com/ClickHouse/ClickHouse/issues/68872). [#74897](https://github.com/ClickHouse/ClickHouse/pull/74897) ([Vladimir Cherkasov](https://github.com/vdimir)). -* HDFS: refresh the Kerberos ticket if a SASL error occurs during an HDFS select request. [#74930](https://github.com/ClickHouse/ClickHouse/pull/74930) ([inv2004](https://github.com/inv2004)). -* Fix queries to a Replicated database in startup_scripts. [#74942](https://github.com/ClickHouse/ClickHouse/pull/74942) ([Azat Khuzhin](https://github.com/azat)). -* Fix issues with aliased expression types in the JOIN ON clause when a null-safe comparison is used. [#74970](https://github.com/ClickHouse/ClickHouse/pull/74970) ([Vladimir Cherkasov](https://github.com/vdimir)). -* Revert a part's state from deleting back to outdated when the remove operation has failed. [#74985](https://github.com/ClickHouse/ClickHouse/pull/74985) ([Sema Checherinda](https://github.com/CheSema)). -* In previous versions, when there was a scalar subquery, we started writing the progress (accumulated from processing the subquery) during the initialization of the data format, which was before HTTP headers were written. This led to the loss of HTTP headers, such as X-ClickHouse-QueryId and X-ClickHouse-Format, as well as Content-Type. [#74991](https://github.com/ClickHouse/ClickHouse/pull/74991) ([Alexey Milovidov](https://github.com/alexey-milovidov)). -* Fix `CREATE TABLE AS...` queries for `database_replicated_allow_replicated_engine_arguments=0`. [#75000](https://github.com/ClickHouse/ClickHouse/pull/75000) ([Bharat Nallan](https://github.com/bharatnc)). -* Fix leaving the connection in a bad state in the client after INSERT exceptions. [#75030](https://github.com/ClickHouse/ClickHouse/pull/75030) ([Azat Khuzhin](https://github.com/azat)). -* Fix crash due to an uncaught exception in PSQL replication. [#75062](https://github.com/ClickHouse/ClickHouse/pull/75062) ([Azat Khuzhin](https://github.com/azat)). -* SASL can fail any RPC call; the fix retries the call in case the Kerberos (krb5) ticket has expired. [#75063](https://github.com/ClickHouse/ClickHouse/pull/75063) ([inv2004](https://github.com/inv2004)). -* Fixed usage of indexes (primary and secondary) for `Array`, `Map` and `Nullable(..)` columns with the enabled setting `optimize_function_to_subcolumns`. Previously, indexes for these columns could have been ignored. [#75081](https://github.com/ClickHouse/ClickHouse/pull/75081) ([Anton Popov](https://github.com/CurtizJ)). -* Disable `flatten_nested` when creating materialized views with inner tables since it will not be possible to use such flattened columns. [#75085](https://github.com/ClickHouse/ClickHouse/pull/75085) ([Christoph Wurm](https://github.com/cwurm)). -* Fix some IPv6 addresses (such as ::ffff:1.1.1.1) in the forwarded_for field being wrongly interpreted, resulting in a client disconnect with an exception. [#75133](https://github.com/ClickHouse/ClickHouse/pull/75133) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)). -* Fix nullsafe JOIN handling for the LowCardinality nullable data type.
Previously JOIN ON with nullsafe comparison, such as `IS NOT DISTINCT FROM`, `<=>` , `a IS NULL AND b IS NULL OR a == b` didn't work correctly with LowCardinality columns. [#75143](https://github.com/ClickHouse/ClickHouse/pull/75143) ([Vladimir Cherkasov](https://github.com/vdimir)). -* Fix queries with unused interpolation with the new analyzer. [#75173](https://github.com/ClickHouse/ClickHouse/pull/75173) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)). -* Fix the crash bug of CTE with Insert. [#75188](https://github.com/ClickHouse/ClickHouse/pull/75188) ([Shichao Jin](https://github.com/jsc0218)). -* Keeper fix: avoid writing to broken changelogs when rolling back logs. [#75197](https://github.com/ClickHouse/ClickHouse/pull/75197) ([Antonio Andelic](https://github.com/antonio2368)). -* Use `BFloat16` as a supertype where appropriate. This closes: [#74404](https://github.com/ClickHouse/ClickHouse/issues/74404). [#75236](https://github.com/ClickHouse/ClickHouse/pull/75236) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)). -* Fix unexpected defaults in join result with any_join_distinct_right_table_keys and OR in JOIN ON. [#75262](https://github.com/ClickHouse/ClickHouse/pull/75262) ([Vladimir Cherkasov](https://github.com/vdimir)). -* Mask azureblobstorage table engine credentials. [#75319](https://github.com/ClickHouse/ClickHouse/pull/75319) ([Garrett Thomas](https://github.com/garrettthomaskth)). -* Fixed behavior when ClickHouse may erroneously do a filter pushdown to an external database like PostgreSQL, MySQL, or SQLite. This closes: [#71423](https://github.com/ClickHouse/ClickHouse/issues/71423). [#75320](https://github.com/ClickHouse/ClickHouse/pull/75320) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)). -* Fix crash in protobuf schema cache that can happen during output in Protobuf format and parallel query `SYSTEM DROP FORMAT SCHEMA CACHE`. [#75357](https://github.com/ClickHouse/ClickHouse/pull/75357) ([Pavel Kruglov](https://github.com/Avogar)). -* Fix a possible logical error or uninitialized memory issue when a filter from `HAVING` is pushed down with parallel replicas. [#75363](https://github.com/ClickHouse/ClickHouse/pull/75363) ([Vladimir Cherkasov](https://github.com/vdimir)). -* Hide sensitive info for `icebergS3`, `icebergAzure` table functions and table engines. [#75378](https://github.com/ClickHouse/ClickHouse/pull/75378) ([Kseniia Sumarokova](https://github.com/kssenii)). -* Function `TRIM` with computed empty trim characters are now correctly handled. Example: `SELECT TRIM(LEADING concat('') FROM 'foo')` (Issue [#69922](https://github.com/ClickHouse/ClickHouse/issues/69922)). [#75399](https://github.com/ClickHouse/ClickHouse/pull/75399) ([Manish Gill](https://github.com/mgill25)). -* Fix data race in IOutputFormat. [#75448](https://github.com/ClickHouse/ClickHouse/pull/75448) ([Pavel Kruglov](https://github.com/Avogar)). -* Fix possible error `Elements ... and ... of Nested data structure ... (Array columns) have different array sizes` when JSON subcolumns with Array type are used in JOIN over distributed tables. [#75512](https://github.com/ClickHouse/ClickHouse/pull/75512) ([Pavel Kruglov](https://github.com/Avogar)). -* Fix invalid result buffer size calculation. Closes [#70031](https://github.com/ClickHouse/ClickHouse/issues/70031). [#75548](https://github.com/ClickHouse/ClickHouse/pull/75548) ([Konstantin Bogdanov](https://github.com/thevar1able)). -* Fix interaction between allow_feature_tier and compatibility mergetree setting. 
[#75635](https://github.com/ClickHouse/ClickHouse/pull/75635) ([Raúl Marín](https://github.com/Algunenano)). -* Fix incorrect processed_rows value in system.s3queue_log in case file was retried. [#75666](https://github.com/ClickHouse/ClickHouse/pull/75666) ([Kseniia Sumarokova](https://github.com/kssenii)). -* Respect `materialized_views_ignore_errors` when a materialized view writes to a URL engine and there is a connectivity issue. [#75679](https://github.com/ClickHouse/ClickHouse/pull/75679) ([Christoph Wurm](https://github.com/cwurm)). -* Fixed rare crashes while reading from `MergeTree` table after multiple asynchronous `RENAME` queries (with `alter_sync = 0`) between columns with different types. [#75693](https://github.com/ClickHouse/ClickHouse/pull/75693) ([Anton Popov](https://github.com/CurtizJ)). -* Fix `Block structure mismatch in QueryPipeline stream` error for some queries with `UNION ALL`. [#75715](https://github.com/ClickHouse/ClickHouse/pull/75715) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). -* Rebuild projection on alter modify of its PK column. Previously it could lead to `CANNOT_READ_ALL_DATA` errors during selects after alter modify of the column used in projection PK. [#75720](https://github.com/ClickHouse/ClickHouse/pull/75720) ([Pavel Kruglov](https://github.com/Avogar)). -* Fix incorrect result of `ARRAY JOIN` for scalar subqueries (with analyzer). [#75732](https://github.com/ClickHouse/ClickHouse/pull/75732) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). -* Fixed null pointer dereference in `DistinctSortedStreamTransform`. [#75734](https://github.com/ClickHouse/ClickHouse/pull/75734) ([Nikita Taranov](https://github.com/nickitat)). -* Fix `allow_suspicious_ttl_expressions` behaviour. [#75771](https://github.com/ClickHouse/ClickHouse/pull/75771) ([Aleksei Filatov](https://github.com/aalexfvk)). -* Fix uninitialized memory read in function `translate`. This closes [#75592](https://github.com/ClickHouse/ClickHouse/issues/75592). [#75794](https://github.com/ClickHouse/ClickHouse/pull/75794) ([Alexey Milovidov](https://github.com/alexey-milovidov)). -* Propagate format settings to JSON as string formatting in Native format. [#75832](https://github.com/ClickHouse/ClickHouse/pull/75832) ([Pavel Kruglov](https://github.com/Avogar)). -* Recorded the default enablement of parallel hash as join algorithm in v24.12 in the settings change history. This means that ClickHouse will continue to join using non-parallel hash if an older compatibility level than v24.12 is configured. [#75870](https://github.com/ClickHouse/ClickHouse/pull/75870) ([Robert Schulze](https://github.com/rschu1ze)). -* Fixed a bug that tables with implicitly added min-max indices could not be copied into a new table (issue [#75677](https://github.com/ClickHouse/ClickHouse/issues/75677)). [#75877](https://github.com/ClickHouse/ClickHouse/pull/75877) ([Smita Kulkarni](https://github.com/SmitaRKulkarni)). -* `clickhouse-library-bridge` allows opening arbitrary libraries from the filesystem, which makes it safe to run only inside an isolated environment. To prevent a vulnerability when it is run near the clickhouse-server, we will limit the paths of libraries to a location, provided in the configuration. This vulnerability was found with the [ClickHouse Bug Bounty Program](https://github.com/ClickHouse/ClickHouse/issues/38986) by **Arseniy Dugin**. [#75954](https://github.com/ClickHouse/ClickHouse/pull/75954) ([Alexey Milovidov](https://github.com/alexey-milovidov)). 
-* We happened to use JSON serialization for some metadata, which was a mistake, because JSON does not support binary data inside string literals, including zero bytes. SQL queries can contain binary data and invalid UTF-8, so we have to support this in our metadata files as well. At the same time, ClickHouse's `JSONEachRow` and similar formats work around that by deviating from the JSON standard in favor of a perfect roundtrip for the binary data. See the motivation here: https://github.com/ClickHouse/ClickHouse/pull/73668#issuecomment-2560501790. The solution is to make the `Poco::JSON` library consistent with the JSON format serialization in ClickHouse. This closes [#73668](https://github.com/ClickHouse/ClickHouse/issues/73668). [#75963](https://github.com/ClickHouse/ClickHouse/pull/75963) ([Alexey Milovidov](https://github.com/alexey-milovidov)). -* Fix `Part <...> does not contain in snapshot of previous virtual parts. (PART_IS_TEMPORARILY_LOCKED)` during DETACH PART. [#76039](https://github.com/ClickHouse/ClickHouse/pull/76039) ([Aleksei Filatov](https://github.com/aalexfvk)). -* Fix the check for commit limits in storage `S3Queue`. [#76104](https://github.com/ClickHouse/ClickHouse/pull/76104) ([Kseniia Sumarokova](https://github.com/kssenii)). -* Fix attaching MergeTree tables with auto indexes (`add_minmax_index_for_numeric_columns`/`add_minmax_index_for_string_columns`); see the sketch below. [#76139](https://github.com/ClickHouse/ClickHouse/pull/76139) ([Azat Khuzhin](https://github.com/azat)). -* Fixed an issue where stack traces from parent threads of a job (`enable_job_stack_trace` setting) were not printed out. Also fixed the `enable_job_stack_trace` setting not being properly propagated to the threads, so the resulting stack trace content did not always respect this setting. [#76191](https://github.com/ClickHouse/ClickHouse/pull/76191) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)). -* Fix reinterpretAs with FixedString on big-endian architecture. [#76253](https://github.com/ClickHouse/ClickHouse/pull/76253) ([Azat Khuzhin](https://github.com/azat)). -* Fix all sorts of bugs caused by a race between UUIDs and table names (for instance, it fixes the race between `RENAME` and `RESTART REPLICA`: with a concurrent `RENAME` and `SYSTEM RESTART REPLICA` you could end up restarting the wrong replica and/or leaving one of the tables in a `Table X is being restarted` state). [#76308](https://github.com/ClickHouse/ClickHouse/pull/76308) ([Azat Khuzhin](https://github.com/azat)). -* Removed allocation from the signal handler. [#76446](https://github.com/ClickHouse/ClickHouse/pull/76446) ([Nikita Taranov](https://github.com/nickitat)). -* Fix dynamic filesystem cache resize mishandling unexpected errors during eviction. [#76466](https://github.com/ClickHouse/ClickHouse/pull/76466) ([Kseniia Sumarokova](https://github.com/kssenii)). -* Fixed `used_flag` initialization in parallel hash. It might cause a server crash. [#76580](https://github.com/ClickHouse/ClickHouse/pull/76580) ([Nikita Taranov](https://github.com/nickitat)). -* Fix a logical error when calling the `defaultProfiles()` function inside a projection. [#76627](https://github.com/ClickHouse/ClickHouse/pull/76627) ([pufit](https://github.com/pufit)). -* Do not request interactive basic auth in the browser in the Web UI. Closes [#76319](https://github.com/ClickHouse/ClickHouse/issues/76319). [#76637](https://github.com/ClickHouse/ClickHouse/pull/76637) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
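For the auto min-max index entry above, a hedged sketch of the detach/attach cycle that the fix concerns; the table name is illustrative, and the two settings are assumed to be the table-level MergeTree settings named in the entry:

```sql
CREATE TABLE minmax_demo
(
    id UInt64,
    s  String
)
ENGINE = MergeTree
ORDER BY id
SETTINGS add_minmax_index_for_numeric_columns = 1,
         add_minmax_index_for_string_columns = 1;

-- Re-attaching a table created with these automatic min-max indexes
-- previously could fail; with the fix it is expected to succeed.
DETACH TABLE minmax_demo;
ATTACH TABLE minmax_demo;
```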
-* Fix THERE_IS_NO_COLUMN exception when selecting boolean literal from distributed tables. [#76656](https://github.com/ClickHouse/ClickHouse/pull/76656) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)). -* The subpath inside the table directory is chosen in a more profound way. [#76681](https://github.com/ClickHouse/ClickHouse/pull/76681) ([Daniil Ivanik](https://github.com/divanik)). -* Fix an error `Not found column in block` after altering a table with a subcolumn in PK. After https://github.com/ClickHouse/ClickHouse/pull/72644, requires https://github.com/ClickHouse/ClickHouse/pull/74403. [#76686](https://github.com/ClickHouse/ClickHouse/pull/76686) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). -* Add performance tests for null short circuits and fix bugs. [#76708](https://github.com/ClickHouse/ClickHouse/pull/76708) ([李扬](https://github.com/taiyang-li)). -* Flush output write buffers before finalizing them. Fix `LOGICAL_ERROR` generated during the finalization of some output format, e.g. `JSONEachRowWithProgressRowOutputFormat`. [#76726](https://github.com/ClickHouse/ClickHouse/pull/76726) ([Antonio Andelic](https://github.com/antonio2368)). -* Added support for MongoDB binary UUID ([#74452](https://github.com/ClickHouse/ClickHouse/issues/74452)) - Fixed WHERE pushdown to MongoDB when using the table function ([#72210](https://github.com/ClickHouse/ClickHouse/issues/72210)) - Changed the MongoDB - ClickHouse type mapping such that MongoDB's binary UUID can only be parsed to ClickHouse's UUID. This should avoid ambiguities and surprises in future. - Fixed OID mapping, preserving backward compatibility. [#76762](https://github.com/ClickHouse/ClickHouse/pull/76762) ([Kirill Nikiforov](https://github.com/allmazz)). -* Fix exception handling in parallel prefixes deserialization of JSON subcolumns. [#76809](https://github.com/ClickHouse/ClickHouse/pull/76809) ([Pavel Kruglov](https://github.com/Avogar)). -* Fix lgamma function behavior for negative integers. [#76840](https://github.com/ClickHouse/ClickHouse/pull/76840) ([Ilya Kataev](https://github.com/IlyaKataev)). -* Fix reverse key analysis for explicitly defined primary keys. Similar to [#76654](https://github.com/ClickHouse/ClickHouse/issues/76654). [#76846](https://github.com/ClickHouse/ClickHouse/pull/76846) ([Amos Bird](https://github.com/amosbird)). -* Fix pretty print of Bool values in JSON format. [#76905](https://github.com/ClickHouse/ClickHouse/pull/76905) ([Pavel Kruglov](https://github.com/Avogar)). -* Fix possible crash because of bad JSON column rollback on error during async inserts. [#76908](https://github.com/ClickHouse/ClickHouse/pull/76908) ([Pavel Kruglov](https://github.com/Avogar)). -* Previously, `multi_if` may return different types of columns during planning and main execution. This resulted in code producing undefined behavior from the C++ perspective. [#76914](https://github.com/ClickHouse/ClickHouse/pull/76914) ([Nikita Taranov](https://github.com/nickitat)). -* Fixed incorrect serialization of constant nullable keys in MergeTree. This fixes [#76939](https://github.com/ClickHouse/ClickHouse/issues/76939). [#76985](https://github.com/ClickHouse/ClickHouse/pull/76985) ([Amos Bird](https://github.com/amosbird)). -* Fix sorting of `BFloat16` values. This closes [#75487](https://github.com/ClickHouse/ClickHouse/issues/75487). This closes [#75669](https://github.com/ClickHouse/ClickHouse/issues/75669). 
[#77000](https://github.com/ClickHouse/ClickHouse/pull/77000) ([Alexey Milovidov](https://github.com/alexey-milovidov)). -* Fix a bug with JSON with a variant subcolumn by adding a check to skip ephemeral subcolumns in the part consistency check. [#72187](https://github.com/ClickHouse/ClickHouse/issues/72187). [#77034](https://github.com/ClickHouse/ClickHouse/pull/77034) ([Smita Kulkarni](https://github.com/SmitaRKulkarni)). -* Fix crash in template parsing in the Values format in case of a type mismatch. [#77071](https://github.com/ClickHouse/ClickHouse/pull/77071) ([Pavel Kruglov](https://github.com/Avogar)). -* Don't allow creating an EmbeddedRocksDB table with a subcolumn in a primary key. Previously, such a table could be created but `SELECT` queries failed. [#77074](https://github.com/ClickHouse/ClickHouse/pull/77074) ([Pavel Kruglov](https://github.com/Avogar)). -* Fix illegal comparison in distributed queries because pushing down predicates to the remote server didn't respect literal types. [#77093](https://github.com/ClickHouse/ClickHouse/pull/77093) ([Duc Canh Le](https://github.com/canhld94)). -* Fix crash during Kafka table creation with an exception. [#77121](https://github.com/ClickHouse/ClickHouse/pull/77121) ([Pavel Kruglov](https://github.com/Avogar)). -* Support new JSON and subcolumns in Kafka and RabbitMQ engines. [#77122](https://github.com/ClickHouse/ClickHouse/pull/77122) ([Pavel Kruglov](https://github.com/Avogar)). -* Fix exception stack unwinding on macOS. [#77126](https://github.com/ClickHouse/ClickHouse/pull/77126) ([Eduard Karacharov](https://github.com/korowa)). -* Fix reading the 'null' subcolumn in the getSubcolumn function. [#77163](https://github.com/ClickHouse/ClickHouse/pull/77163) ([Pavel Kruglov](https://github.com/Avogar)). -* Fix skip indexes not working with expressions containing literals in the analyzer, and remove trivial casts during index analysis. [#77229](https://github.com/ClickHouse/ClickHouse/pull/77229) ([Pavel Kruglov](https://github.com/Avogar)). -* Fix the bloom filter index with Array and unsupported functions. [#77271](https://github.com/ClickHouse/ClickHouse/pull/77271) ([Pavel Kruglov](https://github.com/Avogar)). -* We should only check the restriction on the number of tables during the initial CREATE query. [#77274](https://github.com/ClickHouse/ClickHouse/pull/77274) ([Nikolay Degterinsky](https://github.com/evillique)). -* `SELECT toBFloat16(-0.0) == toBFloat16(0.0)` now correctly returns `true` (previously `false`). This makes the behavior consistent with `Float32` and `Float64`. [#77290](https://github.com/ClickHouse/ClickHouse/pull/77290) ([Shankar Iyer](https://github.com/shankar-iyer)). -* Fix a possible incorrect reference to the uninitialized key_index variable, which may lead to a crash in debug builds (this uninitialized reference won't cause issues in release builds because subsequent code is likely to throw errors). [#77305](https://github.com/ClickHouse/ClickHouse/pull/77305) ([wxybear](https://github.com/wxybear)). -* Reverted. [#77307](https://github.com/ClickHouse/ClickHouse/pull/77307) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). -* Fix the name for a partition with a Bool value. It was broken in https://github.com/ClickHouse/ClickHouse/pull/74533. [#77319](https://github.com/ClickHouse/ClickHouse/pull/77319) ([Pavel Kruglov](https://github.com/Avogar)). -* Fix comparison between tuples with nullable elements inside and strings.
As an example, before the change, a comparison between a Tuple `(1, null)` and a String `'(1,null)'` would result in an error. Another example would be a comparison between a Tuple `(1, a)`, where `a` is a Nullable column, and a String `'(1, 2)'`. This change addresses these issues. [#77323](https://github.com/ClickHouse/ClickHouse/pull/77323) ([Alexey Katsman](https://github.com/alexkats)). -* Fix crash in ObjectStorageQueueSource. It was introduced in https://github.com/ClickHouse/ClickHouse/pull/76358. [#77325](https://github.com/ClickHouse/ClickHouse/pull/77325) ([Pavel Kruglov](https://github.com/Avogar)). -* Fix a bug where the `close_session` query parameter didn't have any effect, leading to named sessions being closed only after `session_timeout`. [#77336](https://github.com/ClickHouse/ClickHouse/pull/77336) ([Alexey Katsman](https://github.com/alexkats)). -* Fix `async_insert` with `input()`. [#77340](https://github.com/ClickHouse/ClickHouse/pull/77340) ([Azat Khuzhin](https://github.com/azat)). -* Fix: `WITH FILL` may fail with `NOT_FOUND_COLUMN_IN_BLOCK` when the planner removes a sorting column. A similar issue is related to an inconsistent DAG calculated for the `INTERPOLATE` expression. [#77343](https://github.com/ClickHouse/ClickHouse/pull/77343) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)). -* Reverted. [#77390](https://github.com/ClickHouse/ClickHouse/pull/77390) ([Vladimir Cherkasov](https://github.com/vdimir)). -* Fixed receiving messages from a NATS server without an attached materialized view. [#77392](https://github.com/ClickHouse/ClickHouse/pull/77392) ([Dmitry Novikov](https://github.com/dmitry-sles-novikov)). -* Fix a logical error while reading from an empty `FileLog` via the `merge` table function, closes [#75575](https://github.com/ClickHouse/ClickHouse/issues/75575). [#77441](https://github.com/ClickHouse/ClickHouse/pull/77441) ([Vladimir Cherkasov](https://github.com/vdimir)). -* Fix several `LOGICAL_ERROR`s around setting an alias of invalid AST nodes. [#77445](https://github.com/ClickHouse/ClickHouse/pull/77445) ([Raúl Marín](https://github.com/Algunenano)). -* In the filesystem cache implementation, fix error processing during file segment writes. [#77471](https://github.com/ClickHouse/ClickHouse/pull/77471) ([Kseniia Sumarokova](https://github.com/kssenii)). -* Make DatabaseIceberg use the correct metadata file provided by the catalog. Closes [#75187](https://github.com/ClickHouse/ClickHouse/issues/75187). [#77486](https://github.com/ClickHouse/ClickHouse/pull/77486) ([Kseniia Sumarokova](https://github.com/kssenii)). -* Use default format settings in Dynamic serialization from a shared variant. [#77572](https://github.com/ClickHouse/ClickHouse/pull/77572) ([Pavel Kruglov](https://github.com/Avogar)). -* Revert 'Avoid toAST() in execution of scalar subqueries'. [#77584](https://github.com/ClickHouse/ClickHouse/pull/77584) ([Raúl Marín](https://github.com/Algunenano)). -* Fix checking whether the table data path exists on the local disk. [#77608](https://github.com/ClickHouse/ClickHouse/pull/77608) ([Tuan Pham Anh](https://github.com/tuanpach)). -* The query cache now assumes that UDFs are non-deterministic. Accordingly, results of queries with UDFs are no longer cached. Previously, users were able to define non-deterministic UDFs whose result would erroneously be cached (issue [#77553](https://github.com/ClickHouse/ClickHouse/issues/77553)). [#77633](https://github.com/ClickHouse/ClickHouse/pull/77633) ([Jimmy Aguilar Mena](https://github.com/Ergus)). -* Fix sending constant values to remote servers for some types.
[#77634](https://github.com/ClickHouse/ClickHouse/pull/77634) ([Pavel Kruglov](https://github.com/Avogar)). -* Fix system.filesystem_cache_log working only under setting `enable_filesystem_cache_log`. [#77650](https://github.com/ClickHouse/ClickHouse/pull/77650) ([Kseniia Sumarokova](https://github.com/kssenii)). -* Fix a logical error when calling `defaultRoles()` function inside a projection. Follow-up for [#76627](https://github.com/ClickHouse/ClickHouse/issues/76627). [#77667](https://github.com/ClickHouse/ClickHouse/pull/77667) ([pufit](https://github.com/pufit)). -* Fix crash because of expired context in StorageS3(Azure)Queue. [#77720](https://github.com/ClickHouse/ClickHouse/pull/77720) ([Kseniia Sumarokova](https://github.com/kssenii)). -* Second arguments of type `Nullable` for function `arrayResize` are now disallowed. Previously, anything from errors to wrong results could happen with `Nullable` as second argument. (issue [#48398](https://github.com/ClickHouse/ClickHouse/issues/48398)). [#77724](https://github.com/ClickHouse/ClickHouse/pull/77724) ([Manish Gill](https://github.com/mgill25)). -* Hide credentials in RabbitMQ, Nats, Redis, AzureQueue table engines. [#77755](https://github.com/ClickHouse/ClickHouse/pull/77755) ([Kseniia Sumarokova](https://github.com/kssenii)). -* Fix undefined behaviour on NaN comparison in ArgMin/ArgMax. [#77756](https://github.com/ClickHouse/ClickHouse/pull/77756) ([Raúl Marín](https://github.com/Algunenano)). -* Regularly check if merges and mutations were cancelled even in case when the operation doesn't produce any blocks to write. [#77766](https://github.com/ClickHouse/ClickHouse/pull/77766) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)). -* Reverted. [#77843](https://github.com/ClickHouse/ClickHouse/pull/77843) ([Vladimir Cherkasov](https://github.com/vdimir)). -* Fix possible crash when `NOT_FOUND_COLUMN_IN_BLOCK` error occurs. [#77854](https://github.com/ClickHouse/ClickHouse/pull/77854) ([Vladimir Cherkasov](https://github.com/vdimir)). -* Fix crash that happens in the `StorageSystemObjectStorageQueueSettings` while filling data. [#77878](https://github.com/ClickHouse/ClickHouse/pull/77878) ([Bharat Nallan](https://github.com/bharatnc)). -* Disable fuzzy search for history in SSH server (since it requires skim). [#78002](https://github.com/ClickHouse/ClickHouse/pull/78002) ([Azat Khuzhin](https://github.com/azat)). -* Fixes a bug that a vector search query on a non-indexed column was returning incorrect results if there was another vector column in the table with a defined vector similarity index. (Issue [#77978](https://github.com/ClickHouse/ClickHouse/issues/77978)). [#78069](https://github.com/ClickHouse/ClickHouse/pull/78069) ([Shankar Iyer](https://github.com/shankar-iyer)). -* Fix `The requested output format {} is binary... Do you want to output it anyway? [y/N]` prompt. [#78095](https://github.com/ClickHouse/ClickHouse/pull/78095) ([Azat Khuzhin](https://github.com/azat)). -* Fix of a bug in case of `toStartOfInterval` with zero origin argument. [#78096](https://github.com/ClickHouse/ClickHouse/pull/78096) ([Yarik Briukhovetskyi](https://github.com/yariks5s)). -* Disallow specifying an empty `session_id` query parameter for HTTP interface. [#78098](https://github.com/ClickHouse/ClickHouse/pull/78098) ([Alexey Katsman](https://github.com/alexkats)). -* Fix metadata override in Database Replicated which could have happened due to a RENAME query executed right after an ALTER query. 
[#78107](https://github.com/ClickHouse/ClickHouse/pull/78107) ([Nikolay Degterinsky](https://github.com/evillique)). -* Fix crash in the NATS engine. [#78108](https://github.com/ClickHouse/ClickHouse/pull/78108) ([Dmitry Novikov](https://github.com/dmitry-sles-novikov)). -* Do not try to create a `history_file` in an embedded client for SSH. [#78112](https://github.com/ClickHouse/ClickHouse/pull/78112) ([Azat Khuzhin](https://github.com/azat)). -* Fix `system.detached_tables` displaying incorrect information after RENAME DATABASE or DROP TABLE queries. [#78126](https://github.com/ClickHouse/ClickHouse/pull/78126) ([Nikolay Degterinsky](https://github.com/evillique)). -* Fix checks for too many tables with Database Replicated after https://github.com/ClickHouse/ClickHouse/pull/77274. Also, perform the check before creating the storage to avoid creating unaccounted nodes in ZooKeeper in the case of RMT or KeeperMap. [#78127](https://github.com/ClickHouse/ClickHouse/pull/78127) ([Nikolay Degterinsky](https://github.com/evillique)). -* Fix a possible crash due to concurrent S3Queue metadata initialization. [#78131](https://github.com/ClickHouse/ClickHouse/pull/78131) ([Azat Khuzhin](https://github.com/azat)). -* `groupArray*` functions now produce a BAD_ARGUMENTS error for an Int-typed 0 value of the max_size argument, as is already done for the UInt one, instead of trying to execute with it. [#78140](https://github.com/ClickHouse/ClickHouse/pull/78140) ([Eduard Karacharov](https://github.com/korowa)). -* Prevent a crash in recoverLostReplica if the local table is removed before it's detached. [#78173](https://github.com/ClickHouse/ClickHouse/pull/78173) ([Raúl Marín](https://github.com/Algunenano)). -* Fix the "alterable" column in `system.s3_queue_settings` always returning `false`. [#78187](https://github.com/ClickHouse/ClickHouse/pull/78187) ([Kseniia Sumarokova](https://github.com/kssenii)). -* Mask the Azure access signature so that it is not visible to the user or in logs. [#78189](https://github.com/ClickHouse/ClickHouse/pull/78189) ([Kseniia Sumarokova](https://github.com/kssenii)). -* Fix prefetch of substreams with prefixes in Wide parts. [#78205](https://github.com/ClickHouse/ClickHouse/pull/78205) ([Pavel Kruglov](https://github.com/Avogar)). -* Fixed crashes / incorrect results for `mapFromArrays` in case of a `LowCardinality(Nullable)` key array type. [#78240](https://github.com/ClickHouse/ClickHouse/pull/78240) ([Eduard Karacharov](https://github.com/korowa)). -* Fix delta-kernel auth options. [#78255](https://github.com/ClickHouse/ClickHouse/pull/78255) ([Kseniia Sumarokova](https://github.com/kssenii)). -* Do not schedule a RefreshMV task if a replica's `disable_insertion_and_mutation` is true. Such a task performs an insertion, which would fail if `disable_insertion_and_mutation` is true. [#78277](https://github.com/ClickHouse/ClickHouse/pull/78277) ([Xu Jia](https://github.com/XuJia0210)). -* Validate access to underlying tables for the Merge engine. [#78339](https://github.com/ClickHouse/ClickHouse/pull/78339) ([Pervakov Grigorii](https://github.com/GrigoryPervakov)). -* Fix the FINAL modifier being lost for a `Distributed` engine table. [#78428](https://github.com/ClickHouse/ClickHouse/pull/78428) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)). -* `bitmapMin` returns `uint32_max` when the bitmap is empty (`uint64_max` when the input type is >= 8 bits), which matches the behavior of an empty `roaring_bitmap`'s `minimum()`. [#78444](https://github.com/ClickHouse/ClickHouse/pull/78444) ([wxybear](https://github.com/wxybear)).
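A hedged illustration of the `bitmapMin` entry above; the expected value follows from the description (uint32_max is 4294967295), though the exact client output may be formatted differently:

```sql
-- bitmapBuild over an empty UInt32 array yields an empty bitmap;
-- per the entry above, bitmapMin on it is expected to return 4294967295 (uint32_max).
SELECT bitmapMin(bitmapBuild(emptyArrayUInt32()));
```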
-* Revert "Apply preserve_most attribute at some places in code" since it may lead to crashes. [#78449](https://github.com/ClickHouse/ClickHouse/pull/78449) ([Azat Khuzhin](https://github.com/azat)). -* Use insertion columns for INFILE schema inference. [#78490](https://github.com/ClickHouse/ClickHouse/pull/78490) ([Pervakov Grigorii](https://github.com/GrigoryPervakov)). -* Disable parallelize query processing right after reading `FROM` when `distributed_aggregation_memory_efficient` enabled, it may lead to logical error. Closes [#76934](https://github.com/ClickHouse/ClickHouse/issues/76934). [#78500](https://github.com/ClickHouse/ClickHouse/pull/78500) ([flynn](https://github.com/ucasfl)). -* Set at least one stream for reading in case there are zero planned streams after applying `max_streams_to_max_threads_ratio` setting. [#78505](https://github.com/ClickHouse/ClickHouse/pull/78505) ([Eduard Karacharov](https://github.com/korowa)). -* In storage S3Queue fix logical error "Cannot unregister: table uuid is not registered". Closes [#78285](https://github.com/ClickHouse/ClickHouse/issues/78285). [#78541](https://github.com/ClickHouse/ClickHouse/pull/78541) ([Kseniia Sumarokova](https://github.com/kssenii)). -* ClickHouse is now able to figure out its cgroup v2 on systems with both cgroups v1 and v2 enabled. [#78566](https://github.com/ClickHouse/ClickHouse/pull/78566) ([Grigory Korolev](https://github.com/gkorolev)). -* ObjectStorage cluster table functions failed when used with table level-settings. [#78587](https://github.com/ClickHouse/ClickHouse/pull/78587) ([Daniil Ivanik](https://github.com/divanik)). -* Better checks for transactions are not supported by ReplicatedMergeTree on `INSERT`s. [#78633](https://github.com/ClickHouse/ClickHouse/pull/78633) ([Azat Khuzhin](https://github.com/azat)). -* Apply query settings during attachment. [#78637](https://github.com/ClickHouse/ClickHouse/pull/78637) ([Raúl Marín](https://github.com/Algunenano)). -* Fixes a crash when an invalid path is specified in `iceberg_metadata_file_path`. [#78688](https://github.com/ClickHouse/ClickHouse/pull/78688) ([alesapin](https://github.com/alesapin)). -* In DeltaLake table engine with delta-kernel implementation fix case when read schema is different from table schema and there are partition columns at the same time leading to not found column error. [#78690](https://github.com/ClickHouse/ClickHouse/pull/78690) ([Kseniia Sumarokova](https://github.com/kssenii)). -* This update corrects a bug where a new named session would inadvertently close at the scheduled time of a previous session if both sessions shared the same name and the new one was created before the old one's timeout expired. [#78698](https://github.com/ClickHouse/ClickHouse/pull/78698) ([Alexey Katsman](https://github.com/alexkats)). -* Don't block table shutdown while running CHECK TABLE. [#78782](https://github.com/ClickHouse/ClickHouse/pull/78782) ([Raúl Marín](https://github.com/Algunenano)). -* Keeper fix: fix ephemeral count in all cases. [#78799](https://github.com/ClickHouse/ClickHouse/pull/78799) ([Antonio Andelic](https://github.com/antonio2368)). -* Fix bad cast in `StorageDistributed` when using table functions other than `view()`. Closes [#78464](https://github.com/ClickHouse/ClickHouse/issues/78464). [#78828](https://github.com/ClickHouse/ClickHouse/pull/78828) ([Konstantin Bogdanov](https://github.com/thevar1able)). -* Fix formatting for `tupleElement(*, 1)`. Closes [#78639](https://github.com/ClickHouse/ClickHouse/issues/78639). 
[#78832](https://github.com/ClickHouse/ClickHouse/pull/78832) ([Konstantin Bogdanov](https://github.com/thevar1able)). -* Dictionaries of type `ssd_cache` now reject zero or negative `block_size` and `write_buffer_size` parameters (issue [#78314](https://github.com/ClickHouse/ClickHouse/issues/78314)). [#78854](https://github.com/ClickHouse/ClickHouse/pull/78854) ([Elmi Ahmadov](https://github.com/ahmadov)). -* Fix crash in REFRESHABLE MV in case of ALTER after incorrect shutdown. [#78858](https://github.com/ClickHouse/ClickHouse/pull/78858) ([Azat Khuzhin](https://github.com/azat)). -* Fix parsing of bad DateTime values in CSV format. [#78919](https://github.com/ClickHouse/ClickHouse/pull/78919) ([Pavel Kruglov](https://github.com/Avogar)). - -## Build/Testing/Packaging Improvement {#build-testing-packaging-improvement} - -* The internal dependency LLVM is bumped from 16 to 18. [#66053](https://github.com/ClickHouse/ClickHouse/pull/66053) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)). -* Restore deleted nats integration tests and fix errors. - fixed some race conditions in nats engine - fixed data loss when streaming data to nats in case of connection loss - fixed freeze of receiving the last chunk of data when streaming from nats ended - nats_max_reconnect is deprecated and has no effect, reconnect is performed permanently with nats_reconnect_wait timeout. [#69772](https://github.com/ClickHouse/ClickHouse/pull/69772) ([Dmitry Novikov](https://github.com/dmitry-sles-novikov)). -* Fix the issue that asm files of contrib openssl cannot be generated. [#72622](https://github.com/ClickHouse/ClickHouse/pull/72622) ([RinChanNOW](https://github.com/RinChanNOWWW)). -* Fix stability for test 03210_variant_with_aggregate_function_type. [#74012](https://github.com/ClickHouse/ClickHouse/pull/74012) ([Anton Ivashkin](https://github.com/ianton-ru)). -* Support build HDFS on both ARM and Intel Mac. [#74244](https://github.com/ClickHouse/ClickHouse/pull/74244) ([Yan Xin](https://github.com/yxheartipp)). -* The universal installation script will propose installation even on macOS. [#74339](https://github.com/ClickHouse/ClickHouse/pull/74339) ([Alexey Milovidov](https://github.com/alexey-milovidov)). -* Fix build when kerberos is not enabled. [#74771](https://github.com/ClickHouse/ClickHouse/pull/74771) ([flynn](https://github.com/ucasfl)). -* Update to embedded LLVM 19. [#75148](https://github.com/ClickHouse/ClickHouse/pull/75148) ([Konstantin Bogdanov](https://github.com/thevar1able)). -* *Potentially breaking*: Improvement to set even more restrictive defaults. The current defaults are already secure. The user has to specify an option to publish ports explicitly. But when the `default` user doesn’t have a password set by `CLICKHOUSE_PASSWORD` and/or a username changed by `CLICKHOUSE_USER` environment variables, it should be available only from the local system as an additional level of protection. [#75259](https://github.com/ClickHouse/ClickHouse/pull/75259) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). -* Integration tests have a 1-hour timeout for single batch of parallel tests running. When this timeout is reached `pytest` is killed without some logs. Internal pytest timeout is set to 55 minutes to print results from a session and not trigger external timeout signal. Closes [#75532](https://github.com/ClickHouse/ClickHouse/issues/75532). [#75533](https://github.com/ClickHouse/ClickHouse/pull/75533) ([Ilya Yatsishin](https://github.com/qoega)). 
-* Make all clickhouse-server related actions a function, and execute them only when launching the default binary in `entrypoint.sh`. A long-postponed improvement was suggested in [#50724](https://github.com/ClickHouse/ClickHouse/issues/50724). Added switch `--users` to `clickhouse-extract-from-config` to get values from the `users.xml`. [#75643](https://github.com/ClickHouse/ClickHouse/pull/75643) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). -* For stress tests if server did not exit while we collected stacktraces via gdb additional wait time is added to make `Possible deadlock on shutdown (see gdb.log)` detection less noisy. It will only add delay for cases when test did not finish successfully. [#75668](https://github.com/ClickHouse/ClickHouse/pull/75668) ([Ilya Yatsishin](https://github.com/qoega)). -* Restore deleted nats integration tests and fix errors. - fixed some race conditions in nats engine - fixed data loss when streaming data to nats in case of connection loss - fixed freeze of receiving the last chunk of data when streaming from nats ended - nats_max_reconnect is deprecated and has no effect, reconnect is performed permanently with nats_reconnect_wait timeout. [#75850](https://github.com/ClickHouse/ClickHouse/pull/75850) ([Dmitry Novikov](https://github.com/dmitry-sles-novikov)). -* Enable ICU and GRPC when cross-compiling for Darwin. [#75922](https://github.com/ClickHouse/ClickHouse/pull/75922) ([Raúl Marín](https://github.com/Algunenano)). -* Fixing splitting test's output because of `sleep` during the process group killing. [#76090](https://github.com/ClickHouse/ClickHouse/pull/76090) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). -* Do not collect the `docker-compose` logs at the end of running since the script is often killed. Instead, collect them in the background. [#76140](https://github.com/ClickHouse/ClickHouse/pull/76140) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). -* Split tests for kafka storage into a few files. Fixes [#69452](https://github.com/ClickHouse/ClickHouse/issues/69452). [#76208](https://github.com/ClickHouse/ClickHouse/pull/76208) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). -* `clickhouse-odbc-bridge` and `clickhouse-library-bridge` are moved to a separate repository, https://github.com/ClickHouse/odbc-bridge/. [#76225](https://github.com/ClickHouse/ClickHouse/pull/76225) ([Alexey Milovidov](https://github.com/alexey-milovidov)). -* Remove about 20MB of dead code from the binary. [#76226](https://github.com/ClickHouse/ClickHouse/pull/76226) ([Alexey Milovidov](https://github.com/alexey-milovidov)). -* Raise minimum required CMake version to 3.25 due to `block()` introduction. [#76316](https://github.com/ClickHouse/ClickHouse/pull/76316) ([Konstantin Bogdanov](https://github.com/thevar1able)). -* Update fmt to 11.1.3. [#76547](https://github.com/ClickHouse/ClickHouse/pull/76547) ([Raúl Marín](https://github.com/Algunenano)). -* Bump `lz4` to `1.10.0`. [#76571](https://github.com/ClickHouse/ClickHouse/pull/76571) ([Konstantin Bogdanov](https://github.com/thevar1able)). -* Bump `curl` to `8.12.1`. [#76572](https://github.com/ClickHouse/ClickHouse/pull/76572) ([Konstantin Bogdanov](https://github.com/thevar1able)). -* Bump `libcpuid` to `0.7.1`. [#76573](https://github.com/ClickHouse/ClickHouse/pull/76573) ([Konstantin Bogdanov](https://github.com/thevar1able)). -* Use a machine-readable format to parse pytest results. [#76910](https://github.com/ClickHouse/ClickHouse/pull/76910) ([Mikhail f. 
Shiryaev](https://github.com/Felixoid)). -* Fix rust cross-compilation and allow disabling Rust completely. [#76921](https://github.com/ClickHouse/ClickHouse/pull/76921) ([Raúl Marín](https://github.com/Algunenano)). -* Require clang 19 to build the project. [#76945](https://github.com/ClickHouse/ClickHouse/pull/76945) ([Raúl Marín](https://github.com/Algunenano)). -* The test is executed for 10+ seconds in the serial mode. It's too long for fast tests. [#76948](https://github.com/ClickHouse/ClickHouse/pull/76948) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). -* Bump `sccache` to `0.10.0`. [#77580](https://github.com/ClickHouse/ClickHouse/pull/77580) ([Konstantin Bogdanov](https://github.com/thevar1able)). -* Respect CPU target features in rust and enable LTO in all crates. [#78590](https://github.com/ClickHouse/ClickHouse/pull/78590) ([Raúl Marín](https://github.com/Algunenano)). -* Bump `minizip-ng` to `4.0.9`. [#78917](https://github.com/ClickHouse/ClickHouse/pull/78917) ([Konstantin Bogdanov](https://github.com/thevar1able)). diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/changelogs/fast-release-24-2.md b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/changelogs/fast-release-24-2.md deleted file mode 100644 index 714f8a8f575..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/changelogs/fast-release-24-2.md +++ /dev/null @@ -1,241 +0,0 @@ ---- -slug: /whats-new/changelog/24.2-fast-release -title: 'v24.2 Changelog' -description: 'Fast release changelog for v24.2' -keywords: ['changelog'] -sidebar_label: 'v24.2' ---- - -### ClickHouse release tag: 24.2.2.15987 {#clickhouse-release-tag-242215987} - -#### Backward Incompatible Change {#backward-incompatible-change} -* Validate suspicious/experimental types in nested types. Previously we didn't validate such types (except JSON) in nested types like Array/Tuple/Map. [#59385](https://github.com/ClickHouse/ClickHouse/pull/59385) ([Kruglov Pavel](https://github.com/Avogar)). -* The sort clause `ORDER BY ALL` (introduced with v23.12) is replaced by `ORDER BY *`. The previous syntax was too error-prone for tables with a column `all`. [#59450](https://github.com/ClickHouse/ClickHouse/pull/59450) ([Robert Schulze](https://github.com/rschu1ze)). -* Add sanity check for number of threads and block sizes. [#60138](https://github.com/ClickHouse/ClickHouse/pull/60138) ([Raúl Marín](https://github.com/Algunenano)). -* Reject incoming INSERT queries in case when query-level settings `async_insert` and `deduplicate_blocks_in_dependent_materialized_views` are enabled together. This behaviour is controlled by a setting `throw_if_deduplication_in_dependent_materialized_views_enabled_with_async_insert` and enabled by default. This is a continuation of https://github.com/ClickHouse/ClickHouse/pull/59699 needed to unblock https://github.com/ClickHouse/ClickHouse/pull/59915. [#60888](https://github.com/ClickHouse/ClickHouse/pull/60888) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)). -* Utility `clickhouse-copier` is moved to a separate repository on GitHub: https://github.com/ClickHouse/copier. It is no longer included in the bundle but is still available as a separate download. 
This closes: [#60734](https://github.com/ClickHouse/ClickHouse/issues/60734) This closes: [#60540](https://github.com/ClickHouse/ClickHouse/issues/60540) This closes: [#60250](https://github.com/ClickHouse/ClickHouse/issues/60250) This closes: [#52917](https://github.com/ClickHouse/ClickHouse/issues/52917) This closes: [#51140](https://github.com/ClickHouse/ClickHouse/issues/51140) This closes: [#47517](https://github.com/ClickHouse/ClickHouse/issues/47517) This closes: [#47189](https://github.com/ClickHouse/ClickHouse/issues/47189) This closes: [#46598](https://github.com/ClickHouse/ClickHouse/issues/46598) This closes: [#40257](https://github.com/ClickHouse/ClickHouse/issues/40257) This closes: [#36504](https://github.com/ClickHouse/ClickHouse/issues/36504) This closes: [#35485](https://github.com/ClickHouse/ClickHouse/issues/35485) This closes: [#33702](https://github.com/ClickHouse/ClickHouse/issues/33702) This closes: [#26702](https://github.com/ClickHouse/ClickHouse/issues/26702) ### Documentation entry for user-facing changes. [#61058](https://github.com/ClickHouse/ClickHouse/pull/61058) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)). -* To increase compatibility with MySQL, function `locate` now accepts arguments `(needle, haystack[, start_pos])` by default. The previous behavior `(haystack, needle, [, start_pos])` can be restored by setting `function_locate_has_mysql_compatible_argument_order = 0`. [#61092](https://github.com/ClickHouse/ClickHouse/pull/61092) ([Robert Schulze](https://github.com/rschu1ze)). -* The obsolete in-memory data parts have been deprecated since version 23.5 and have not been supported since version 23.10. Now the remaining code is removed. Continuation of [#55186](https://github.com/ClickHouse/ClickHouse/issues/55186) and [#45409](https://github.com/ClickHouse/ClickHouse/issues/45409). It is unlikely that you have used in-memory data parts because they were available only before version 23.5 and only when you enabled them manually by specifying the corresponding SETTINGS for a MergeTree table. To check if you have in-memory data parts, run the following query: `SELECT part_type, count() FROM system.parts GROUP BY part_type ORDER BY part_type`. To disable the usage of in-memory data parts, do `ALTER TABLE ... MODIFY SETTING min_bytes_for_compact_part = DEFAULT, min_rows_for_compact_part = DEFAULT`. Before upgrading from old ClickHouse releases, first check that you don't have in-memory data parts. If there are in-memory data parts, disable them first, then wait while there are no in-memory data parts and continue the upgrade. [#61127](https://github.com/ClickHouse/ClickHouse/pull/61127) ([Alexey Milovidov](https://github.com/alexey-milovidov)). -* Forbid `SimpleAggregateFunction` in `ORDER BY` of `MergeTree` tables (like `AggregateFunction` is forbidden, but they are forbidden because they are not comparable) by default (use `allow_suspicious_primary_key` to allow them). [#61399](https://github.com/ClickHouse/ClickHouse/pull/61399) ([Azat Khuzhin](https://github.com/azat)). -* ClickHouse allows arbitrary binary data in the String data type, which is typically UTF-8. Parquet/ORC/Arrow Strings only support UTF-8. That's why you can choose which Arrow's data type to use for the ClickHouse String data type - String or Binary. This is controlled by the settings, `output_format_parquet_string_as_string`, `output_format_orc_string_as_string`, `output_format_arrow_string_as_string`. 
While Binary would be more correct and compatible, using String by default will correspond to user expectations in most cases. Parquet/ORC/Arrow supports many compression methods, including lz4 and zstd. ClickHouse supports each and every compression method. Some inferior tools lack support for the faster `lz4` compression method, that's why we set `zstd` by default. This is controlled by the settings `output_format_parquet_compression_method`, `output_format_orc_compression_method`, and `output_format_arrow_compression_method`. We changed the default to `zstd` for Parquet and ORC, but not Arrow (it is emphasized for low-level usages). [#61817](https://github.com/ClickHouse/ClickHouse/pull/61817) ([Alexey Milovidov](https://github.com/alexey-milovidov)). -* Fix for the materialized view security issue, which allowed a user to insert into a table without the required grants for it. The fix validates that the user has permission to insert not only into a materialized view but also into all underlying tables. This means that some queries, which worked before, can now fail with Not enough privileges. To address this problem, the release introduces a new feature of SQL security for views [https://clickhouse.com/docs/sql-reference/statements/create/view#sql_security](/sql-reference/statements/create/view#sql_security). [#54901](https://github.com/ClickHouse/ClickHouse/pull/54901) ([pufit](https://github.com/pufit)) - -#### New Feature {#new-feature} -* `topK`/`topKWeighted` support a mode which returns the count of values and its error. [#54508](https://github.com/ClickHouse/ClickHouse/pull/54508) ([UnamedRus](https://github.com/UnamedRus)). -* Added new syntax which allows specifying a definer user in View/Materialized View. This allows executing selects/inserts from views without explicit grants for underlying tables. [#54901](https://github.com/ClickHouse/ClickHouse/pull/54901) ([pufit](https://github.com/pufit)). -* Implemented automatic conversion of merge tree tables of different kinds to the replicated engine. Create an empty `convert_to_replicated` file in the table's data directory (`/clickhouse/store/xxx/xxxyyyyy-yyyy-yyyy-yyyy-yyyyyyyyyyyy/`) and that table will be converted automatically on the next server start. [#57798](https://github.com/ClickHouse/ClickHouse/pull/57798) ([Kirill](https://github.com/kirillgarbar)). -* Added table function `mergeTreeIndex`. It represents the contents of index and marks files of `MergeTree` tables. It can be used for introspection. Syntax: `mergeTreeIndex(database, table, [with_marks = true])` where `database.table` is an existing table with `MergeTree` engine. [#58140](https://github.com/ClickHouse/ClickHouse/pull/58140) ([Anton Popov](https://github.com/CurtizJ)). -* Try to detect the file format automatically during schema inference if it's unknown in `file/s3/hdfs/url/azureBlobStorage` engines. Closes [#50576](https://github.com/ClickHouse/ClickHouse/issues/50576). [#59092](https://github.com/ClickHouse/ClickHouse/pull/59092) ([Kruglov Pavel](https://github.com/Avogar)). -* Add `generate_series` as a table function. This function generates a table with an arithmetic progression of natural numbers. [#59390](https://github.com/ClickHouse/ClickHouse/pull/59390) ([divanik](https://github.com/divanik)). -* Added query `ALTER TABLE table FORGET PARTITION partition` that removes ZooKeeper nodes related to an empty partition. [#59507](https://github.com/ClickHouse/ClickHouse/pull/59507) ([Sergei Trifonov](https://github.com/serxa)). -* Support reading and writing backups as tar archives.
[#59535](https://github.com/ClickHouse/ClickHouse/pull/59535) ([josh-hildred](https://github.com/josh-hildred)). -* Provides new aggregate function 'groupArrayIntersect'. Follows up: [#49862](https://github.com/ClickHouse/ClickHouse/issues/49862). [#59598](https://github.com/ClickHouse/ClickHouse/pull/59598) ([Yarik Briukhovetskyi](https://github.com/yariks5s)). -* Implemented system.dns_cache table, which can be useful for debugging DNS issues. [#59856](https://github.com/ClickHouse/ClickHouse/pull/59856) ([Kirill Nikiforov](https://github.com/allmazz)). -* Implemented support for S3Express buckets. [#59965](https://github.com/ClickHouse/ClickHouse/pull/59965) ([Nikita Taranov](https://github.com/nickitat)). -* The codec `LZ4HC` will accept a new level 2, which is faster than the previous minimum level 3, at the expense of less compression. In previous versions, `LZ4HC(2)` and less was the same as `LZ4HC(3)`. Author: [Cyan4973](https://github.com/Cyan4973). [#60090](https://github.com/ClickHouse/ClickHouse/pull/60090) ([Alexey Milovidov](https://github.com/alexey-milovidov)). -* Implemented system.dns_cache table, which can be useful for debugging DNS issues. New server setting dns_cache_max_size. [#60257](https://github.com/ClickHouse/ClickHouse/pull/60257) ([Kirill Nikiforov](https://github.com/allmazz)). -* Added function `toMillisecond` which returns the millisecond component for values of type`DateTime` or `DateTime64`. [#60281](https://github.com/ClickHouse/ClickHouse/pull/60281) ([Shaun Struwig](https://github.com/Blargian)). -* Support single-argument version for the merge table function, as `merge(['db_name', ] 'tables_regexp')`. [#60372](https://github.com/ClickHouse/ClickHouse/pull/60372) ([豪肥肥](https://github.com/HowePa)). -* Make all format names case insensitive, like Tsv, or TSV, or tsv, or even rowbinary. [#60420](https://github.com/ClickHouse/ClickHouse/pull/60420) ([豪肥肥](https://github.com/HowePa)). -* Added new syntax which allows to specify definer user in View/Materialized View. This allows to execute selects/inserts from views without explicit grants for underlying tables. [#60439](https://github.com/ClickHouse/ClickHouse/pull/60439) ([pufit](https://github.com/pufit)). -* Add four properties to the `StorageMemory` (memory-engine) `min_bytes_to_keep, max_bytes_to_keep, min_rows_to_keep` and `max_rows_to_keep` - Add tests to reflect new changes - Update `memory.md` documentation - Add table `context` property to `MemorySink` to enable access to table parameter bounds. [#60612](https://github.com/ClickHouse/ClickHouse/pull/60612) ([Jake Bamrah](https://github.com/JakeBamrah)). -* Added function `toMillisecond` which returns the millisecond component for values of type`DateTime` or `DateTime64`. [#60649](https://github.com/ClickHouse/ClickHouse/pull/60649) ([Robert Schulze](https://github.com/rschu1ze)). -* Separate limits on number of waiting and executing queries. Added new server setting `max_waiting_queries` that limits the number of queries waiting due to `async_load_databases`. Existing limits on number of executing queries no longer count waiting queries. [#61053](https://github.com/ClickHouse/ClickHouse/pull/61053) ([Sergei Trifonov](https://github.com/serxa)). -* Add support for `ATTACH PARTITION ALL`. [#61107](https://github.com/ClickHouse/ClickHouse/pull/61107) ([Kirill Nikiforov](https://github.com/allmazz)). - -#### Performance Improvement {#performance-improvement} -* Eliminates min/max/any/anyLast aggregators of GROUP BY keys in SELECT section. 
[#52230](https://github.com/ClickHouse/ClickHouse/pull/52230) ([JackyWoo](https://github.com/JackyWoo)). -* Improve the performance of the serialized aggregation method when involving multiple [nullable] columns. This is a general version of [#51399](https://github.com/ClickHouse/ClickHouse/issues/51399) that doesn't compromise on abstraction integrity. [#55809](https://github.com/ClickHouse/ClickHouse/pull/55809) ([Amos Bird](https://github.com/amosbird)). -* Lazily build join output to improve performance of ALL join. [#58278](https://github.com/ClickHouse/ClickHouse/pull/58278) ([LiuNeng](https://github.com/liuneng1994)). -* Improvements to aggregate functions ArgMin / ArgMax / any / anyLast / anyHeavy, as well as `ORDER BY {u8/u16/u32/u64/i8/i16/i32/i64} LIMIT 1` queries. [#58640](https://github.com/ClickHouse/ClickHouse/pull/58640) ([Raúl Marín](https://github.com/Algunenano)). -* Optimize performance of conditional sum/avg for bigint and big decimal types by reducing branch misses. [#59504](https://github.com/ClickHouse/ClickHouse/pull/59504) ([李扬](https://github.com/taiyang-li)). -* Improve performance of SELECTs with active mutations. [#59531](https://github.com/ClickHouse/ClickHouse/pull/59531) ([Azat Khuzhin](https://github.com/azat)). -* Trivial optimization of the column filter: avoid filtering columns whose underlying data type is not a number with `result_size_hint = -1`. Peak memory can be reduced to 44% of the original in some cases. [#59698](https://github.com/ClickHouse/ClickHouse/pull/59698) ([李扬](https://github.com/taiyang-li)). -* The primary key will use less memory. [#60049](https://github.com/ClickHouse/ClickHouse/pull/60049) ([Alexey Milovidov](https://github.com/alexey-milovidov)). -* Improve memory usage for the primary key and some other operations. [#60050](https://github.com/ClickHouse/ClickHouse/pull/60050) ([Alexey Milovidov](https://github.com/alexey-milovidov)). -* The tables' primary keys will be loaded in memory lazily on first access. This is controlled by the new MergeTree setting `primary_key_lazy_load`, which is on by default. This provides several advantages: - it will not be loaded for tables that are not used; - if there is not enough memory, an exception will be thrown on first use instead of at server startup. This provides several disadvantages: - the latency of loading the primary key will be paid on the first query rather than before accepting connections; this theoretically may introduce a thundering-herd problem. This closes [#11188](https://github.com/ClickHouse/ClickHouse/issues/11188). [#60093](https://github.com/ClickHouse/ClickHouse/pull/60093) ([Alexey Milovidov](https://github.com/alexey-milovidov)). -* Vectorized function `dotProduct`, which is useful for vector search. [#60202](https://github.com/ClickHouse/ClickHouse/pull/60202) ([Robert Schulze](https://github.com/rschu1ze)). -* If the table's primary key contains mostly useless columns, don't keep them in memory. This is controlled by a new setting `primary_key_ratio_of_unique_prefix_values_to_skip_suffix_columns` with the value `0.9` by default, which means: for a composite primary key, if a column changes its value for at least 0.9 of all the times, the next columns after it will not be loaded. [#60255](https://github.com/ClickHouse/ClickHouse/pull/60255) ([Alexey Milovidov](https://github.com/alexey-milovidov)). -* Execute the multiIf function column-wise when the result type's underlying type is a number.
[#60384](https://github.com/ClickHouse/ClickHouse/pull/60384) ([李扬](https://github.com/taiyang-li)). -* As is shown in Fig 1, the replacement of "&&" with "&" could generate the SIMD code. ![image](https://github.com/ClickHouse/ClickHouse/assets/26588299/a5a72ac4-6dc6-4d52-835a-4f512e55f0b9) Fig 1. Code compiled from '&&' (left) and '&' (right). [#60498](https://github.com/ClickHouse/ClickHouse/pull/60498) ([Zhiguo Zhou](https://github.com/ZhiguoZh)). -* Faster (almost 2x) mutexes (was slower due to ThreadFuzzer). [#60823](https://github.com/ClickHouse/ClickHouse/pull/60823) ([Azat Khuzhin](https://github.com/azat)). -* Move connection drain from prepare to work, and drain multiple connections in parallel. [#60845](https://github.com/ClickHouse/ClickHouse/pull/60845) ([lizhuoyu5](https://github.com/lzydmxy)). -* Optimize insertManyFrom of nullable number or nullable string. [#60846](https://github.com/ClickHouse/ClickHouse/pull/60846) ([李扬](https://github.com/taiyang-li)). -* Optimized function `dotProduct` to omit unnecessary and expensive memory copies. [#60928](https://github.com/ClickHouse/ClickHouse/pull/60928) ([Robert Schulze](https://github.com/rschu1ze)). -* Operations with the filesystem cache will suffer less from the lock contention. [#61066](https://github.com/ClickHouse/ClickHouse/pull/61066) ([Alexey Milovidov](https://github.com/alexey-milovidov)). -* Optimize ColumnString::replicate and prevent memcpySmallAllowReadWriteOverflow15Impl from being optimized to built-in memcpy. Close [#61074](https://github.com/ClickHouse/ClickHouse/issues/61074). ColumnString::replicate speeds up by 2.46x on x86-64. [#61075](https://github.com/ClickHouse/ClickHouse/pull/61075) ([李扬](https://github.com/taiyang-li)). -* 30x faster printing for 256-bit integers. [#61100](https://github.com/ClickHouse/ClickHouse/pull/61100) ([Raúl Marín](https://github.com/Algunenano)). -* If a query with a syntax error contained COLUMNS matcher with a regular expression, the regular expression was compiled each time during the parser's backtracking, instead of being compiled once. This was a fundamental error. The compiled regexp was put to AST. But the letter A in AST means "abstract" which means it should not contain heavyweight objects. Parts of AST can be created and discarded during parsing, including a large number of backtracking. This leads to slowness on the parsing side and consequently allows DoS by a readonly user. But the main problem is that it prevents progress in fuzzers. [#61543](https://github.com/ClickHouse/ClickHouse/pull/61543) ([Alexey Milovidov](https://github.com/alexey-milovidov)). - -#### Improvement {#improvement} -* While running the MODIFY COLUMN query for materialized views, check the inner table's structure to ensure every column exists. [#47427](https://github.com/ClickHouse/ClickHouse/pull/47427) ([sunny](https://github.com/sunny19930321)). -* Added table `system.keywords` which contains all the keywords from parser. Mostly needed and will be used for better fuzzing and syntax highlighting. [#51808](https://github.com/ClickHouse/ClickHouse/pull/51808) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)). -* Added support for parameterized view with analyzer to not analyze create parameterized view. Refactor existing parameterized view logic to not analyze create parameterized view. [#54211](https://github.com/ClickHouse/ClickHouse/pull/54211) ([SmitaRKulkarni](https://github.com/SmitaRKulkarni)). -* Ordinary database engine is deprecated. 
You will receive a warning in clickhouse-client if your server is using it. This closes [#52229](https://github.com/ClickHouse/ClickHouse/issues/52229). [#56942](https://github.com/ClickHouse/ClickHouse/pull/56942) ([shabroo](https://github.com/shabroo)). -* All zero-copy locks related to a table have to be dropped when the table is dropped. The directory which contains these locks has to be removed as well. [#57575](https://github.com/ClickHouse/ClickHouse/pull/57575) ([Sema Checherinda](https://github.com/CheSema)). -* Add short-circuit ability for the `dictGetOrDefault` function. Closes [#52098](https://github.com/ClickHouse/ClickHouse/issues/52098). [#57767](https://github.com/ClickHouse/ClickHouse/pull/57767) ([jsc0218](https://github.com/jsc0218)). -* Allow declaring enums in the external table structure. [#57857](https://github.com/ClickHouse/ClickHouse/pull/57857) ([Duc Canh Le](https://github.com/canhld94)). -* Running `ALTER COLUMN MATERIALIZE` on a column with a `DEFAULT` or `MATERIALIZED` expression now writes the correct values: the default value for existing parts with the default value, or the non-default value for existing parts with a non-default value. Previously, the default value was written for all existing parts. [#58023](https://github.com/ClickHouse/ClickHouse/pull/58023) ([Duc Canh Le](https://github.com/canhld94)). -* Enabled backoff logic (e.g. exponential), which allows reducing CPU usage, memory usage, and log file sizes. [#58036](https://github.com/ClickHouse/ClickHouse/pull/58036) ([MikhailBurdukov](https://github.com/MikhailBurdukov)). -* Consider lightweight deleted rows when selecting parts to merge. [#58223](https://github.com/ClickHouse/ClickHouse/pull/58223) ([Zhuo Qiu](https://github.com/jewelzqiu)). -* Allow defining `volume_priority` in `storage_configuration`. [#58533](https://github.com/ClickHouse/ClickHouse/pull/58533) ([Andrey Zvonov](https://github.com/zvonand)). -* Add support for the Date32 type in the T64 codec. [#58738](https://github.com/ClickHouse/ClickHouse/pull/58738) ([Hongbin Ma](https://github.com/binmahone)). -* This PR makes HTTP/HTTPS connections reusable for all use cases, even when the response is 3xx or 4xx. [#58845](https://github.com/ClickHouse/ClickHouse/pull/58845) ([Sema Checherinda](https://github.com/CheSema)). -* Added comments for columns of more system tables. Continuation of https://github.com/ClickHouse/ClickHouse/pull/58356. [#59016](https://github.com/ClickHouse/ClickHouse/pull/59016) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)). -* Now we can use virtual columns in PREWHERE. It's worthwhile for non-const virtual columns like `_part_offset`. [#59033](https://github.com/ClickHouse/ClickHouse/pull/59033) ([Amos Bird](https://github.com/amosbird)). -* Settings for the Distributed table engine can now be specified in the server configuration file (similar to MergeTree settings), e.g. ``` false ```. [#59291](https://github.com/ClickHouse/ClickHouse/pull/59291) ([Azat Khuzhin](https://github.com/azat)). -* Keeper improvement: cache only a certain amount of logs in memory, controlled by `latest_logs_cache_size_threshold` and `commit_logs_cache_size_threshold`. [#59460](https://github.com/ClickHouse/ClickHouse/pull/59460) ([Antonio Andelic](https://github.com/antonio2368)). -* Instead of using a constant key, object storage now generates a key for determining the capability to remove objects. [#59495](https://github.com/ClickHouse/ClickHouse/pull/59495) ([Sema Checherinda](https://github.com/CheSema)).
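To illustrate the short-circuit ability added for `dictGetOrDefault` above, a minimal sketch; the dictionary, table, and column names are hypothetical, and the intent is that the default expression is only evaluated for keys missing from the dictionary:

```sql
-- The concat(...) default should only be computed for ids absent from the dictionary.
SELECT
    id,
    dictGetOrDefault('hypothetical_dict', 'value', toUInt64(id), concat('missing_', toString(id))) AS value
FROM hypothetical_events;
```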
-* Don't infer floats in exponential notation by default. Add a setting `input_format_try_infer_exponent_floats` that will restore the previous behaviour (disabled by default). Closes [#59476](https://github.com/ClickHouse/ClickHouse/issues/59476). [#59500](https://github.com/ClickHouse/ClickHouse/pull/59500) ([Kruglov Pavel](https://github.com/Avogar)). -* Allow alter operations to be surrounded by parentheses. The emission of parentheses can be controlled by the `format_alter_operations_with_parentheses` config. By default, in formatted queries the parentheses are emitted, as we store the formatted alter operations in some places as metadata (e.g.: mutations). The new syntax clarifies some of the queries where alter operations end in a list. E.g.: `ALTER TABLE x MODIFY TTL date GROUP BY a, b, DROP COLUMN c` cannot be parsed properly with the old syntax. In the new syntax the query `ALTER TABLE x (MODIFY TTL date GROUP BY a, b), (DROP COLUMN c)` is obvious. Older versions are not able to read the new syntax, therefore using the new syntax might cause issues if newer and older versions of ClickHouse are mixed in a single cluster. [#59532](https://github.com/ClickHouse/ClickHouse/pull/59532) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)). -* Bumped Intel QPL (used by codec `DEFLATE_QPL`) from v1.3.1 to v1.4.0. Also fixed a bug in the polling timeout mechanism: we observed that in some cases the timeout doesn't work properly, and if the timeout happens, IAA and the CPU may process the buffer concurrently. For now, we make sure the IAA codec status is not QPL_STS_BEING_PROCESSED and then fall back to the SW codec. [#59551](https://github.com/ClickHouse/ClickHouse/pull/59551) ([jasperzhu](https://github.com/jinjunzh)). -* Add positional pread in libhdfs3. If you want to call positional read in libhdfs3, use the hdfsPread function in hdfs.h as follows: `tSize hdfsPread(hdfsFS fs, hdfsFile file, void * buffer, tSize length, tOffset position);`. [#59624](https://github.com/ClickHouse/ClickHouse/pull/59624) ([M1eyu](https://github.com/M1eyu2018)). -* Check for stack overflow in parsers even if the user misconfigured the `max_parser_depth` setting to a very high value. This closes [#59622](https://github.com/ClickHouse/ClickHouse/issues/59622). [#59697](https://github.com/ClickHouse/ClickHouse/pull/59697) ([Alexey Milovidov](https://github.com/alexey-milovidov)). -* Unify the behaviour of XML-created and SQL-created named collections in Kafka storage. [#59710](https://github.com/ClickHouse/ClickHouse/pull/59710) ([Pervakov Grigorii](https://github.com/GrigoryPervakov)). -* Allow a UUID in `replica_path` if CREATE TABLE explicitly has it. [#59908](https://github.com/ClickHouse/ClickHouse/pull/59908) ([Azat Khuzhin](https://github.com/azat)). -* Add the `metadata_version` column of ReplicatedMergeTree tables to the `system.tables` system table. [#59942](https://github.com/ClickHouse/ClickHouse/pull/59942) ([Maksim Kita](https://github.com/kitaisreal)). -* Keeper improvement: add retries on failures for Disk-related operations. [#59980](https://github.com/ClickHouse/ClickHouse/pull/59980) ([Antonio Andelic](https://github.com/antonio2368)). -* Add new config setting `backups.remove_backup_files_after_failure`: ``` true ```. [#60002](https://github.com/ClickHouse/ClickHouse/pull/60002) ([Vitaly Baranov](https://github.com/vitlibar)). -* Use multiple threads while reading the metadata of tables from a backup while executing the RESTORE command.
[#60040](https://github.com/ClickHouse/ClickHouse/pull/60040) ([Vitaly Baranov](https://github.com/vitlibar)). -* Now if `StorageBuffer` has more than 1 shard (`num_layers` > 1) background flush will happen simultaneously for all shards in multiple threads. [#60111](https://github.com/ClickHouse/ClickHouse/pull/60111) ([alesapin](https://github.com/alesapin)). -* Support specifying users for specific S3 settings in config using `user` key. [#60144](https://github.com/ClickHouse/ClickHouse/pull/60144) ([Antonio Andelic](https://github.com/antonio2368)). -* Copy S3 file GCP fallback to buffer copy in case GCP returned `Internal Error` with `GATEWAY_TIMEOUT` HTTP error code. [#60164](https://github.com/ClickHouse/ClickHouse/pull/60164) ([Maksim Kita](https://github.com/kitaisreal)). -* Allow "local" as object storage type instead of "local_blob_storage". [#60165](https://github.com/ClickHouse/ClickHouse/pull/60165) ([Kseniia Sumarokova](https://github.com/kssenii)). -* Implement comparison operator for Variant values and proper Field inserting into Variant column. Don't allow creating `Variant` type with similar variant types by default (allow uder a setting `allow_suspicious_variant_types`) Closes [#59996](https://github.com/ClickHouse/ClickHouse/issues/59996). Closes [#59850](https://github.com/ClickHouse/ClickHouse/issues/59850). [#60198](https://github.com/ClickHouse/ClickHouse/pull/60198) ([Kruglov Pavel](https://github.com/Avogar)). -* Improved overall usability of virtual columns. Now it is allowed to use virtual columns in `PREWHERE` (it's worthwhile for non-const virtual columns like `_part_offset`). Now a builtin documentation is available for virtual columns as a comment of column in `DESCRIBE` query with enabled setting `describe_include_virtual_columns`. [#60205](https://github.com/ClickHouse/ClickHouse/pull/60205) ([Anton Popov](https://github.com/CurtizJ)). -* Short circuit execution for `ULIDStringToDateTime`. [#60211](https://github.com/ClickHouse/ClickHouse/pull/60211) ([Juan Madurga](https://github.com/jlmadurga)). -* Added `query_id` column for tables `system.backups` and `system.backup_log`. Added error stacktrace to `error` column. [#60220](https://github.com/ClickHouse/ClickHouse/pull/60220) ([Maksim Kita](https://github.com/kitaisreal)). -* Parallel flush of pending INSERT blocks of Distributed engine on `DETACH`/server shutdown and `SYSTEM FLUSH DISTRIBUTED` (Parallelism will work only if you have multi disk policy for table (like everything in Distributed engine right now)). [#60225](https://github.com/ClickHouse/ClickHouse/pull/60225) ([Azat Khuzhin](https://github.com/azat)). -* Filter setting is improper in `joinRightColumnsSwitchNullability`, resolve [#59625](https://github.com/ClickHouse/ClickHouse/issues/59625). [#60259](https://github.com/ClickHouse/ClickHouse/pull/60259) ([lgbo](https://github.com/lgbo-ustc)). -* Add a setting to force read-through cache for merges. [#60308](https://github.com/ClickHouse/ClickHouse/pull/60308) ([Kseniia Sumarokova](https://github.com/kssenii)). -* Issue [#57598](https://github.com/ClickHouse/ClickHouse/issues/57598) mentions a variant behaviour regarding transaction handling. An issued COMMIT/ROLLBACK when no transaction is active is reported as an error contrary to MySQL behaviour. [#60338](https://github.com/ClickHouse/ClickHouse/pull/60338) ([PapaToemmsn](https://github.com/PapaToemmsn)). -* Added `none_only_active` mode for `distributed_ddl_output_mode` setting. 
[#60340](https://github.com/ClickHouse/ClickHouse/pull/60340) ([Alexander Tokmakov](https://github.com/tavplubix)). -* Connections through the MySQL port now automatically run with the setting `prefer_column_name_to_alias = 1` to support QuickSight out of the box. Also, the settings `mysql_map_string_to_text_in_show_columns` and `mysql_map_fixed_string_to_text_in_show_columns` are now enabled by default, also affecting only MySQL connections. This increases compatibility with more BI tools. [#60365](https://github.com/ClickHouse/ClickHouse/pull/60365) ([Robert Schulze](https://github.com/rschu1ze)). -* When the output format is a Pretty format and a block consists of a single numeric value which exceeds one million, a readable number will be printed on the right of the table, e.g. ``` ┌──────count()─┐ │ 233765663884 │ -- 233.77 billion └──────────────┘ ```. [#60379](https://github.com/ClickHouse/ClickHouse/pull/60379) ([rogeryk](https://github.com/rogeryk)). -* Allow configuring HTTP redirect handlers for clickhouse-server. For example, you can make `/` redirect to the Play UI. [#60390](https://github.com/ClickHouse/ClickHouse/pull/60390) ([Alexey Milovidov](https://github.com/alexey-milovidov)). -* The advanced dashboard has slightly better colors for multi-line graphs. [#60391](https://github.com/ClickHouse/ClickHouse/pull/60391) ([Alexey Milovidov](https://github.com/alexey-milovidov)). -* Fix a race condition in JavaScript code leading to duplicate charts on top of each other. [#60392](https://github.com/ClickHouse/ClickHouse/pull/60392) ([Alexey Milovidov](https://github.com/alexey-milovidov)). -* Check for stack overflow in parsers even if the user misconfigured the `max_parser_depth` setting to a very high value. This closes [#59622](https://github.com/ClickHouse/ClickHouse/issues/59622). [#60434](https://github.com/ClickHouse/ClickHouse/pull/60434) ([Alexey Milovidov](https://github.com/alexey-milovidov)). -* Function `substring` now has a new alias `byteSlice`. [#60494](https://github.com/ClickHouse/ClickHouse/pull/60494) ([Robert Schulze](https://github.com/rschu1ze)). -* Renamed server setting `dns_cache_max_size` to `dns_cache_max_entries` to reduce ambiguity. [#60500](https://github.com/ClickHouse/ClickHouse/pull/60500) ([Kirill Nikiforov](https://github.com/allmazz)). -* `SHOW INDEX | INDEXES | INDICES | KEYS` no longer sorts by the primary key columns (which was unintuitive). [#60514](https://github.com/ClickHouse/ClickHouse/pull/60514) ([Robert Schulze](https://github.com/rschu1ze)). -* Keeper improvement: abort during startup if an invalid snapshot is detected, to avoid data loss. [#60537](https://github.com/ClickHouse/ClickHouse/pull/60537) ([Antonio Andelic](https://github.com/antonio2368)). -* Added fault injection for MergeTree splitting of read ranges into intersecting and non-intersecting ones, using the `merge_tree_read_split_ranges_into_intersecting_and_non_intersecting_fault_probability` setting. [#60548](https://github.com/ClickHouse/ClickHouse/pull/60548) ([Maksim Kita](https://github.com/kitaisreal)). -* The Advanced dashboard now has controls always visible on scrolling. This allows you to add a new chart without scrolling up. [#60692](https://github.com/ClickHouse/ClickHouse/pull/60692) ([Alexey Milovidov](https://github.com/alexey-milovidov)). -* String types and Enums can be used in the same context, such as arrays, UNION queries, and conditional expressions. This closes [#60726](https://github.com/ClickHouse/ClickHouse/issues/60726).
[#60727](https://github.com/ClickHouse/ClickHouse/pull/60727) ([Alexey Milovidov](https://github.com/alexey-milovidov)). -* Update tzdata to 2024a. [#60768](https://github.com/ClickHouse/ClickHouse/pull/60768) ([Raúl Marín](https://github.com/Algunenano)). -* Support files without a format extension in the Filesystem database. [#60795](https://github.com/ClickHouse/ClickHouse/pull/60795) ([Kruglov Pavel](https://github.com/Avogar)). -* Keeper improvement: support `leadership_expiry_ms` in Keeper's settings. [#60806](https://github.com/ClickHouse/ClickHouse/pull/60806) ([Brokenice0415](https://github.com/Brokenice0415)). -* Always infer exponential numbers in JSON formats regardless of the setting `input_format_try_infer_exponent_floats`. Add setting `input_format_json_use_string_type_for_ambiguous_paths_in_named_tuples_inference_from_objects` that allows using the String type for ambiguous paths instead of an exception during named Tuples inference from JSON objects. [#60808](https://github.com/ClickHouse/ClickHouse/pull/60808) ([Kruglov Pavel](https://github.com/Avogar)). -* Add a flag for SMJ (sort-merge join) to treat null as the biggest/smallest value, so the behavior can be compatible with other SQL systems, like Apache Spark. [#60896](https://github.com/ClickHouse/ClickHouse/pull/60896) ([loudongfeng](https://github.com/loudongfeng)). -* The ClickHouse version has been added to Docker labels. Closes [#54224](https://github.com/ClickHouse/ClickHouse/issues/54224). [#60949](https://github.com/ClickHouse/ClickHouse/pull/60949) ([Nikolay Monkov](https://github.com/nikmonkov)). -* Add a setting `parallel_replicas_allow_in_with_subquery = 1` which allows subqueries for IN to work with parallel replicas. [#60950](https://github.com/ClickHouse/ClickHouse/pull/60950) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). -* DNSResolver shuffles the set of resolved IPs. [#60965](https://github.com/ClickHouse/ClickHouse/pull/60965) ([Sema Checherinda](https://github.com/CheSema)). -* Support detecting the output format by file extension in `clickhouse-client` and `clickhouse-local`. [#61036](https://github.com/ClickHouse/ClickHouse/pull/61036) ([豪肥肥](https://github.com/HowePa)). -* Check for memory limit updates periodically. [#61049](https://github.com/ClickHouse/ClickHouse/pull/61049) ([Han Fei](https://github.com/hanfei1991)). -* Enable processors profiling (time spent / in and out bytes for sorting, aggregation, ...) by default. [#61096](https://github.com/ClickHouse/ClickHouse/pull/61096) ([Azat Khuzhin](https://github.com/azat)). -* Add the function `toUInt128OrZero`, which was missed by mistake (the mistake is related to https://github.com/ClickHouse/ClickHouse/pull/945). The compatibility aliases `FROM_UNIXTIME` and `DATE_FORMAT` (they are not ClickHouse-native and only exist for MySQL compatibility) have been made case insensitive, as expected for SQL-compatibility aliases. [#61114](https://github.com/ClickHouse/ClickHouse/pull/61114) ([Alexey Milovidov](https://github.com/alexey-milovidov)). -* Improvements to the access checks, allowing revocation of rights that are not possessed, in case the target user doesn't have the revoked grants either. Example: ```sql GRANT SELECT ON *.* TO user1; REVOKE SELECT ON system.* FROM user1; ```. [#61115](https://github.com/ClickHouse/ClickHouse/pull/61115) ([pufit](https://github.com/pufit)). -* Fix an error in the previous optimization https://github.com/ClickHouse/ClickHouse/pull/59698: remove `break` to make sure the first filtered column has the minimum size. cc @jsc0218.
[#61145](https://github.com/ClickHouse/ClickHouse/pull/61145) ([李扬](https://github.com/taiyang-li)). -* Fix `has()` function with `Nullable` column (fixes [#60214](https://github.com/ClickHouse/ClickHouse/issues/60214)). [#61249](https://github.com/ClickHouse/ClickHouse/pull/61249) ([Mikhail Koviazin](https://github.com/mkmkme)). -* Now it's possible to specify attribute `merge="true"` in config substitutions for subtrees ``. In case this attribute specified, clickhouse will merge subtree with existing configuration, otherwise default behavior is append new content to configuration. [#61299](https://github.com/ClickHouse/ClickHouse/pull/61299) ([alesapin](https://github.com/alesapin)). -* Add async metrics for virtual memory mappings: VMMaxMapCount & VMNumMaps. Closes [#60662](https://github.com/ClickHouse/ClickHouse/issues/60662). [#61354](https://github.com/ClickHouse/ClickHouse/pull/61354) ([Tuan Pham Anh](https://github.com/tuanpavn)). -* Use `temporary_files_codec` setting in all places where we create temporary data, for example external memory sorting and external memory GROUP BY. Before it worked only in `partial_merge` JOIN algorithm. [#61456](https://github.com/ClickHouse/ClickHouse/pull/61456) ([Maksim Kita](https://github.com/kitaisreal)). -* Remove duplicated check `containing_part.empty()`, It's already being checked here: https://github.com/ClickHouse/ClickHouse/blob/1296dac3c7e47670872c15e3f5e58f869e0bd2f2/src/Storages/MergeTree/MergeTreeData.cpp#L6141. [#61467](https://github.com/ClickHouse/ClickHouse/pull/61467) ([William Schoeffel](https://github.com/wiledusc)). -* Add a new setting `max_parser_backtracks` which allows to limit the complexity of query parsing. [#61502](https://github.com/ClickHouse/ClickHouse/pull/61502) ([Alexey Milovidov](https://github.com/alexey-milovidov)). -* Less contention during dynamic resize of filesystem cache. [#61524](https://github.com/ClickHouse/ClickHouse/pull/61524) ([Kseniia Sumarokova](https://github.com/kssenii)). -* Disallow sharded mode of StorageS3 queue, because it will be rewritten. [#61537](https://github.com/ClickHouse/ClickHouse/pull/61537) ([Kseniia Sumarokova](https://github.com/kssenii)). -* Fixed typo: from `use_leagcy_max_level` to `use_legacy_max_level`. [#61545](https://github.com/ClickHouse/ClickHouse/pull/61545) ([William Schoeffel](https://github.com/wiledusc)). -* Remove some duplicate entries in blob_storage_log. [#61622](https://github.com/ClickHouse/ClickHouse/pull/61622) ([YenchangChan](https://github.com/YenchangChan)). -* Added `current_user` function as a compatibility alias for MySQL. [#61770](https://github.com/ClickHouse/ClickHouse/pull/61770) ([Yarik Briukhovetskyi](https://github.com/yariks5s)). -* Use managed identity for backups IO when using Azure Blob Storage. Add a setting to prevent ClickHouse from attempting to create a non-existent container, which requires permissions at the storage account level. [#61785](https://github.com/ClickHouse/ClickHouse/pull/61785) ([Daniel Pozo Escalona](https://github.com/danipozo)). -* In the previous version, some numbers in Pretty formats were not pretty enough. [#61794](https://github.com/ClickHouse/ClickHouse/pull/61794) ([Alexey Milovidov](https://github.com/alexey-milovidov)). -* A long value in Pretty formats won't be cut if it is the single value in the resultset, such as in the result of the `SHOW CREATE TABLE` query. [#61795](https://github.com/ClickHouse/ClickHouse/pull/61795) ([Alexey Milovidov](https://github.com/alexey-milovidov)). 
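A small sketch of the MySQL-compatibility alias `current_user` mentioned above, assuming the alias is callable in the same way as the native `currentUser()` function:

```sql
-- Both expressions should return the name of the authenticated user.
SELECT currentUser() AS native_name, current_user() AS mysql_alias;
```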
-* Similarly to `clickhouse-local`, `clickhouse-client` will accept the `--output-format` option as a synonym to the `--format` option. This closes [#59848](https://github.com/ClickHouse/ClickHouse/issues/59848). [#61797](https://github.com/ClickHouse/ClickHouse/pull/61797) ([Alexey Milovidov](https://github.com/alexey-milovidov)). -* If stdout is a terminal and the output format is not specified, `clickhouse-client` and similar tools will use `PrettyCompact` by default, similarly to the interactive mode. `clickhouse-client` and `clickhouse-local` will handle command line arguments for input and output formats in a unified fashion. This closes [#61272](https://github.com/ClickHouse/ClickHouse/issues/61272). [#61800](https://github.com/ClickHouse/ClickHouse/pull/61800) ([Alexey Milovidov](https://github.com/alexey-milovidov)). -* Underscore digit groups in Pretty formats for better readability. This is controlled by a new setting, `output_format_pretty_highlight_digit_groups`. [#61802](https://github.com/ClickHouse/ClickHouse/pull/61802) ([Alexey Milovidov](https://github.com/alexey-milovidov)). - -#### Bug Fix (user-visible misbehavior in an official stable release) {#bug-fix-user-visible-misbehavior-in-an-official-stable-release} - -* Fix bug with `intDiv` for decimal arguments [#59243](https://github.com/ClickHouse/ClickHouse/pull/59243) ([Yarik Briukhovetskyi](https://github.com/yariks5s)). -* Fix_kql_issue_found_by_wingfuzz [#59626](https://github.com/ClickHouse/ClickHouse/pull/59626) ([Yong Wang](https://github.com/kashwy)). -* Fix error "Read beyond last offset" for AsynchronousBoundedReadBuffer [#59630](https://github.com/ClickHouse/ClickHouse/pull/59630) ([Vitaly Baranov](https://github.com/vitlibar)). -* rabbitmq: fix having neither acked nor nacked messages [#59775](https://github.com/ClickHouse/ClickHouse/pull/59775) ([Kseniia Sumarokova](https://github.com/kssenii)). -* Fix function execution over const and LowCardinality with GROUP BY const for analyzer [#59986](https://github.com/ClickHouse/ClickHouse/pull/59986) ([Azat Khuzhin](https://github.com/azat)). -* Fix scale conversion for DateTime64 [#60004](https://github.com/ClickHouse/ClickHouse/pull/60004) ([Yarik Briukhovetskyi](https://github.com/yariks5s)). -* Fix INSERT into SQLite with single quote (by escaping single quotes with a quote instead of backslash) [#60015](https://github.com/ClickHouse/ClickHouse/pull/60015) ([Azat Khuzhin](https://github.com/azat)). -* Fix optimize_uniq_to_count removing the column alias [#60026](https://github.com/ClickHouse/ClickHouse/pull/60026) ([Raúl Marín](https://github.com/Algunenano)). -* Fix finished_mutations_to_keep=0 for MergeTree (as docs says 0 is to keep everything) [#60031](https://github.com/ClickHouse/ClickHouse/pull/60031) ([Azat Khuzhin](https://github.com/azat)). -* Fix possible exception from s3queue table on drop [#60036](https://github.com/ClickHouse/ClickHouse/pull/60036) ([Kseniia Sumarokova](https://github.com/kssenii)). -* PartsSplitter invalid ranges for the same part [#60041](https://github.com/ClickHouse/ClickHouse/pull/60041) ([Maksim Kita](https://github.com/kitaisreal)). -* Use max_query_size from context in DDLLogEntry instead of hardcoded 4096 [#60083](https://github.com/ClickHouse/ClickHouse/pull/60083) ([Kruglov Pavel](https://github.com/Avogar)). -* Fix inconsistent formatting of queries [#60095](https://github.com/ClickHouse/ClickHouse/pull/60095) ([Alexey Milovidov](https://github.com/alexey-milovidov)). 
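To illustrate the `output_format_pretty_highlight_digit_groups` setting described above, a minimal sketch; the exact rendering depends on the terminal and on which Pretty format variant is used:

```sql
-- Digit groups of the large number are visually separated in Pretty formats.
SELECT 1234567890123 AS big_number
SETTINGS output_format_pretty_highlight_digit_groups = 1
FORMAT PrettyCompact;
```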
-* Fix inconsistent formatting of explain in subqueries [#60102](https://github.com/ClickHouse/ClickHouse/pull/60102) ([Alexey Milovidov](https://github.com/alexey-milovidov)). -* Fix cosineDistance crash with Nullable [#60150](https://github.com/ClickHouse/ClickHouse/pull/60150) ([Raúl Marín](https://github.com/Algunenano)). -* Allow casting of bools in string representation to to true bools [#60160](https://github.com/ClickHouse/ClickHouse/pull/60160) ([Robert Schulze](https://github.com/rschu1ze)). -* Fix system.s3queue_log [#60166](https://github.com/ClickHouse/ClickHouse/pull/60166) ([Kseniia Sumarokova](https://github.com/kssenii)). -* Fix arrayReduce with nullable aggregate function name [#60188](https://github.com/ClickHouse/ClickHouse/pull/60188) ([Raúl Marín](https://github.com/Algunenano)). -* Fix actions execution during preliminary filtering (PK, partition pruning) [#60196](https://github.com/ClickHouse/ClickHouse/pull/60196) ([Azat Khuzhin](https://github.com/azat)). -* Hide sensitive info for s3queue [#60233](https://github.com/ClickHouse/ClickHouse/pull/60233) ([Kseniia Sumarokova](https://github.com/kssenii)). -* Revert "Replace `ORDER BY ALL` by `ORDER BY *`" [#60248](https://github.com/ClickHouse/ClickHouse/pull/60248) ([Robert Schulze](https://github.com/rschu1ze)). -* Azure Blob Storage : Fix issues endpoint and prefix [#60251](https://github.com/ClickHouse/ClickHouse/pull/60251) ([SmitaRKulkarni](https://github.com/SmitaRKulkarni)). -* Fix http exception codes. [#60252](https://github.com/ClickHouse/ClickHouse/pull/60252) ([Austin Kothig](https://github.com/kothiga)). -* fix LRUResource Cache bug (Hive cache) [#60262](https://github.com/ClickHouse/ClickHouse/pull/60262) ([shanfengp](https://github.com/Aed-p)). -* s3queue: fix bug (also fixes flaky test_storage_s3_queue/test.py::test_shards_distributed) [#60282](https://github.com/ClickHouse/ClickHouse/pull/60282) ([Kseniia Sumarokova](https://github.com/kssenii)). -* Fix use-of-uninitialized-value and invalid result in hashing functions with IPv6 [#60359](https://github.com/ClickHouse/ClickHouse/pull/60359) ([Kruglov Pavel](https://github.com/Avogar)). -* Force reanalysis if parallel replicas changed [#60362](https://github.com/ClickHouse/ClickHouse/pull/60362) ([Raúl Marín](https://github.com/Algunenano)). -* Fix usage of plain metadata type with new disks configuration option [#60396](https://github.com/ClickHouse/ClickHouse/pull/60396) ([Kseniia Sumarokova](https://github.com/kssenii)). -* Don't allow to set max_parallel_replicas to 0 as it doesn't make sense [#60430](https://github.com/ClickHouse/ClickHouse/pull/60430) ([Kruglov Pavel](https://github.com/Avogar)). -* Try to fix logical error 'Cannot capture column because it has incompatible type' in mapContainsKeyLike [#60451](https://github.com/ClickHouse/ClickHouse/pull/60451) ([Kruglov Pavel](https://github.com/Avogar)). -* Fix OptimizeDateOrDateTimeConverterWithPreimageVisitor with null arguments [#60453](https://github.com/ClickHouse/ClickHouse/pull/60453) ([Raúl Marín](https://github.com/Algunenano)). -* Try to avoid calculation of scalar subqueries for CREATE TABLE. [#60464](https://github.com/ClickHouse/ClickHouse/pull/60464) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). -* Merging [#59674](https://github.com/ClickHouse/ClickHouse/issues/59674). [#60470](https://github.com/ClickHouse/ClickHouse/pull/60470) ([Alexey Milovidov](https://github.com/alexey-milovidov)). 
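For the fix above that allows casting string representations of booleans to true `Bool` values, a minimal sketch:

```sql
-- Both casts should succeed and yield Bool values.
SELECT CAST('true' AS Bool) AS t, CAST('false' AS Bool) AS f;
```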
-* Correctly check keys in s3Cluster [#60477](https://github.com/ClickHouse/ClickHouse/pull/60477) ([Antonio Andelic](https://github.com/antonio2368)). -* Fix deadlock in parallel parsing when lots of rows are skipped due to errors [#60516](https://github.com/ClickHouse/ClickHouse/pull/60516) ([Kruglov Pavel](https://github.com/Avogar)). -* Fix_max_query_size_for_kql_compound_operator: [#60534](https://github.com/ClickHouse/ClickHouse/pull/60534) ([Yong Wang](https://github.com/kashwy)). -* Keeper fix: add timeouts when waiting for commit logs [#60544](https://github.com/ClickHouse/ClickHouse/pull/60544) ([Antonio Andelic](https://github.com/antonio2368)). -* Reduce the number of read rows from `system.numbers` [#60546](https://github.com/ClickHouse/ClickHouse/pull/60546) ([JackyWoo](https://github.com/JackyWoo)). -* Don't output number tips for date types [#60577](https://github.com/ClickHouse/ClickHouse/pull/60577) ([Raúl Marín](https://github.com/Algunenano)). -* Fix reading from MergeTree with non-deterministic functions in filter [#60586](https://github.com/ClickHouse/ClickHouse/pull/60586) ([Kruglov Pavel](https://github.com/Avogar)). -* Fix logical error on bad compatibility setting value type [#60596](https://github.com/ClickHouse/ClickHouse/pull/60596) ([Kruglov Pavel](https://github.com/Avogar)). -* Fix inconsistent aggregate function states in mixed x86-64 / ARM clusters [#60610](https://github.com/ClickHouse/ClickHouse/pull/60610) ([Harry Lee](https://github.com/HarryLeeIBM)). -* fix(prql): Robust panic handler [#60615](https://github.com/ClickHouse/ClickHouse/pull/60615) ([Maximilian Roos](https://github.com/max-sixty)). -* Fix `intDiv` for decimal and date arguments [#60672](https://github.com/ClickHouse/ClickHouse/pull/60672) ([Yarik Briukhovetskyi](https://github.com/yariks5s)). -* Fix: expand CTE in alter modify query [#60682](https://github.com/ClickHouse/ClickHouse/pull/60682) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)). -* Fix system.parts for non-Atomic/Ordinary database engine (i.e. Memory) [#60689](https://github.com/ClickHouse/ClickHouse/pull/60689) ([Azat Khuzhin](https://github.com/azat)). -* Fix "Invalid storage definition in metadata file" for parameterized views [#60708](https://github.com/ClickHouse/ClickHouse/pull/60708) ([Azat Khuzhin](https://github.com/azat)). -* Fix buffer overflow in CompressionCodecMultiple [#60731](https://github.com/ClickHouse/ClickHouse/pull/60731) ([Alexey Milovidov](https://github.com/alexey-milovidov)). -* Remove nonsense from SQL/JSON [#60738](https://github.com/ClickHouse/ClickHouse/pull/60738) ([Alexey Milovidov](https://github.com/alexey-milovidov)). -* Remove wrong sanitize checking in aggregate function quantileGK [#60740](https://github.com/ClickHouse/ClickHouse/pull/60740) ([李扬](https://github.com/taiyang-li)). -* Fix insert-select + insert_deduplication_token bug by setting streams to 1 [#60745](https://github.com/ClickHouse/ClickHouse/pull/60745) ([Jordi Villar](https://github.com/jrdi)). -* Prevent setting custom metadata headers on unsupported multipart upload operations [#60748](https://github.com/ClickHouse/ClickHouse/pull/60748) ([Francisco J. Jurado Moreno](https://github.com/Beetelbrox)). -* Fix toStartOfInterval [#60763](https://github.com/ClickHouse/ClickHouse/pull/60763) ([Andrey Zvonov](https://github.com/zvonand)). -* Fix crash in arrayEnumerateRanked [#60764](https://github.com/ClickHouse/ClickHouse/pull/60764) ([Raúl Marín](https://github.com/Algunenano)). 
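To illustrate the `toStartOfInterval` fix listed above, a minimal sketch of the intended behaviour (the timestamp is arbitrary):

```sql
-- Rounds the timestamp down to the start of its 15-minute bucket: 2024-03-15 14:30:00.
SELECT toStartOfInterval(toDateTime('2024-03-15 14:37:42'), INTERVAL 15 MINUTE) AS bucket;
```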
-* Fix crash when using input() in INSERT SELECT JOIN [#60765](https://github.com/ClickHouse/ClickHouse/pull/60765) ([Kruglov Pavel](https://github.com/Avogar)). -* Fix crash with different allow_experimental_analyzer value in subqueries [#60770](https://github.com/ClickHouse/ClickHouse/pull/60770) ([Dmitry Novik](https://github.com/novikd)). -* Remove recursion when reading from S3 [#60849](https://github.com/ClickHouse/ClickHouse/pull/60849) ([Antonio Andelic](https://github.com/antonio2368)). -* Fix possible stuck on error in HashedDictionaryParallelLoader [#60926](https://github.com/ClickHouse/ClickHouse/pull/60926) ([vdimir](https://github.com/vdimir)). -* Fix async RESTORE with Replicated database [#60934](https://github.com/ClickHouse/ClickHouse/pull/60934) ([Antonio Andelic](https://github.com/antonio2368)). -* Fix deadlock in async inserts to `Log` tables via native protocol [#61055](https://github.com/ClickHouse/ClickHouse/pull/61055) ([Anton Popov](https://github.com/CurtizJ)). -* Fix lazy execution of default argument in dictGetOrDefault for RangeHashedDictionary [#61196](https://github.com/ClickHouse/ClickHouse/pull/61196) ([Kruglov Pavel](https://github.com/Avogar)). -* Fix multiple bugs in groupArraySorted [#61203](https://github.com/ClickHouse/ClickHouse/pull/61203) ([Raúl Marín](https://github.com/Algunenano)). -* Fix Keeper reconfig for standalone binary [#61233](https://github.com/ClickHouse/ClickHouse/pull/61233) ([Antonio Andelic](https://github.com/antonio2368)). -* Fix usage of session_token in S3 engine [#61234](https://github.com/ClickHouse/ClickHouse/pull/61234) ([Kruglov Pavel](https://github.com/Avogar)). -* Fix possible incorrect result of aggregate function `uniqExact` [#61257](https://github.com/ClickHouse/ClickHouse/pull/61257) ([Anton Popov](https://github.com/CurtizJ)). -* Fix bugs in show database [#61269](https://github.com/ClickHouse/ClickHouse/pull/61269) ([Raúl Marín](https://github.com/Algunenano)). -* Fix logical error in RabbitMQ storage with MATERIALIZED columns [#61320](https://github.com/ClickHouse/ClickHouse/pull/61320) ([vdimir](https://github.com/vdimir)). -* Fix CREATE OR REPLACE DICTIONARY [#61356](https://github.com/ClickHouse/ClickHouse/pull/61356) ([Vitaly Baranov](https://github.com/vitlibar)). -* Fix ATTACH query with external ON CLUSTER [#61365](https://github.com/ClickHouse/ClickHouse/pull/61365) ([Nikolay Degterinsky](https://github.com/evillique)). -* fix issue of actions dag split [#61458](https://github.com/ClickHouse/ClickHouse/pull/61458) ([Raúl Marín](https://github.com/Algunenano)). -* Fix finishing a failed RESTORE [#61466](https://github.com/ClickHouse/ClickHouse/pull/61466) ([Vitaly Baranov](https://github.com/vitlibar)). -* Disable async_insert_use_adaptive_busy_timeout correctly with compatibility settings [#61468](https://github.com/ClickHouse/ClickHouse/pull/61468) ([Raúl Marín](https://github.com/Algunenano)). -* Allow queuing in restore pool [#61475](https://github.com/ClickHouse/ClickHouse/pull/61475) ([Nikita Taranov](https://github.com/nickitat)). -* Fix bug when reading system.parts using UUID (issue 61220). [#61479](https://github.com/ClickHouse/ClickHouse/pull/61479) ([Dan Wu](https://github.com/wudanzy)). -* Fix crash in window view [#61526](https://github.com/ClickHouse/ClickHouse/pull/61526) ([Alexey Milovidov](https://github.com/alexey-milovidov)). -* Fix `repeat` with non native integers [#61527](https://github.com/ClickHouse/ClickHouse/pull/61527) ([Antonio Andelic](https://github.com/antonio2368)). 
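A minimal sketch of the `addDays` with `DateTime64` fix listed above (the value is arbitrary):

```sql
-- Adding days to a DateTime64 value should keep sub-second precision: 2024-03-11 12:00:00.123.
SELECT addDays(toDateTime64('2024-03-01 12:00:00.123', 3), 10) AS shifted;
```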
-* Fix client `-s` argument [#61530](https://github.com/ClickHouse/ClickHouse/pull/61530) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). -* Fix crash in arrayPartialReverseSort [#61539](https://github.com/ClickHouse/ClickHouse/pull/61539) ([Raúl Marín](https://github.com/Algunenano)). -* Fix string search with const position [#61547](https://github.com/ClickHouse/ClickHouse/pull/61547) ([Antonio Andelic](https://github.com/antonio2368)). -* Fix addDays cause an error when used datetime64 [#61561](https://github.com/ClickHouse/ClickHouse/pull/61561) ([Shuai li](https://github.com/loneylee)). -* Fix `system.part_log` for async insert with deduplication [#61620](https://github.com/ClickHouse/ClickHouse/pull/61620) ([Antonio Andelic](https://github.com/antonio2368)). -* Fix Non-ready set for system.parts. [#61666](https://github.com/ClickHouse/ClickHouse/pull/61666) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/get-started/cloud-quick-start.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/get-started/cloud-quick-start.md.hash deleted file mode 100644 index e92c5175860..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/get-started/cloud-quick-start.md.hash +++ /dev/null @@ -1 +0,0 @@ -003c69fb50444957 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/get-started/cloud-quick-start.mdx b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/get-started/cloud-quick-start.mdx deleted file mode 100644 index e1afdf787f2..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/get-started/cloud-quick-start.mdx +++ /dev/null @@ -1,328 +0,0 @@ ---- -'sidebar_position': 1 -'slug': '/cloud/get-started/cloud-quick-start' -'sidebar_label': 'Cloud Quick Start' -'keywords': -- 'clickhouse' -- 'install' -- 'getting started' -- 'quick start' -'pagination_next': 'cloud/get-started/sql-console' -'title': 'ClickHouse Cloud クイックスタート' -'description': 'ClickHouse Cloud のクイックスタートガイド' ---- - -import Image from '@theme/IdealImage'; -import signup_page from '@site/static/images/_snippets/signup_page.png'; -import select_plan from '@site/static/images/_snippets/select_plan.png'; -import createservice1 from '@site/static/images/_snippets/createservice1.png'; -import scaling_limits from '@site/static/images/_snippets/scaling_limits.png'; -import createservice8 from '@site/static/images/_snippets/createservice8.png'; -import show_databases from '@site/static/images/_snippets/show_databases.png'; -import service_connect from '@site/static/images/_snippets/service_connect.png'; -import data_sources from '@site/static/images/_snippets/data_sources.png'; -import select_data_source from '@site/static/images/_snippets/select_data_source.png'; -import client_details from '@site/static/images/_snippets/client_details.png'; -import new_rows_from_csv from '@site/static/images/_snippets/new_rows_from_csv.png'; -import SQLConsoleDetail from '@site/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_launch_sql_console.md'; - -# ClickHouse Cloud クイックスタート - -ClickHouse を始める最も迅速で簡単な方法は、[ClickHouse Cloud](https://console.clickhouse.cloud) に新しいサービスを作成することです。 - - - -## ClickHouse サービスの作成 {#1-create-a-clickhouse-service} - -[ClickHouse Cloud](https://console.clickhouse.cloud) で無料の ClickHouse サービスを作成するには、以下の手順を完了してサインアップする必要があります: - - - [サインアップページ](https://console.clickhouse.cloud/signUp) でアカウントを作成します - - メールまたは Google SSO、Microsoft SSO、AWS Marketplace、Google Cloud、Microsoft Azure 
のいずれかを使用してサインアップできます - - メールとパスワードでサインアップする場合は、次の 24 時間以内にメールで受け取ったリンクを使用してメールアドレスを確認してください - - 作成したユーザー名とパスワードでログインします - - -
- -ログインすると、ClickHouse Cloud のオンボーディングウィザードが開始され、新しい ClickHouse サービスの作成に導かれます。最初に、[プランを選択](/cloud/manage/cloud-tiers)することを求められます: - - -
- -:::tip -ほとんどのワークロードには Scale ティアをお勧めします。 -ティアの詳細については、[こちら](/cloud/manage/cloud-tiers)をご覧ください。 -::: - -新しいサービスをデプロイするために希望するリージョンを選択する必要があります。 -利用可能なオプションは選択したティアによって異なります。 -以下のステップでは、ユーザーが推奨の Scale ティアを選択したと仮定しています。 - -サービスをデプロイするための希望のリージョンを選択し、新しいサービスに名前を付けます: - - -
-デフォルトでは、Scale ティアは 4 VCPU および 16 GiB RAM を持つ 3 つのレプリカを作成します。[垂直オートスケーリング](/manage/scaling#vertical-auto-scaling)は、Scale ティアでデフォルトで有効になります。 - -ユーザーは必要に応じてサービスリソースをカスタマイズでき、レプリカがスケーリングする最小サイズと最大サイズを指定できます。準備ができたら、`Create service` を選択します。 - - -
- -おめでとうございます! ClickHouse Cloud サービスが立ち上がり、オンボーディングが完了しました。データの取り込みとクエリの実行を開始する方法について、引き続きお読みください。 - -## ClickHouse への接続 {#2-connect-to-clickhouse} -ClickHouse への接続方法は 2 つあります: - - ウェブベースの SQL コンソールを使用して接続 - - アプリから接続 -
-### SQL コンソールを使用して接続 {#connect-using-sql-console} - -迅速に開始するために、ClickHouse ではオンボーディングを完了するとリダイレクトされるウェブベースの SQL コンソールを提供しています。 - - - - -クエリタブを作成し、接続が機能していることを確認するためのシンプルなクエリを入力します: - -```sql -SHOW databases -``` - -リストに 4 つのデータベースが表示され、その中に自分が追加したものも含まれているはずです。 - - -
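なお、接続確認の別の例として、サーバーのバージョンを返すだけの次のようなクエリを実行してみる方法もあります(出力される値はお使いのサービスのバージョンによって異なります):

```sql
-- 接続確認のもう一つの例: サーバーのバージョンを表示する
SELECT version();
```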
- - -これで、新しい ClickHouse サービスの使用を開始できる準備が整いました! - -### アプリを使って接続 {#connect-with-your-app} - -ナビゲーションメニューから接続ボタンを押します。モーダルが開き、サービスの資格情報を提供し、インターフェースまたは言語クライアントへの接続方法に関する一連の手順が表示されます。 - - -
- -言語クライアントが表示されない場合は、[統合リスト](/integrations)を確認してください。 - -## データの追加 {#3-add-data} - -ClickHouse はデータと共により良くなります! データを追加する方法はいくつかあり、そのほとんどはナビゲーションメニューからアクセスできるデータソースページで利用可能です。 - - -
- -以下の方法でデータをアップロードできます: - - S3、Postgres、Kafka、GCS などのデータソースからデータを取得するための ClickPipe を設定する - - SQL コンソールを使用する - - ClickHouse クライアントを使用する - - ファイルをアップロードする - 受け入れられるフォーマットには JSON、CSV、TSV が含まれる - - ファイル URL からデータをアップロードする - -### ClickPipes {#clickpipes} - -[ClickPipes](http://clickhouse.com/docs/integrations/clickpipes) は、多様なソースからデータを取得する作業を簡単にするための管理された統合プラットフォームです。最も要求を満たすワークロードのために設計された ClickPipes の堅牢でスケーラブルなアーキテクチャは、一貫したパフォーマンスと信頼性を保証します。ClickPipes は長期ストリーミングニーズまたは一度きりのデータロードジョブに使用できます。 - - -
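なお、上のリストにある「ファイル URL からデータをアップロードする」に近いことは、SQL からも `url` テーブル関数で行えます。以下は最小限の例で、URL は説明用の架空のものです:

```sql
-- 公開 URL 上の CSV をまず確認する例(その後 INSERT INTO ... SELECT で取り込むこともできます)
SELECT *
FROM url('https://example.com/data.csv', 'CSVWithNames')
LIMIT 5;
```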
- -### SQL コンソールを使用してデータを追加 {#add-data-using-the-sql-console} - -ほとんどのデータベース管理システムと同様に、ClickHouse はテーブルを **データベース** に論理的にグループ化します。ClickHouse で新しいデータベースを作成するには [`CREATE DATABASE`](../../sql-reference/statements/create/database.md) コマンドを使用します: - -```sql -CREATE DATABASE IF NOT EXISTS helloworld -``` - -次のコマンドを実行して、`helloworld` データベース内に `my_first_table` という名前のテーブルを作成します: - -```sql -CREATE TABLE helloworld.my_first_table -( - user_id UInt32, - message String, - timestamp DateTime, - metric Float32 -) -ENGINE = MergeTree() -PRIMARY KEY (user_id, timestamp) -``` - -上記の例では、`my_first_table` は 4 つのカラムを持つ [`MergeTree`](../../engines/table-engines/mergetree-family/mergetree.md) テーブルです: - - - `user_id`: 32 ビットの符号なし整数 ([UInt32](../../sql-reference/data-types/int-uint.md)) - - `message`: [String](../../sql-reference/data-types/string.md) データ型で、他のデータベースシステムの `VARCHAR`、`BLOB`、`CLOB` などのタイプを置き換えます - - `timestamp`: [DateTime](../../sql-reference/data-types/datetime.md) 値で、時点を表します - - `metric`: 32 ビットの浮動小数点数 ([Float32](../../sql-reference/data-types/float.md)) - -:::note テーブルエンジン -テーブルエンジンは以下を決定します: - - データがどのように、どこに保存されるか - - サポートされるクエリ - - データがレプリケーションされるかどうか -
-選択できるテーブルエンジンは多く存在しますが、単一ノードの ClickHouse サーバーでのシンプルなテーブルには [`MergeTree`](/engines/table-engines/mergetree-family/mergetree.md) が適しているでしょう。 -::: - -#### 主キーの簡単な紹介 {#a-brief-intro-to-primary-keys} - -進む前に、ClickHouse における主キーの機能を理解することが重要です(主キーの実装は予想外のものに見えるかもしれません!): - - - ClickHouse における主キーはテーブル内の各行に対して **_一意ではない_** です - -ClickHouse テーブルの主キーは、ディスクに書き込む際のデータの並び順を決定します。8192 行または 10MB のデータごとに(**インデックスの粒度**と呼ばれます)、主キーインデックスファイルにエントリが作成されます。この粒度の概念は、簡単にメモリ内に収まる **スパースインデックス** を作り、グラニュールは `SELECT` クエリの際に処理される最小のカラムデータのストライプを表します。 - -主キーは `PRIMARY KEY` パラメータを使用して定義できます。`PRIMARY KEY` が指定されていないテーブルを定義すると、キーは `ORDER BY` 句に指定されたタプルになります。`PRIMARY KEY` と `ORDER BY` の両方を指定すると、主キーはソート順のサブセットでなければなりません。 - -主キーはまた、`(user_id, timestamp)` のタプルであるソートキーでもあります。したがって、各カラムファイルに格納されるデータは `user_id` で、次に `timestamp` でソートされます。 - -ClickHouse のコア概念について詳しくは、["Core Concepts"](../../managing-data/core-concepts/index.md) を参照してください。 - -#### テーブルへのデータの挿入 {#insert-data-into-your-table} - -ClickHouse ではおなじみの [`INSERT INTO TABLE`](../../sql-reference/statements/insert-into.md) コマンドを使用できますが、[`MergeTree`](/engines/table-engines/mergetree-family/mergetree.md) テーブルへの各挿入により、ストレージ内に **パーツ** が作成されることを理解することが重要です。 - -:::tip ClickHouse ベストプラクティス -バッチごとに大量の行を挿入します - 数万行または数百万行を一度に挿入してください。心配しないでください - ClickHouse はそのようなボリュームを簡単に処理できます - そして、サービスへの書き込みリクエストを減らすことで [コストを節約](/best-practices/selecting-an-insert-strategy#batch-inserts-if-synchronous) できます。 -::: - -
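補足として、`PRIMARY KEY` と `ORDER BY` を両方明示した場合の最小限の例を挙げます(テーブル名 `pk_example` は説明用の仮の名前です)。主キーがソートキーの先頭部分(サブセット)になっている点がポイントです:

```sql
CREATE TABLE helloworld.pk_example
(
    user_id   UInt32,
    message   String,
    timestamp DateTime
)
ENGINE = MergeTree
PRIMARY KEY (user_id)            -- 主キー: スパースインデックスに使われる
ORDER BY (user_id, timestamp)    -- ソートキー: 主キーはこの先頭部分である必要がある
```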
- -シンプルな例でも、同時に 1 行以上を挿入しましょう: - -```sql -INSERT INTO helloworld.my_first_table (user_id, message, timestamp, metric) VALUES - (101, 'Hello, ClickHouse!', now(), -1.0 ), - (102, 'Insert a lot of rows per batch', yesterday(), 1.41421 ), - (102, 'Sort your data based on your commonly-used queries', today(), 2.718 ), - (101, 'Granules are the smallest chunks of data read', now() + 5, 3.14159 ) -``` - -:::note -`timestamp` カラムはさまざまな [**Date**](../../sql-reference/data-types/date.md) と [**DateTime**](../../sql-reference/data-types/datetime.md) 関数を使用して生成されることに注意してください。ClickHouse には役立つ関数が数百ありますので、[**Functions**セクション](/sql-reference/functions/) で確認できます。 -::: - -正常に動作したことを確認しましょう: - -```sql -SELECT * FROM helloworld.my_first_table -``` - -### ClickHouse クライアントを使用してデータを追加 {#add-data-using-the-clickhouse-client} - -コマンドラインツール [**clickhouse client**](/interfaces/cli) を使用して、ClickHouse Cloud サービスに接続することもできます。左のメニューから `Connect` をクリックしてこれらの詳細にアクセスします。ダイアログからドロップダウンリストで `Native` を選択します: - - -
- -1. [ClickHouse](/interfaces/cli) をインストールします。 - -2. 次のコマンドを実行し、ホスト名、ユーザー名、およびパスワードを置き換えます: - -```bash -./clickhouse client --host HOSTNAME.REGION.CSP.clickhouse.cloud \ ---secure --port 9440 \ ---user default \ ---password -``` -スマイルマークのプロンプトが表示されたら、クエリを実行する準備が整ったことを示しています! -```response -:) -``` - -3. 次のクエリを実行して試してみてください: - -
- -```sql -SELECT * -FROM helloworld.my_first_table -ORDER BY timestamp -``` - -応答がきれいなテーブル形式で返されることに注意してください: - -```response -┌─user_id─┬─message────────────────────────────────────────────┬───────────timestamp─┬──metric─┐ -│ 102 │ Insert a lot of rows per batch │ 2022-03-21 00:00:00 │ 1.41421 │ -│ 102 │ Sort your data based on your commonly-used queries │ 2022-03-22 00:00:00 │ 2.718 │ -│ 101 │ Hello, ClickHouse! │ 2022-03-22 14:04:09 │ -1 │ -│ 101 │ Granules are the smallest chunks of data read │ 2022-03-22 14:04:14 │ 3.14159 │ -└─────────┴────────────────────────────────────────────────────┴─────────────────────┴─────────┘ - -4 行がセットにあります。経過時間: 0.008 秒。 -``` - -4. [`FORMAT`](../../sql-reference/statements/select/format.md) 句を追加して、ClickHouse の [多くのサポートされた出力フォーマット](/interfaces/formats/) の 1 つを指定します: - -
- -```sql -SELECT * -FROM helloworld.my_first_table -ORDER BY timestamp -FORMAT TabSeparated -``` -上記のクエリでは、出力がタブ区切りで返されます: -```response -クエリ ID: 3604df1c-acfd-4117-9c56-f86c69721121 - -102 Insert a lot of rows per batch 2022-03-21 00:00:00 1.41421 -102 Sort your data based on your commonly-used queries 2022-03-22 00:00:00 2.718 -101 Hello, ClickHouse! 2022-03-22 14:04:09 -1 -101 Granules are the smallest chunks of data read 2022-03-22 14:04:14 3.14159 - -4 行がセットにあります。経過時間: 0.005 秒。 -``` - -5. `clickhouse client` を終了するには、**exit** コマンドを入力します: - -
- -```bash -exit -``` - -### ファイルをアップロード {#upload-a-file} - -データベースを始める際によくあるタスクは、既にファイルにあるデータを挿入することです。クリックストリームデータを表すサンプルデータがオンラインにあります - それにはユーザー ID、訪問した URL、イベントのタイムスタンプが含まれます。 - -`data.csv` という CSV ファイルに以下のテキストが含まれているとします: - -```bash title="data.csv" -102,これはファイル内のデータです,2022-02-22 10:43:28,123.45 -101,カンマ区切りです,2022-02-23 00:00:00,456.78 -103,FORMAT を使用してフォーマットを指定します,2022-02-21 10:43:30,678.90 -``` - -1. 次のコマンドが `my_first_table` にデータを挿入します: - -
- -```bash -./clickhouse client --host HOSTNAME.REGION.CSP.clickhouse.cloud \ ---secure --port 9440 \ ---user default \ ---password \ ---query='INSERT INTO helloworld.my_first_table FORMAT CSV' < data.csv -``` - -2. SQL コンソールからクエリを実行すると新しい行がテーブルに表示されることに注意してください: - -
- - -
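例えば次のようなクエリを実行すると、CSV から取り込んだ行も含めて確認できます(最小限の例です):

```sql
-- 直近に挿入された行を新しい順に確認する
SELECT user_id, message, timestamp, metric
FROM helloworld.my_first_table
ORDER BY timestamp DESC
LIMIT 10;
```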
- - - -## 次は何をしますか? {#whats-next} - -- [チュートリアル](/tutorial.md)では、テーブルに 200 万行を挿入し、いくつかの分析クエリを書きます。 -- 挿入方法に関する指示がある [サンプルデータセット](/getting-started/index.md) のリストがあります。 -- [ClickHouse の使い始め方](https://clickhouse.com/company/events/getting-started-with-clickhouse/) に関する 25 分のビデオをチェックしてください。 -- 外部ソースからデータが来ている場合、メッセージキュー、データベース、パイプラインなどへの接続に関する [統合ガイドのコレクション](/integrations/index.mdx) を確認してください。 -- UI/BI 可視化ツールを使用している場合は、ClickHouse への UI 接続に関する [ユーザーガイド](/integrations/data-visualization) を参照してください。 -- [主キー](/guides/best-practices/sparse-primary-indexes.md) に関するユーザーガイドは、主キーおよびその定義方法について知っておくべきすべてのことです。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/get-started/cloud-quick-start.mdx.hash b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/get-started/cloud-quick-start.mdx.hash deleted file mode 100644 index 920a1a2fa37..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/get-started/cloud-quick-start.mdx.hash +++ /dev/null @@ -1 +0,0 @@ -334f9f4b187c4ae0 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/get-started/index.md b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/get-started/index.md deleted file mode 100644 index 418371d45ec..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/get-started/index.md +++ /dev/null @@ -1,26 +0,0 @@ ---- -slug: '/cloud/get-started' -title: 'はじめに' -description: 'はじめにの目次' -keywords: -- 'Cloud Quick Start' -- 'SQL Console' -- 'Query Insights' -- 'Query API Endpoints' -- 'Dashboards' -- 'Cloud Support' ---- - - - -Welcome to ClickHouse Cloud! Explore the pages below to learn more about what ClickHouse Cloud has to offer. - -| ページ | 説明 | -|------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------| -| [概要](/cloud/overview) | ClickHouse Cloudを使用する利点と、使用されているClickHouseのバージョンの概要です。 | -| [クラウドクイックスタート](/cloud/get-started/cloud-quick-start) | クラウドのセットアップを迅速に行うためのガイドです。 | -| [SQLコンソール](/cloud/get-started/sql-console) | クラウドに利用可能なインタラクティブなSQLコンソールについて学びます。 | -| [クエリアイデンティティ](/cloud/get-started/query-insights) | クラウドのクエリアイデンティティ機能がどのようにClickHouseの組み込みクエリログの利用を視覚化とテーブルを通じて容易にするかを学びます。 | -| [クエリエンドポイント](/cloud/get-started/query-endpoints) | ClickHouse Cloudコンソールで保存された任意のSQLクエリからAPIエンドポイントを直接作成できるQuery APIエンドポイント機能について学びます。 | -| [ダッシュボード](/cloud/manage/dashboards) | SQLコンソールのダッシュボード機能がどのように保存されたクエリから視覚化を収集し共有できるかを学びます。 | -| [クラウドサポート](/cloud/support) | ClickHouse Cloudユーザーおよび顧客向けのサポートサービスについて詳しく学びます。 | diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/get-started/index.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/get-started/index.md.hash deleted file mode 100644 index 68858b04518..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/get-started/index.md.hash +++ /dev/null @@ -1 +0,0 @@ -b8fcec8e00383a0d diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/get-started/query-endpoints.md b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/get-started/query-endpoints.md deleted file mode 100644 index f41f31f527e..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/get-started/query-endpoints.md +++ /dev/null @@ -1,511 +0,0 @@ ---- -sidebar_title: 'Query API Endpoints' -slug: '/cloud/get-started/query-endpoints' -description: '保存したクエリから簡単に REST API エンドポイントを作成します' -keywords: -- 'api' -- 'query api endpoints' -- 'query endpoints' -- 'query rest api' -title: 'クエリ API エンドポイント' ---- - -import 
Image from '@theme/IdealImage'; -import endpoints_testquery from '@site/static/images/cloud/sqlconsole/endpoints-testquery.png'; -import endpoints_savequery from '@site/static/images/cloud/sqlconsole/endpoints-savequery.png'; -import endpoints_configure from '@site/static/images/cloud/sqlconsole/endpoints-configure.png'; -import endpoints_completed from '@site/static/images/cloud/sqlconsole/endpoints-completed.png'; -import endpoints_curltest from '@site/static/images/cloud/sqlconsole/endpoints-curltest.png'; -import endpoints_monitoring from '@site/static/images/cloud/sqlconsole/endpoints-monitoring.png'; - - -# クエリ API エンドポイント - -**クエリ API エンドポイント** 機能を使用すると、ClickHouse Cloud コンソール内の任意の保存された SQL クエリから直接 API エンドポイントを作成できます。この機能により、ネイティブ ドライバーを介して ClickHouse Cloud サービスに接続することなく、HTTP を介して API エンドポイントにアクセスして保存したクエリを実行できます。 - -## クイック スタート ガイド {#quick-start-guide} - -続行する前に、API キーと Admin Console Role を持っていることを確認してください。このガイドに従って [API キーを作成する](/cloud/manage/openapi) ことができます。 - -### 保存されたクエリの作成 {#creating-a-saved-query} - -保存されたクエリがある場合は、このステップをスキップできます。 - -新しいクエリ タブを開きます。デモンストレーション用に、約 45 億件のレコードを含む [youtube データセット](/getting-started/example-datasets/youtube-dislikes) を使用します。例として、ユーザーが入力した `year` パラメータに基づいて、平均ビュー数による上位 10 人のアップローダーを返します: - -```sql -with sum(view_count) as view_sum, - round(view_sum / num_uploads, 2) as per_upload -select - uploader, - count() as num_uploads, - formatReadableQuantity(view_sum) as total_views, - formatReadableQuantity(per_upload) as views_per_video -from - youtube -where - toYear(upload_date) = {year: UInt16} -group by uploader -order by per_upload desc -limit 10 -``` - -このクエリにはパラメータ (`year`) が含まれていることに注意してください。SQL コンソール クエリ エディタは、ClickHouse のクエリ パラメータ式を自動的に検出し、各パラメータの入力を提供します。このクエリが正常に機能するかどうかを確認するために、すぐに実行してみましょう: - - - -次のステップとして、クエリを保存します: - - - -保存されたクエリに関する詳細なドキュメントは [こちら](/cloud/get-started/sql-console#saving-a-query) で確認できます。 - -### クエリ API エンドポイントの構成 {#configuring-the-query-api-endpoint} - -クエリ API エンドポイントは、クエリビューから **Share** ボタンをクリックし、 `API Endpoint` を選択することで構成できます。どの API キーがエンドポイントにアクセスできるかを指定するよう促されます: - - - -API キーを選択すると、クエリ API エンドポイントが自動的にプロビジョニングされます。テスト リクエストを送信するための例の `curl` コマンドが表示されます: - - - -### クエリ API パラメータ {#query-api-parameters} - -クエリ内のクエリ パラメータは、 `{parameter_name: type}` という構文で指定できます。これらのパラメータは自動的に検出され、例のリクエスト ペイロードにはこれらのパラメータを渡すための `queryVariables` オブジェクトが含まれます。 - -### テストと監視 {#testing-and-monitoring} - -クエリ API エンドポイントが作成されると、 `curl` またはその他の HTTP クライアントを使用して正常に機能するかをテストできます: - - - -最初のリクエストを送信すると、**Share** ボタンのすぐ右に新しいボタンが表示されます。それをクリックすると、クエリに関する監視データを含むフライアウトが開きます: - - - -## 実装の詳細 {#implementation-details} - -### 説明 {#description} - -このルートは、指定されたクエリ エンドポイントでクエリを実行します。異なるバージョン、形式、およびクエリ変数をサポートしています。レスポンスはストリーム (_バージョン 2 のみ_) で返すことも、単一のペイロードとして返すこともできます。 - -### 認証 {#authentication} - -- **必須**: はい -- **方法**: OpenAPI キー/シークレットを介した基本認証 -- **権限**: クエリ エンドポイントに対する適切な権限。 - -### URL パラメータ {#url-parameters} - -- `queryEndpointId` (必須): 実行するクエリ エンドポイントの一意の識別子。 - -### クエリ パラメータ {#query-parameters} - -#### V1 {#v1} - -なし - -#### V2 {#v2} - -- `format` (オプション): レスポンスの形式。ClickHouse がサポートするすべての形式をサポート。 -- `param_:name` クエリで使用するクエリ変数。 `name` はクエリ内の変数名と一致する必要があります。リクエストの本文がストリームである場合にのみ使用する必要があります。 -- `:clickhouse_setting` サポートされている [ClickHouse 設定](/operations/settings/settings) をクエリパラメータとして渡すことができます。 - -### ヘッダー {#headers} - -- `x-clickhouse-endpoint-version` (オプション): クエリ エンドポイントのバージョン。サポートされているバージョンは `1` と `2` です。指定しない場合、デフォルトバージョンはエンドポイントに最後に保存されたものです。 -- `x-clickhouse-endpoint-upgrade` (オプション): このヘッダーを設定してエンドポイント バージョンをアップグレードします。これは、 
`x-clickhouse-endpoint-version` ヘッダーと組み合わせて機能します。 - -### リクエスト ボディ {#request-body} - -- `queryVariables` (オプション): クエリで使用する変数を含むオブジェクト。 -- `format` (オプション): レスポンスの形式。クエリ API エンドポイントがバージョン 2 の場合、任意の ClickHouse によってサポートされている形式が可能です。v1 にサポートされる形式は次のとおりです: - - TabSeparated - - TabSeparatedWithNames - - TabSeparatedWithNamesAndTypes - - JSON - - JSONEachRow - - CSV - - CSVWithNames - - CSVWithNamesAndTypes - -### レスポンス {#responses} - -- **200 OK**: クエリが正常に実行されました。 -- **400 Bad Request**: リクエストが不正です。 -- **401 Unauthorized**: 認証なしまたは権限が不十分な状態でリクエストが行われました。 -- **404 Not Found**: 指定されたクエリ エンドポイントが見つかりませんでした。 - -### エラーハンドリング {#error-handling} - -- リクエストに有効な認証情報が含まれていることを確認します。 -- `queryEndpointId` と `queryVariables` が正しいことを検証します。 -- サーバーエラーを適切に処理し、適切なエラーメッセージを返します。 - -### エンドポイント バージョンのアップグレード {#upgrading-the-endpoint-version} - -エンドポイント バージョンを `v1` から `v2` にアップグレードするには、リクエストに `x-clickhouse-endpoint-upgrade` ヘッダーを含め、その値を `1` に設定します。これによりアップグレードプロセスがトリガーされ、`v2` で利用可能な機能や改善点を使用できるようになります。 - -## 例 {#examples} - -### 基本リクエスト {#basic-request} - -**クエリ API エンドポイント SQL:** - -```sql -SELECT database, name as num_tables FROM system.tables limit 3; -``` - -#### バージョン 1 {#version-1} - -**cURL:** - -```bash -curl -X POST 'https://console-api.clickhouse.cloud/.api/query-endpoints//run' \ ---user '' \ --H 'Content-Type: application/json' \ --d '{ "format": "JSONEachRow" }' -``` - -**JavaScript:** - -```javascript -fetch( - "https://console-api.clickhouse.cloud/.api/query-endpoints//run", - { - method: "POST", - headers: { - Authorization: "Basic ", - "Content-Type": "application/json", - }, - body: JSON.stringify({ - format: "JSONEachRow", - }), - } -) - .then((response) => response.json()) - .then((data) => console.log(data)) - .catch((error) => console.error("Error:", error)); -``` - -**レスポンス:** - -```json -{ - "data": { - "columns": [ - { - "name": "database", - "type": "String" - }, - { - "name": "num_tables", - "type": "String" - } - ], - "rows": [ - ["INFORMATION_SCHEMA", "COLUMNS"], - ["INFORMATION_SCHEMA", "KEY_COLUMN_USAGE"], - ["INFORMATION_SCHEMA", "REFERENTIAL_CONSTRAINTS"] - ] - } -} -``` - -#### バージョン 2 {#version-2} - -**cURL:** - -```bash -curl -X POST 'https://console-api.clickhouse.cloud/.api/query-endpoints//run?format=JSONEachRow' \ ---user '' \ --H 'Content-Type: application/json' \ --H 'x-clickhouse-endpoint-version: 2' -``` - -**JavaScript:** - -```javascript -fetch( - "https://console-api.clickhouse.cloud/.api/query-endpoints//run?format=JSONEachRow", - { - method: "POST", - headers: { - Authorization: "Basic ", - "Content-Type": "application/json", - "x-clickhouse-endpoint-version": "2", - }, - } -) - .then((response) => response.json()) - .then((data) => console.log(data)) - .catch((error) => console.error("Error:", error)); -``` - -**レスポンス:** - -```application/x-ndjson -{"database":"INFORMATION_SCHEMA","num_tables":"COLUMNS"} -{"database":"INFORMATION_SCHEMA","num_tables":"KEY_COLUMN_USAGE"} -{"database":"INFORMATION_SCHEMA","num_tables":"REFERENTIAL_CONSTRAINTS"} -``` - -### クエリ変数と JSONCompactEachRow 形式のバージョン 2 でのリクエスト {#request-with-query-variables-and-version-2-on-jsoncompacteachrow-format} - -**クエリ API エンドポイント SQL:** - -```sql -SELECT name, database FROM system.tables WHERE match(name, {tableNameRegex: String}) AND database = {database: String}; -``` - -**cURL:** - -```bash -curl -X POST 'https://console-api.clickhouse.cloud/.api/query-endpoints//run?format=JSONCompactEachRow' \ ---user '' \ --H 'Content-Type: application/json' \ --H 'x-clickhouse-endpoint-version: 2' \ --d '{ 
"queryVariables": { "tableNameRegex": "query.*", "database": "system" } }' -``` - -**JavaScript:** - -```javascript -fetch( - "https://console-api.clickhouse.cloud/.api/query-endpoints//run?format=JSONCompactEachRow", - { - method: "POST", - headers: { - Authorization: "Basic ", - "Content-Type": "application/json", - "x-clickhouse-endpoint-version": "2", - }, - body: JSON.stringify({ - queryVariables: { - tableNameRegex: "query.*", - database: "system", - }, - }), - } -) - .then((response) => response.json()) - .then((data) => console.log(data)) - .catch((error) => console.error("Error:", error)); -``` - -**レスポンス:** - -```application/x-ndjson -["query_cache", "system"] -["query_log", "system"] -["query_views_log", "system"] -``` - -### テーブルにデータを挿入するクエリ変数の配列を使ったリクエスト {#request-with-array-in-the-query-variables-that-inserts-data-into-a-table} - -**テーブル SQL:** - -```SQL -CREATE TABLE default.t_arr -( - `arr` Array(Array(Array(UInt32))) -) -ENGINE = MergeTree -ORDER BY tuple() -``` - -**クエリ API エンドポイント SQL:** - -```sql - INSERT INTO default.t_arr VALUES ({arr: Array(Array(Array(UInt32)))}); -``` - -**cURL:** - -```bash -curl -X POST 'https://console-api.clickhouse.cloud/.api/query-endpoints//run' \ ---user '' \ --H 'Content-Type: application/json' \ --H 'x-clickhouse-endpoint-version: 2' \ --d '{ - "queryVariables": { - "arr": [[[12, 13, 0, 1], [12]]] - } -}' -``` - -**JavaScript:** - -```javascript -fetch( - "https://console-api.clickhouse.cloud/.api/query-endpoints//run", - { - method: "POST", - headers: { - Authorization: "Basic ", - "Content-Type": "application/json", - "x-clickhouse-endpoint-version": "2", - }, - body: JSON.stringify({ - queryVariables: { - arr: [[[12, 13, 0, 1], [12]]], - }, - }), - } -) - .then((response) => response.json()) - .then((data) => console.log(data)) - .catch((error) => console.error("Error:", error)); -``` - -**レスポンス:** - -```text -OK -``` - -### ClickHouse 設定 max_threads を 8 に設定するリクエスト {#request-with-clickhouse-settings-max_threads-set-to-8} - -**クエリ API エンドポイント SQL:** - -```sql -SELECT * from system.tables; -``` - -**cURL:** - -```bash -curl -X POST 'https://console-api.clickhouse.cloud/.api/query-endpoints//run?max_threads=8,' \ ---user '' \ --H 'Content-Type: application/json' \ --H 'x-clickhouse-endpoint-version: 2' \ -``` - -**JavaScript:** - -```javascript -fetch( - "https://console-api.clickhouse.cloud/.api/query-endpoints//run?max_threads=8", - { - method: "POST", - headers: { - Authorization: "Basic ", - "Content-Type": "application/json", - "x-clickhouse-endpoint-version": "2", - }, - } -) - .then((response) => response.json()) - .then((data) => console.log(data)) - .catch((error) => console.error("Error:", error)); -``` - -### ストリームとしてレスポンスをリクエストし、解析する {#request-and-parse-the-response-as-a-stream} - -**クエリ API エンドポイント SQL:** - -```sql -SELECT name, database from system.tables; -``` - -**TypeScript:** - -```typescript -async function fetchAndLogChunks( - url: string, - openApiKeyId: string, - openApiKeySecret: string -) { - const auth = Buffer.from(`${openApiKeyId}:${openApiKeySecret}`).toString( - "base64" - ); - - const headers = { - Authorization: `Basic ${auth}`, - "x-clickhouse-endpoint-version": "2", - }; - - const response = await fetch(url, { - headers, - method: "POST", - body: JSON.stringify({ format: "JSONEachRow" }), - }); - - if (!response.ok) { - console.error(`HTTP error! 
Status: ${response.status}`); - return; - } - - const reader = response.body as unknown as Readable; - reader.on("data", (chunk) => { - console.log(chunk.toString()); - }); - - reader.on("end", () => { - console.log("ストリームが終了しました。"); - }); - - reader.on("error", (err) => { - console.error("ストリームエラー:", err); - }); -} - -const endpointUrl = - "https://console-api.clickhouse.cloud/.api/query-endpoints//run?format=JSONEachRow"; -const openApiKeyId = ""; -const openApiKeySecret = ""; -// 使用例 -fetchAndLogChunks(endpointUrl, openApiKeyId, openApiKeySecret).catch((err) => - console.error(err) -); -``` - -**出力** - -```shell -> npx tsx index.ts -> {"name":"COLUMNS","database":"INFORMATION_SCHEMA"} -> {"name":"KEY_COLUMN_USAGE","database":"INFORMATION_SCHEMA"} -... -> ストリームが終了しました。 -``` - -### ファイルからテーブルにストリームを挿入する {#insert-a-stream-from-a-file-into-a-table} - -次の内容を持つファイル ./samples/my_first_table_2024-07-11.csv を作成します: - -```csv -"user_id","json","name" -"1","{""name"":""John"",""age"":30}","John" -"2","{""name"":""Jane"",""age"":25}","Jane" -``` - -**テーブル作成 SQL:** - -```sql -create table default.my_first_table -( - user_id String, - json String, - name String, -) ENGINE = MergeTree() -ORDER BY user_id; -``` - -**クエリ API エンドポイント SQL:** - -```sql -INSERT INTO default.my_first_table -``` - -**cURL:** - -```bash -cat ./samples/my_first_table_2024-07-11.csv | curl --user '' \ - -X POST \ - -H 'Content-Type: application/octet-stream' \ - -H 'x-clickhouse-endpoint-version: 2' \ - "https://console-api.clickhouse.cloud/.api/query-endpoints//run?format=CSV" \ - --data-binary @- -``` diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/get-started/query-endpoints.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/get-started/query-endpoints.md.hash deleted file mode 100644 index 0a346951535..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/get-started/query-endpoints.md.hash +++ /dev/null @@ -1 +0,0 @@ -2efed77deefef909 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/get-started/query-insights.md b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/get-started/query-insights.md deleted file mode 100644 index 4583fac5ec4..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/get-started/query-insights.md +++ /dev/null @@ -1,57 +0,0 @@ ---- -sidebar_title: 'Query Insights' -slug: '/cloud/get-started/query-insights' -description: 'system.query_logのデータを可視化して、クエリのデバッグとパフォーマンスの最適化を簡素化' -keywords: -- 'query insights' -- 'query log' -- 'query log ui' -- 'system.query_log insights' -title: 'クエリインサイト' ---- - -import Image from '@theme/IdealImage'; -import insights_overview from '@site/static/images/cloud/sqlconsole/insights_overview.png'; -import insights_latency from '@site/static/images/cloud/sqlconsole/insights_latency.png'; -import insights_recent from '@site/static/images/cloud/sqlconsole/insights_recent.png'; -import insights_drilldown from '@site/static/images/cloud/sqlconsole/insights_drilldown.png'; -import insights_query_info from '@site/static/images/cloud/sqlconsole/insights_query_info.png'; - - -# クエリインサイト - -**クエリインサイト**機能は、ClickHouseの組み込みクエリログをさまざまな視覚化やテーブルを通じて使いやすくします。ClickHouseの `system.query_log` テーブルは、クエリの最適化、デバッグ、全体のクラスターの健全性とパフォーマンスの監視において重要な情報源です。 - -## クエリ概要 {#query-overview} - -サービスを選択すると、左側のサイドバーの**監視**ナビゲーションアイテムが展開され、新しい**クエリインサイト**サブアイテムが表示されます。このオプションをクリックすると、新しいクエリインサイトページが開きます。 - - - -## トップレベルメトリクス {#top-level-metrics} - 
-上部の統計ボックスは、選択した期間内の基本的なトップレベルのクエリメトリクスを表します。その下には、選択した時間ウィンドウ内のクエリの種類(select、insert、other)ごとに分けられたクエリボリューム、レイテンシ、およびエラーレートを示す3つの時系列チャートがあります。レイテンシチャートは、さらにp50、p90、p99のレイテンシを表示するように調整できます: - - - -## 最近のクエリ {#recent-queries} - -トップレベルメトリクスの下には、選択した時間ウィンドウ内のクエリログエントリ(正規化されたクエリハッシュとユーザーごとにグループ化されたもの)を表示するテーブルがあります: - - - -最近のクエリは、利用可能な任意のフィールドでフィルタリングおよびソートできます。このテーブルは、テーブル名、p90、p99のレイテンシなどの追加フィールドを表示または非表示にするように構成することもできます。 - -## クエリの詳細表示 {#query-drill-down} - -最近のクエリテーブルからクエリを選択すると、その選択したクエリに特有のメトリクスと情報を含むフライアウトが開きます: - - - -フライアウトからわかるように、この特定のクエリは過去24時間で3000回以上実行されています。**クエリ情報**タブのすべてのメトリクスは集約メトリクスですが、**クエリ履歴**タブを選択することで各実行のメトリクスを表示することもできます: - - - -
- -このペインから、各クエリ実行の`設定`および`プロファイルイベント`項目を展開して追加情報を表示できます。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/get-started/query-insights.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/get-started/query-insights.md.hash deleted file mode 100644 index 043bd89863a..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/get-started/query-insights.md.hash +++ /dev/null @@ -1 +0,0 @@ -270a16c45145d311 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/get-started/sql-console.md b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/get-started/sql-console.md deleted file mode 100644 index c784b0b3d25..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/get-started/sql-console.md +++ /dev/null @@ -1,312 +0,0 @@ ---- -sidebar_title: 'SQL Console' -slug: '/cloud/get-started/sql-console' -description: 'SQLコンソールを使用してクエリを実行し、可視化を作成します。' -keywords: -- 'sql console' -- 'sql client' -- 'cloud console' -- 'console' -title: 'SQL Console' ---- - -import Image from '@theme/IdealImage'; -import table_list_and_schema from '@site/static/images/cloud/sqlconsole/table-list-and-schema.png'; -import view_columns from '@site/static/images/cloud/sqlconsole/view-columns.png'; -import abc from '@site/static/images/cloud/sqlconsole/abc.png'; -import inspecting_cell_content from '@site/static/images/cloud/sqlconsole/inspecting-cell-content.png'; -import sort_descending_on_column from '@site/static/images/cloud/sqlconsole/sort-descending-on-column.png'; -import filter_on_radio_column_equal_gsm from '@site/static/images/cloud/sqlconsole/filter-on-radio-column-equal-gsm.png'; -import add_more_filters from '@site/static/images/cloud/sqlconsole/add-more-filters.png'; -import filtering_and_sorting_together from '@site/static/images/cloud/sqlconsole/filtering-and-sorting-together.png'; -import create_a_query_from_sorts_and_filters from '@site/static/images/cloud/sqlconsole/create-a-query-from-sorts-and-filters.png'; -import creating_a_query from '@site/static/images/cloud/sqlconsole/creating-a-query.png'; -import run_selected_query from '@site/static/images/cloud/sqlconsole/run-selected-query.png'; -import run_at_cursor_2 from '@site/static/images/cloud/sqlconsole/run-at-cursor-2.png'; -import run_at_cursor from '@site/static/images/cloud/sqlconsole/run-at-cursor.png'; -import cancel_a_query from '@site/static/images/cloud/sqlconsole/cancel-a-query.png'; -import sql_console_save_query from '@site/static/images/cloud/sqlconsole/sql-console-save-query.png'; -import sql_console_rename from '@site/static/images/cloud/sqlconsole/sql-console-rename.png'; -import sql_console_share from '@site/static/images/cloud/sqlconsole/sql-console-share.png'; -import sql_console_edit_access from '@site/static/images/cloud/sqlconsole/sql-console-edit-access.png'; -import sql_console_add_team from '@site/static/images/cloud/sqlconsole/sql-console-add-team.png'; -import sql_console_edit_member from '@site/static/images/cloud/sqlconsole/sql-console-edit-member.png'; -import sql_console_access_queries from '@site/static/images/cloud/sqlconsole/sql-console-access-queries.png'; -import search_hn from '@site/static/images/cloud/sqlconsole/search-hn.png'; -import match_in_body from '@site/static/images/cloud/sqlconsole/match-in-body.png'; -import pagination from '@site/static/images/cloud/sqlconsole/pagination.png'; -import pagination_nav from '@site/static/images/cloud/sqlconsole/pagination-nav.png'; -import download_as_csv from 
'@site/static/images/cloud/sqlconsole/download-as-csv.png'; -import tabular_query_results from '@site/static/images/cloud/sqlconsole/tabular-query-results.png'; -import switch_from_query_to_chart from '@site/static/images/cloud/sqlconsole/switch-from-query-to-chart.png'; -import trip_total_by_week from '@site/static/images/cloud/sqlconsole/trip-total-by-week.png'; -import bar_chart from '@site/static/images/cloud/sqlconsole/bar-chart.png'; -import change_from_bar_to_area from '@site/static/images/cloud/sqlconsole/change-from-bar-to-area.png'; -import update_query_name from '@site/static/images/cloud/sqlconsole/update-query-name.png'; -import update_subtitle_etc from '@site/static/images/cloud/sqlconsole/update-subtitle-etc.png'; -import adjust_axis_scale from '@site/static/images/cloud/sqlconsole/adjust-axis-scale.png'; - - -# SQLコンソール - -SQLコンソールは、ClickHouse Cloudでデータベースを探索し、クエリを実行するための最も迅速かつ簡単な方法です。SQLコンソールを使用して、以下のことができます: - -- ClickHouse Cloud Servicesに接続する -- テーブルデータを表示、フィルタリング、並べ替える -- クエリを実行し、結果データを数回のクリックで視覚化する -- チームメンバーとクエリを共有し、より効果的にコラボレーションする。 - -### テーブルの探索 {#exploring-tables} - -### テーブルリストとスキーマ情報の表示 {#viewing-table-list-and-schema-info} - -ClickHouseインスタンスに含まれるテーブルの概要は、左側のサイドバーエリアに表示されます。左バーの上部にあるデータベースセレクタを使用して、特定のデータベース内のテーブルを表示します。 - - -リスト内のテーブルは展開して、カラムとタイプを表示することもできます。 - - - -### テーブルデータの探索 {#exploring-table-data} - -リスト内のテーブルをクリックすると、新しいタブで開きます。テーブルビューでは、データを簡単に表示、選択、コピーできます。Microsoft ExcelやGoogle Sheetsなどのスプレッドシートアプリケーションにコピー&ペーストする際に、構造とフォーマットは保持されることに注意してください。フッターのナビゲーションを使用して、テーブルデータのページを切り替えることができます(30行単位でページ付け)。 - - - -### セルデータの検査 {#inspecting-cell-data} - -セルインスペクターツールを使用して、単一のセルに含まれる大量のデータを表示できます。開くにはセルを右クリックし、「セルを検査」を選択します。セルインスペクターの内容は、インスペクターコンテンツの右上隅にあるコピーアイコンをクリックすることでコピーできます。 - - - -## テーブルのフィルタリングと並べ替え {#filtering-and-sorting-tables} - -### テーブルの並べ替え {#sorting-a-table} - -SQLコンソールでテーブルを並べ替えるには、テーブルを開いてツールバーの「並べ替え」ボタンを選択します。このボタンをクリックすると、並べ替えを構成できるメニューが表示されます。並べ替えを希望するカラムと、並べ替えの順序(昇順または降順)を選択できます。「適用」を選択するか、Enterを押してテーブルを並べ替えます。 - - - -SQLコンソールでは、テーブルに複数の並べ替えを追加することもできます。再度「並べ替え」ボタンをクリックして、別の並べ替えを追加します。 - -:::note -並べ替えは、並べ替えペインに表示される順序(上から下)で適用されます。並べ替えを削除するには、並べ替えの隣にある「x」ボタンをクリックしてください。 -::: - -### テーブルのフィルタリング {#filtering-a-table} - -SQLコンソールでテーブルをフィルタリングするには、テーブルを開いて「フィルター」ボタンを選択します。並べ替えと同様に、このボタンをクリックすると、フィルタを構成できるメニューが表示されます。フィルタリングするカラムを選択し、必要な基準を選択できます。SQLコンソールは、カラムに含まれるデータのタイプに対応するフィルタオプションを賢く表示します。 - - - -フィルタに満足したら、「適用」を選択してデータをフィルタリングできます。また、下記のように追加のフィルタを追加することもできます。 - - - -並べ替え機能と同様に、フィルタの隣にある「x」ボタンをクリックして削除できます。 - -### フィルタリングと並べ替えを同時に行う {#filtering-and-sorting-together} - -SQLコンソールでは、テーブルをフィルタリングして並べ替えを同時に行うことができます。これを行うには、上記の手順を使用してすべての希望するフィルタと並べ替えを追加し、「適用」ボタンをクリックします。 - - - -### フィルタと並べ替えからクエリを作成する {#creating-a-query-from-filters-and-sorts} - -SQLコンソールでは、フィルタと並べ替えをワンクリックでクエリに変換できます。希望するフィルタと並べ替えのパラメータを選択したら、ツールバーの「クエリを作成」ボタンを選択します。「クエリを作成」をクリックすると、テーブルビューに含まれるデータに対応するSQLコマンドで事前に入力された新しいクエリタブが開きます。 - - - -:::note -「クエリを作成」機能を使用する際、フィルタと並べ替えは必須ではありません。 -::: - -SQLコンソールでのクエリの詳細については、(link) クエリのドキュメントを読むことができます。 - -## クエリの作成と実行 {#creating-and-running-a-query} - -### クエリの作成 {#creating-a-query} - -SQLコンソールで新しいクエリを作成する方法は2つあります。 - -- タブバーの「+」ボタンをクリックする -- 左側のサイドバーのクエリリストから「新しいクエリ」ボタンを選択する - - - -### クエリの実行 {#running-a-query} - -クエリを実行するには、SQLエディタにSQLコマンドを入力し、「実行」ボタンをクリックするか、ショートカット`cmd / ctrl + enter`を使用します。複数のコマンドを連続して記述して実行する場合は、各コマンドの後にセミコロンを追加することを忘れないでください。 - -クエリ実行オプション -デフォルトでは、実行ボタンをクリックするとSQLエディタ内のすべてのコマンドが実行されます。SQLコンソールは、他の2つのクエリ実行オプションをサポートします: - -- 選択したコマンドを実行 -- カーソルの位置でコマンドを実行 - 
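先に触れた「各コマンドの後にセミコロンを追加する」という点は、例えば次のように複数のコマンドを 1 つのタブに書いて実行するケースを指します(テーブル名 `default.console_demo` は説明用の仮の名前です):

```sql
-- 複数のコマンドを続けて実行する場合は、各コマンドの末尾にセミコロンを付けます
CREATE TABLE IF NOT EXISTS default.console_demo
(
    id   UInt32,
    note String
)
ENGINE = MergeTree
ORDER BY id;

INSERT INTO default.console_demo VALUES (1, 'first'), (2, 'second');

SELECT * FROM default.console_demo;
```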
-選択したコマンドを実行するには、望ましいコマンドまたはコマンドのシーケンスをハイライトし、「実行」ボタンをクリックします(または`cmd / ctrl + enter`ショートカットを使用)。選択がある場合は、SQLエディタのコンテキストメニュー(エディタ内の任意の場所を右クリックして開く)から「選択した実行」を選択することもできます。 - - - -現在のカーソル位置でコマンドを実行する方法は2つあります: - -- 拡張実行オプションメニューから「カーソルで実行」を選択します(または対応するショートカット`cmd / ctrl + shift + enter`を使用) - - - -- SQLエディタのコンテキストメニューから「カーソルで実行」を選択します。 - - - -:::note -カーソル位置にあるコマンドは実行時に黄色に点滅します。 -::: - -### クエリのキャンセル {#canceling-a-query} - -クエリが実行中は、クエリエディタのツールバーにある「実行」ボタンが「キャンセル」ボタンに置き換わります。このボタンをクリックするか、`Esc`を押すとクエリがキャンセルされます。注:キャンセル後に既に返された結果はそのまま表示されます。 - - - -### クエリの保存 {#saving-a-query} - -クエリを保存することで、後で簡単に見つけられるようになり、チームメンバーと共有することができます。SQLコンソールでは、クエリをフォルダに整理することもできます。 - -クエリを保存するには、ツールバーの「実行」ボタンのすぐ隣にある「保存」ボタンをクリックします。希望する名前を入力し、「クエリを保存」をクリックします。 - -:::note -ショートカット`cmd / ctrl + s`を使用すると、現在のクエリタブでの作業を保存することもできます。 -::: - - - -また、「無題のクエリ」をツールバーでクリックして名前を変更し、Enterキーを押すことで、同時にクエリの名前を付けて保存することもできます: - - - -### クエリの共有 {#query-sharing} - -SQLコンソールでは、クエリをチームメンバーと簡単に共有することができます。SQLコンソールは、全体およびユーザーごとに調整可能な4つのアクセスレベルをサポートしています: - -- 所有者(共有オプションを調整可能) -- 書き込みアクセス -- 読み取り専用アクセス -- アクセスなし - -クエリを保存した後、ツールバーの「共有」ボタンをクリックします。共有オプションが表示されるモーダルが表示されます: - - - -サービスにアクセスできるすべての組織メンバーのクエリアクセスを調整するには、上部のアクセスレベルセレクタを調整します: - - - -上記を適用した後、クエリはSQLコンソールにアクセスできるすべてのチームメンバーによって表示(および実行)できるようになります。 - -特定のメンバーのクエリアクセスを調整するには、「チームメンバーを追加」セレクタから希望するチームメンバーを選択します: - - - -チームメンバーを選択すると、アクセスレベルセレクタを持つ新しい行項目が表示されます: - - - -### 共有クエリへのアクセス {#accessing-shared-queries} - -クエリが共有されている場合、「クエリ」タブのSQLコンソール左サイドバーに表示されます: - - - -### クエリへのリンク(パーマリンク) {#linking-to-a-query-permalinks} - -保存されたクエリはパーマリンクされており、共有クエリへのリンクを送受信し、直接開くことができます。 - -クエリに存在する可能性のあるパラメータの値は、保存されたクエリのURLに自動的にクエリパラメータとして追加されます。たとえば、クエリに`{start_date: Date}`および`{end_date: Date}`パラメータが含まれている場合、パーマリンクは次のようになります:`https://console.clickhouse.cloud/services/:serviceId/console/query/:queryId?param_start_date=2015-01-01¶m_end_date=2016-01-01`。 - -## 高度なクエリ機能 {#advanced-querying-features} - -### クエリ結果の検索 {#searching-query-results} - -クエリが実行されると、結果ペイン内の検索入力を使用して取得された結果セットを迅速に検索できます。この機能は、追加の`WHERE`句の結果をプレビューしたり、特定のデータが結果セットに含まれていることを確認したりするのに役立ちます。検索入力に値を入力すると、結果ペインが更新され、入力した値と一致するレコードが返されます。この例では、`hackernews`テーブル内の`ClickHouse`を含むコメントのすべての`breakfast`のインスタンスを探します(大文字と小文字は区別しません): - - - -注:入力した値と一致する任意のフィールドが返されます。たとえば、上のスクリーンショットの3番目のレコードは、`by`フィールドの'breakfast'には一致しませんが、`text`フィールドには一致しています: - - - -### ページネーション設定の調整 {#adjusting-pagination-settings} - -デフォルトでは、クエリ結果ペインはすべての結果レコードを1ページに表示します。大きな結果セットの場合、結果をページングして表示しやすくする方が好ましいことがあります。これは、結果ペインの右下隅にあるページネーションセレクタを使用して実行できます: - - - -ページサイズを選択すると、結果セットに直ちにページネーションが適用され、結果ペインのフッターの中央にナビゲーションオプションが表示されます。 - - - -### クエリ結果データのエクスポート {#exporting-query-result-data} - -クエリ結果セットは、SQLコンソールから直接CSV形式に簡単にエクスポートできます。そのためには、結果ペインツールバーの右側にある`•••`メニューを開き、「CSVとしてダウンロード」を選択します。 - - - -## クエリデータの視覚化 {#visualizing-query-data} - -一部のデータは、チャート形式でより容易に解釈できます。SQLコンソールからクエリ結果データから視覚化を数回のクリックで迅速に作成できます。例として、NYCタクシーの週次統計を計算するクエリを使います: - -```sql -select - toStartOfWeek(pickup_datetime) as week, - sum(total_amount) as fare_total, - sum(trip_distance) as distance_total, - count(*) as trip_total -from - nyc_taxi -group by - 1 -order by - 1 asc -``` - - - -視覚化なしでは、これらの結果は解釈するのが難しいです。これをチャートに変換しましょう。 - -### チャートの作成 {#creating-charts} - -視覚化を構築するには、クエリ結果ペインツールバーから「チャート」オプションを選択します。チャート設定パネルが表示されます: - - - -まずは、`week`ごとの`trip_total`を追跡するシンプルな棒グラフを作成します。これを実行するには、`week`フィールドをx軸に、`trip_total`フィールドをy軸にドラッグします: - - - -ほとんどのグラフタイプは数値軸上に複数のフィールドをサポートしています。デモンストレーションとして、fare_totalフィールドをy軸にドラッグします: - - - -### チャートのカスタマイズ {#customizing-charts} - 
-SQLコンソールでは、チャートタイプセレクタグラフ設定パネルで選択できる10種類のチャートタイプをサポートしています。たとえば、前のチャートタイプを棒グラフからエリアに簡単に変更できます: - - - -チャートのタイトルは、データを供給するクエリの名前と一致します。クエリの名前を更新すると、チャートのタイトルも更新されます: - - - -多くの高度なチャートの特性も、チャート設定パネルの「高度な」セクションで調整できます。最初に以下の設定を調整します: - -- サブタイトル -- 軸タイトル -- x軸のラベル方向 - -それに応じてチャートが更新されます: - - - -特定のシナリオでは、各フィールドの軸スケールを独立して調整する必要があります。これも、軸範囲の最小値と最大値を指定することによって、チャート設定パネルの「高度な」セクションで実行できます。たとえば、上記のチャートは見た目が良いですが、`trip_total`と`fare_total`フィールド間の相関関係を示すためには、軸の範囲を調整する必要があります: - - diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/get-started/sql-console.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/get-started/sql-console.md.hash deleted file mode 100644 index 0a47d757191..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/get-started/sql-console.md.hash +++ /dev/null @@ -1 +0,0 @@ -09476a9e1e6791b8 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/_category_.yml b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/_category_.yml deleted file mode 100644 index 59089856c86..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/_category_.yml +++ /dev/null @@ -1,6 +0,0 @@ -label: 'Manage Cloud' -collapsible: true -collapsed: true -link: - type: generated-index - title: Manage ClickHouse Cloud diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/_snippets/_network_transfer_rates.md b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/_snippets/_network_transfer_rates.md deleted file mode 100644 index 86a03099967..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/_snippets/_network_transfer_rates.md +++ /dev/null @@ -1,213 +0,0 @@ ---- -{} ---- - - - -データ転送料金が、クラウドプロバイダーおよびリージョンごとに、パブリックインターネットまたはクロスリージョンにどのように変動するかを示す表は以下の通りです。 - -**AWS** - -
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
クラウドプロバイダーリージョンパブリックインターネットのアウトバウンド同じリージョンクロスリージョン
(すべてのTier 1)
`AWS``ap-northeast-1``$0.1440``$0.0000``$0.1152`
`AWS``ap-south-1``$0.1384``$0.0000``$0.1104`
`AWS``ap-southeast-1``$0.1512``$0.0000``$0.1152`
`AWS``ap-southeast-2``$0.1440``$0.0000``$0.1248`
`AWS``eu-central-1``$0.1152``$0.0000``$0.0312`
`AWS``eu-west-1``$0.1152``$0.0000``$0.0312`
`AWS``eu-west-2``$0.1152``$0.0000``$0.0312`
`AWS``us-east-1``$0.1152``$0.0000``$0.0312`
`AWS``us-east-2``$0.1152``$0.0000``$0.0312`
`AWS``us-west-2``$0.1152``$0.0000``$0.0312`
- -$^*$データ転送料金は、転送されたデータ1GBあたりの$です。 - -**GCP** - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
クラウドプロバイダー発信リージョンパブリックインターネットのアウトバウンド宛先リージョン
同じリージョン北アメリカヨーロッパアジア、オセアニア中東、南アメリカ、アフリカ
`GCP``us-central1``$0.1140``$0.0000``$0.0360` (Tier 1)`$0.0720` (Tier 2)`$0.1200` (Tier 3)`$0.1620` (Tier 4)
`GCP``us-east1``$0.1140``$0.0000``$0.0360` (Tier 1)`$0.0720` (Tier 2)`$0.1200` (Tier 3)`$0.1620` (Tier 4)
`GCP``europe-west4``$0.1140``$0.0000``$0.0720` (Tier 2)`$0.0360` (Tier 1)`$0.1200` (Tier 3)`$0.1620` (Tier 4)
`GCP``asia-southeast1``$0.1440``$0.0000``$0.1200` (Tier 3)`$0.1200` (Tier 3)`$0.1200` (Tier 3)`$0.1620` (Tier 4)
- -$^*$データ転送料金は、転送されたデータ1GBあたりの$です。 - -**Azure** - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
クラウドプロバイダー発信リージョンパブリックインターネットのアウトバウンド宛先リージョン
同じリージョン北アメリカヨーロッパアジア、オセアニア中東、南アメリカ、アフリカ
`Azure``eastus2``$0.1020``$0.0000``$0.0300` (Tier 1)`$0.0660` (Tier 2)`$0.0660` (Tier 2)`$0.0660` (Tier 2)
`Azure``westus3``$0.1020``$0.0000``$0.0300` (Tier 1)`$0.0660` (Tier 2)`$0.0660` (Tier 2)`$0.0660` (Tier 2)
`Azure``germanywestcentral``$0.1020``$0.0000``$0.0660` (Tier 2)`$0.0300` (Tier 1)`$0.0660` (Tier 2)`$0.0660` (Tier 2)
- -$^*$データ転送料金は、転送されたデータ1GBあたりの$です。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/_snippets/_network_transfer_rates.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/_snippets/_network_transfer_rates.md.hash deleted file mode 100644 index 36e4c74cb7f..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/_snippets/_network_transfer_rates.md.hash +++ /dev/null @@ -1 +0,0 @@ -d8777672a8f919c9 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/account-close.md b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/account-close.md deleted file mode 100644 index e73a518c3e3..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/account-close.md +++ /dev/null @@ -1,53 +0,0 @@ ---- -sidebar_label: 'アカウント削除' -slug: '/cloud/manage/close_account' -title: 'アカウントクローズ&削除' -description: '時々、アカウントを閉鎖する必要がある状況があります。このガイドは、そのプロセスをサポートします。' ---- - - - -## アカウントの閉鎖と削除 {#account-close--deletion} -私たちの目標は、あなたのプロジェクトが成功するのを支援することです。このサイトで回答されていない質問がある場合や、ユニークなユースケースの評価についての支援が必要な場合は、[support@clickhouse.com](mailto:support@clickhouse.com)までご連絡ください。 - -アカウントの閉鎖が必要になる状況があることを理解しています。このガイドは、そのプロセスをお手伝いします。 - -## 閉鎖と削除の違い {#close-vs-delete} -顧客は、閉鎖されたアカウントに再度ログインして、使用状況、請求およびアカウントレベルのアクティビティログを確認できます。これにより、ユースケースの文書化から、税務目的のために年末に請求書をダウンロードするまで、さまざまな目的に役立つ詳細に簡単にアクセスできます。また、製品の更新情報も引き続き受け取るため、新機能が利用可能になったかどうかを知ることができます。さらに、閉鎖されたアカウントはいつでも再開することができ、新しいサービスを開始できます。 - -個人データの削除を要求する顧客は、これは不可逆的なプロセスであることを認識しておく必要があります。アカウントおよび関連情報はもはや利用できなくなります。製品更新情報は受け取れず、アカウントを再開することもできません。これはニュースレターの購読には影響しません。 - -ニュースレターの購読者は、アカウントを閉鎖したり情報を削除したりせずに、ニュースレターのメールの下部にある退会リンクを使用していつでも退会できます。 - -## 閉鎖の準備 {#preparing-for-closure} - -アカウントの閉鎖をリクエストする前に、以下の手順を実行してアカウントを準備してください。 -1. 保持する必要があるサービスからデータをエクスポートします。 -2. サービスを停止し、削除します。これにより、アカウントに追加の請求が発生するのを防ぎます。 -3. 閉鎖をリクエストする管理者以外のすべてのユーザーを削除します。これにより、プロセスが完了するまで新しいサービスが作成されないようにします。 -4. コントロールパネルの「使用状況」および「請求」タブを確認し、すべての請求が支払われていることを確認します。未払い残高があるアカウントは閉鎖できません。 - -## アカウントの閉鎖をリクエスト {#request-account-closure} - -閉鎖および削除のリクエストを認証する必要があります。リクエストを迅速に処理できるよう、以下の手順に従ってください。 -1. clickhouse.cloud アカウントにサインインします。 -2. 上記の「閉鎖の準備」セクションの残りの手順を完了します。 -3. ヘルプボタン(画面右上の疑問符)をクリックします。 -4. 「サポート」内で「ケースを作成」をクリックします。 -5. 「新規ケースの作成」画面で、以下を入力します: - -```text -Priority: Severity 3 -Subject: 私の ClickHouse アカウントを閉鎖してください -Description: キャンセルの理由について簡単なメモを共有していただけると嬉しいです。 -``` - -6. 「新規ケースを作成」をクリックします。 -7. 
アカウントを閉鎖し、完了したことを知らせる確認メールを送信します。 - -## 個人データ削除をリクエスト {#request-personal-data-deletion} -個人データの削除リクエストは、アカウント管理者のみが行えることに注意してください。アカウント管理者でない場合は、アカウントから削除をリクエストするために、あなたの ClickHouse アカウント管理者に連絡してください。 - -データ削除をリクエストするには、上記の「アカウントの閉鎖をリクエスト」の手順に従ってください。ケース情報を入力する際、件名を「私の ClickHouse アカウントを閉鎖し、個人データを削除してください」に変更します。 - -個人データの削除リクエストは、30日以内に完了します。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/account-close.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/account-close.md.hash deleted file mode 100644 index 01d80fe7fe7..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/account-close.md.hash +++ /dev/null @@ -1 +0,0 @@ -94aa9906dcb7b95a diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/api/api-overview.md b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/api/api-overview.md deleted file mode 100644 index 4cc411551af..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/api/api-overview.md +++ /dev/null @@ -1,38 +0,0 @@ ---- -sidebar_label: '概要' -sidebar_position: 1 -title: 'ClickHouse Cloud API' -slug: '/cloud/manage/api/api-overview' -description: 'ClickHouse Cloud APIについて学ぶ' ---- - - - - -# ClickHouse Cloud API - -## 概要 {#overview} - -ClickHouse Cloud APIは、開発者がClickHouse Cloud上で組織やサービスを簡単に管理するためのREST APIです。このCloud APIを使用すると、サービスの作成と管理、APIキーのプロビジョニング、組織内のメンバーの追加または削除などが可能です。 - -[最初のAPIキーを作成し、ClickHouse Cloud APIの使用を開始する方法を学びましょう。](/cloud/manage/openapi.md) - -## Swagger (OpenAPI) エンドポイントとUI {#swagger-openapi-endpoint-and-ui} - -ClickHouse Cloud APIは、オープンソースの[OpenAPI仕様](https://www.openapis.org/)に基づいて構築されており、クライアント側での消費を予測可能にします。プログラムでClickHouse Cloud APIドキュメントを利用する必要がある場合、https://api.clickhouse.cloud/v1経由でJSONベースのSwaggerエンドポイントを提供しています。また、[Swagger UI](https://clickhouse.com/docs/cloud/manage/api/swagger)を通じてAPIドキュメントも見つけることができます。 - -## レート制限 {#rate-limits} - -開発者は、組織ごとに100のAPIキーに制限されています。各APIキーには、10秒間のウィンドウ内で10リクエストの制限があります。組織のAPIキーや10秒間のウィンドウ内でのリクエスト数を増やしたい場合は、support@clickhouse.comまでお問い合わせください。 - -## Terraformプロバイダー {#terraform-provider} - -公式のClickHouse Terraformプロバイダーを使用すると、[Infrastructure as Code](https://www.redhat.com/en/topics/automation/what-is-infrastructure-as-code-iac)を利用して、予測可能でバージョン管理された構成を作成し、デプロイメントのエラーを大幅に減らすことができます。 - -Terraformプロバイダーのドキュメントは、[Terraformレジストリ](https://registry.terraform.io/providers/ClickHouse/clickhouse/latest/docs)で確認できます。 - -ClickHouse Terraformプロバイダーに貢献したい場合は、[GitHubリポジトリ](https://github.com/ClickHouse/terraform-provider-clickhouse)でソースを確認できます。 - -## サポート {#support} - -迅速なサポートを得るために、まず[私たちのSlackチャンネル](https://clickhouse.com/slack)を訪れることをお勧めします。APIおよびその機能についての追加のヘルプや詳細が必要な場合は、https://console.clickhouse.cloud/supportでClickHouseサポートにお問い合わせください。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/api/api-overview.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/api/api-overview.md.hash deleted file mode 100644 index 21c0488c9ae..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/api/api-overview.md.hash +++ /dev/null @@ -1 +0,0 @@ -70ca4744c2f48cb5 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/api/api-reference-index.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/api/api-reference-index.md.hash deleted file mode 100644 index c89e0903f80..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/api/api-reference-index.md.hash +++ /dev/null @@ -1 +0,0 @@ -77e0b682f48b8fed diff --git 
a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/api/index.md b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/api/index.md deleted file mode 100644 index 9829a35e555..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/api/index.md +++ /dev/null @@ -1,15 +0,0 @@ ---- -title: 'Cloud API' -slug: '/cloud/manage/cloud-api' -description: 'クラウドAPIセクションのランディングページ' ---- - - - -このセクションには、Cloud APIに関するリファレンスドキュメントが含まれており、次のページがあります: - -| ページ | 説明 | -|--------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------| -| [概要](/cloud/manage/api/api-overview) | レート制限、Terraformプロバイダー、Swagger (OpenAPI)エンドポイントおよびUI、利用可能なサポートの概要を提供します。 | -| [APIキーの管理](/cloud/manage/openapi) | OpenAPIを利用したCloudのAPIについて詳しく学ぶことができ、アカウントやサービスの各種要素をプログラムから管理することができます。 | -| [APIリファレンス](https://clickhouse.com/docs/cloud/manage/api/swagger) | OpenAPI (swagger) リファレンスページです。 | diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/api/index.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/api/index.md.hash deleted file mode 100644 index dea3784a5bb..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/api/index.md.hash +++ /dev/null @@ -1 +0,0 @@ -ad4a986b80f1932a diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/api/invitations-api-reference.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/api/invitations-api-reference.md.hash deleted file mode 100644 index 2bc092cdec5..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/api/invitations-api-reference.md.hash +++ /dev/null @@ -1 +0,0 @@ -48741952c62f0032 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/api/keys-api-reference.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/api/keys-api-reference.md.hash deleted file mode 100644 index c9a865232e3..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/api/keys-api-reference.md.hash +++ /dev/null @@ -1 +0,0 @@ -0b8605360e8853ae diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/api/members-api-reference.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/api/members-api-reference.md.hash deleted file mode 100644 index 2ce2b521c5e..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/api/members-api-reference.md.hash +++ /dev/null @@ -1 +0,0 @@ -568c9479ca7fdab9 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/api/organizations-api-reference.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/api/organizations-api-reference.md.hash deleted file mode 100644 index 67af8b63879..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/api/organizations-api-reference.md.hash +++ /dev/null @@ -1 +0,0 @@ -23c5829e60dbeb4d diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/api/privateEndpointConfig-api-reference.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/api/privateEndpointConfig-api-reference.md.hash deleted file mode 100644 index 13bfc592df1..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/api/privateEndpointConfig-api-reference.md.hash +++ /dev/null @@ -1 +0,0 @@ -630e4268eb8df304 diff --git 
a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/api/prometheus-api-reference.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/api/prometheus-api-reference.md.hash deleted file mode 100644 index 5aeb58b8120..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/api/prometheus-api-reference.md.hash +++ /dev/null @@ -1 +0,0 @@ -8be69a3a8fdb2bdb diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/api/services-api-reference.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/api/services-api-reference.md.hash deleted file mode 100644 index c03567b4e87..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/api/services-api-reference.md.hash +++ /dev/null @@ -1 +0,0 @@ -551b7ef7b2a7c5c4 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/api/usageCost-api-reference.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/api/usageCost-api-reference.md.hash deleted file mode 100644 index dcc48519a13..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/api/usageCost-api-reference.md.hash +++ /dev/null @@ -1 +0,0 @@ -bad5b885d1bec1c2 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/backups/configurable-backups.md b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/backups/configurable-backups.md deleted file mode 100644 index fc55e5afa6b..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/backups/configurable-backups.md +++ /dev/null @@ -1,44 +0,0 @@ ---- -sidebar_label: '設定可能なバックアップ' -slug: '/cloud/manage/backups/configurable-backups' -description: '設定可能なバックアップ' -title: '設定可能なバックアップ' -keywords: -- 'backups' -- 'cloud backups' -- 'restore' ---- - -import backup_settings from '@site/static/images/cloud/manage/backup-settings.png'; -import backup_configuration_form from '@site/static/images/cloud/manage/backup-configuration-form.png'; -import CloudNotSupportedBadge from '@theme/badges/CloudNotSupportedBadge'; -import ScalePlanFeatureBadge from '@theme/badges/ScalePlanFeatureBadge'; -import Image from '@theme/IdealImage'; - - - -ClickHouse Cloudでは、**Scale**および**Enterprise**ティアサービスのバックアップスケジュールを設定することができます。バックアップは、ビジネスニーズに基づいて以下の側面で設定できます。 - -- **Retention**: 各バックアップが保持される日数の期間。保持期間は最短1日から最長30日まで指定でき、その間にいくつかの値を選択できます。 -- **Frequency**: 周期は、次のバックアップの間隔を指定できるようにします。例えば、"12時間ごと"の頻度は、バックアップが12時間の間隔で行われることを意味します。頻度は、次の時間間隔で「6時間ごと」から「48時間ごと」まで、`6`、`8`、`12`、`16`、`20`、`24`、`36`、`48`の範囲で設定できます。 -- **Start Time**: 毎日バックアップをスケジュールしたい開始時刻。開始時間を指定すると、バックアップの「Frequency」は自動的に24時間ごと1回になります。Clickhouse Cloudは、指定した開始時間の1時間以内にバックアップを開始します。 - -:::note -カスタムスケジュールは、指定されたサービスのClickHouse Cloudにおけるデフォルトのバックアップポリシーを上書きします。 -::: - -サービスのバックアップスケジュールを設定するには、コンソールの**Settings**タブに移動し、**Change backup configuration**をクリックします。 - -バックアップ設定を構成 - -これにより、右側にバックアップの保持期間、頻度、開始時間を選択するためのタブが開きます。選択した設定を保存する必要があります。 - -バックアップの保持と頻度を選択 - -:::note -開始時間と頻度は相互排他的です。開始時間が優先されます。 -::: - -:::note -バックアップスケジュールを変更すると、デフォルトのバックアップに含まれない可能性のあるバックアップのため、ストレージに対する月額料金が高くなる可能性があります。以下の「["Understanding backup cost"](./overview.md/#understanding-backup-cost)」セクションを参照してください。 -::: diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/backups/configurable-backups.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/backups/configurable-backups.md.hash deleted file mode 100644 index 131e0593d43..00000000000 --- 
a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/backups/configurable-backups.md.hash +++ /dev/null @@ -1 +0,0 @@ -ded388302fa94ada diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/backups/export-backups-to-own-cloud-account.md b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/backups/export-backups-to-own-cloud-account.md deleted file mode 100644 index ddacbb884de..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/backups/export-backups-to-own-cloud-account.md +++ /dev/null @@ -1,155 +0,0 @@ ---- -sidebar_label: 'Export Backups to your Own Cloud Account' -slug: '/cloud/manage/backups/export-backups-to-own-cloud-account' -title: 'Export Backups to your Own Cloud Account' -description: 'Describes how to export backups to your own Cloud account' ---- - -import EnterprisePlanFeatureBadge from '@theme/badges/EnterprisePlanFeatureBadge' - - - -ClickHouse Cloudは、独自のクラウドサービスプロバイダー(CSP)アカウント(AWS S3、Google Cloud Storage、またはAzure Blob Storage)へのバックアップをサポートしています。 -ClickHouse Cloudのバックアップの詳細、特に「フル」バックアップと「インクリメンタル」バックアップの違いについては、[バックアップ](overview.md)ドキュメントを参照してください。 - -ここでは、AWS、GCP、Azureのオブジェクトストレージにフルバックアップおよびインクリメンタルバックアップを取得し、バックアップから復元する方法の例を示します。 - -:::note -ユーザーは、バックアップが同じクラウドプロバイダー内の別のリージョンにエクスポートされる場合や、別のクラウドプロバイダー(同じまたは異なるリージョン)にエクスポートされる場合、[データ転送](../network-data-transfer.mdx)料金が発生することを認識しておく必要があります。 -::: - -## 要件 {#requirements} - -独自のCSPストレージバケットにバックアップをエクスポートおよび復元するには、以下の詳細が必要です。 -(AWSの認証情報とバケットへの書き込みを簡単に確認する例を、このセクションの末尾に示します。) - -### AWS {#aws} - -1. AWS S3エンドポイント、フォーマットは次の通りです: - - ```text - s3://<bucket_name>.s3.amazonaws.com/<directory> - ``` - - 例: - ```text - s3://testchbackups.s3.amazonaws.com/backups/ - ``` - ここで: - - `testchbackups` はバックアップをエクスポートするS3バケットの名前です。 - - `backups` はオプションのサブディレクトリです。 - -2. AWSアクセスキーとシークレット。 - -### Azure {#azure} - -1. Azureストレージ接続文字列。 -2. ストレージアカウント内のAzureコンテナ名。 -3. コンテナ内のAzure Blob。 - -### Google Cloud Storage (GCS) {#google-cloud-storage-gcs} - -1. GCSエンドポイント、フォーマットは次の通りです: - - ```text - https://storage.googleapis.com/<bucket_name>/ - ``` -2. アクセスHMACキーとHMACシークレット。 -
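以下は、上記のAWSの要件(エンドポイントとアクセスキー)で実際にバケットへ書き込めるかを、バックアップ実行前に確認するための最小限のスケッチです。バケット名 `testchbackups` とサブディレクトリ `backups` は上記の例を流用した仮の値で、`<key id>` と `<key secret>` は実際の認証情報に置き換える前提です。

```sql
-- 仮の例: s3 テーブル関数でテストファイルを書き込み、読み戻せることを確認する
INSERT INTO FUNCTION s3(
    'https://testchbackups.s3.amazonaws.com/backups/connectivity_check.csv',
    '<key id>', '<key secret>', 'CSV', 'n UInt8')
VALUES (1);

-- 書き込んだファイルを読み戻す(1 行返れば認証情報とパスは有効)
SELECT *
FROM s3(
    'https://testchbackups.s3.amazonaws.com/backups/connectivity_check.csv',
    '<key id>', '<key secret>', 'CSV', 'n UInt8');
```

ここで権限エラーになる場合は、同じ認証情報を使う `BACKUP` / `RESTORE` も失敗する可能性が高いため、先にバケットのポリシーとキーの権限を確認してください。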
- -# バックアップ / 復元 - -## AWS S3バケットへのバックアップ / 復元 {#backup--restore-to-aws-s3-bucket} - -### データベースバックアップの取得 {#take-a-db-backup} - -**フルバックアップ** - -```sql -BACKUP DATABASE test_backups -TO S3('https://testchbackups.s3.amazonaws.com/backups/<uuid>', '<key id>', '<key secret>') -``` - -ここで、`uuid` はバックアップセットを区別するための一意の識別子です。 - -:::note -このサブディレクトリ内の各新しいバックアップには異なるUUIDを使用する必要があります。そうでない場合は `BACKUP_ALREADY_EXISTS` エラーが発生します。 -たとえば、毎日バックアップを取得する場合、毎日新しいUUIDを使用する必要があります。 -::: - -**インクリメンタルバックアップ** - -```sql -BACKUP DATABASE test_backups -TO S3('https://testchbackups.s3.amazonaws.com/backups/<uuid>/my_incremental', '<key id>', '<key secret>') -SETTINGS base_backup = S3('https://testchbackups.s3.amazonaws.com/backups/<uuid>', '<key id>', '<key secret>') -``` - -### バックアップからの復元 {#restore-from-a-backup} - -```sql -RESTORE DATABASE test_backups -AS test_backups_restored -FROM S3('https://testchbackups.s3.amazonaws.com/backups/<uuid>', '<key id>', '<key secret>') -``` - -詳細については、[S3エンドポイントを使用するためのBACKUP/RESTOREの設定](/operations/backup#configuring-backuprestore-to-use-an-s3-endpoint)を参照してください。 - -## Azure Blob Storageへのバックアップ / 復元 {#backup--restore-to-azure-blob-storage} - -### データベースバックアップの取得 {#take-a-db-backup-1} - -**フルバックアップ** - -```sql -BACKUP DATABASE test_backups -TO AzureBlobStorage('<connection string>', '<container>', '<blob>/<uuid>'); -``` - -ここで、`uuid` はバックアップセットを区別するための一意の識別子です。 - -**インクリメンタルバックアップ** - -```sql -BACKUP DATABASE test_backups -TO AzureBlobStorage('<connection string>', '<container>', '<blob>/<uuid>/my_incremental') -SETTINGS base_backup = AzureBlobStorage('<connection string>', '<container>', '<blob>/<uuid>') -``` - -### バックアップからの復元 {#restore-from-a-backup-1} - -```sql -RESTORE DATABASE test_backups -AS test_backups_restored_azure -FROM AzureBlobStorage('<connection string>', '<container>', '<blob>/<uuid>') -``` - -詳細については、[AzureBlobStorageエンドポイントを使用するためのBACKUP/RESTOREの設定](/operations/backup#configuring-backuprestore-to-use-an-azureblobstorage-endpoint)を参照してください。 - -## Google Cloud Storage (GCS)へのバックアップ / 復元 {#backup--restore-to-google-cloud-storage-gcs} - -### データベースバックアップの取得 {#take-a-db-backup-2} - -**フルバックアップ** - -```sql -BACKUP DATABASE test_backups -TO S3('https://storage.googleapis.com/<bucket>/<uuid>', '<hmac key>', '<hmac secret>') -``` -ここで、`uuid` はバックアップセットを区別するための一意の識別子です。 - -**インクリメンタルバックアップ** - -```sql -BACKUP DATABASE test_backups -TO S3('https://storage.googleapis.com/test_gcs_backups/<uuid>/my_incremental', 'key', 'secret') -SETTINGS base_backup = S3('https://storage.googleapis.com/test_gcs_backups/<uuid>', 'key', 'secret') -``` - -### バックアップからの復元 {#restore-from-a-backup-2} - -```sql -RESTORE DATABASE test_backups -AS test_backups_restored_gcs -FROM S3('https://storage.googleapis.com/test_gcs_backups/<uuid>', 'key', 'secret') -``` diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/backups/export-backups-to-own-cloud-account.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/backups/export-backups-to-own-cloud-account.md.hash deleted file mode 100644 index fa7af426da1..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/backups/export-backups-to-own-cloud-account.md.hash +++ /dev/null @@ -1 +0,0 @@ -a74c503da7bdc353 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/backups/index.md b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/backups/index.md deleted file mode 100644 index 3531721d059..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/backups/index.md +++ /dev/null @@ -1,17 +0,0 @@ ---- -slug: '/cloud/manage/backups' -title: 'バックアップ' -description: 'バックアップに関する目次ページ。' -keywords: -- 'backups' -- 'configurable backups' -- 'export backups to own cloud' ---- - - - -| ページ | 説明 |
-|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------| -| [概要](./overview.md) | バックアップの概要ページです。 | -| [カスタマイズ可能なバックアップ](./configurable-backups.md) | ScaleおよびEnterpriseティアのユーザーが、特定のビジネスニーズに応じてバックアップスケジュールをカスタマイズする方法について学びます。 | -| [バックアップを自分のクラウドアカウントにエクスポート](./export-backups-to-own-cloud-account.md) | 自分のクラウドアカウントにバックアップをエクスポートする機能を持つEnterpriseティアの機能について学びます。 | diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/backups/index.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/backups/index.md.hash deleted file mode 100644 index d747e535481..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/backups/index.md.hash +++ /dev/null @@ -1 +0,0 @@ -da87a14f6088acac diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/backups/overview.md b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/backups/overview.md deleted file mode 100644 index ea08a912c6b..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/backups/overview.md +++ /dev/null @@ -1,181 +0,0 @@ ---- -sidebar_label: '概要' -sidebar_position: 0 -slug: '/cloud/manage/backups/overview' -title: '概要' -keywords: -- 'backups' -- 'cloud backups' -- 'restore' -description: 'ClickHouse Cloud におけるバックアップの概要を提供します。' ---- - -import CloudNotSupportedBadge from '@theme/badges/CloudNotSupportedBadge'; -import ScalePlanFeatureBadge from '@theme/badges/ScalePlanFeatureBadge'; -import Image from '@theme/IdealImage'; -import backup_chain from '@site/static/images/cloud/manage/backup-chain.png'; -import backup_status_list from '@site/static/images/cloud/manage/backup-status-list.png'; -import backup_usage from '@site/static/images/cloud/manage/backup-usage.png'; -import backup_restore from '@site/static/images/cloud/manage/backup-restore.png'; -import backup_service_provisioning from '@site/static/images/cloud/manage/backup-service-provisioning.png'; - - -# バックアップ - -データベースのバックアップは、安全網を提供します。予期しない理由でデータが失われた場合に、サービスを最後の成功したバックアップから以前の状態に復元できるようにします。これによりダウンタイムが最小化され、ビジネスにとって重要なデータが永続的に失われることを防ぎます。このガイドでは、ClickHouse Cloudでのバックアップの仕組み、サービスのバックアップを構成するためのオプション、およびバックアップからの復元方法について説明します。 - -## ClickHouse Cloudでのバックアップの仕組み {#how-backups-work-in-clickhouse-cloud} - -ClickHouse Cloudのバックアップは、「完全」と「増分」バックアップの組み合わせで構成されるバックアップチェーンです。チェーンはフルバックアップから始まり、次に予定された数回の期間にわたって増分バックアップが取得され、バックアップのシーケンスが作成されます。バックアップチェーンが一定の長さに達すると、新しいチェーンが開始されます。このバックアップの全チェーンは、必要に応じて新しいサービスにデータを復元するために使用できます。特定のチェーンに含まれるすべてのバックアップがサービスに設定された保持期間を過ぎると(保持については後述)、そのチェーンは破棄されます。 - -以下のスクリーンショットでは、実線の四角がフルバックアップを示し、点線の四角が増分バックアップを示しています。四角の周りの実線の長方形は保持期間を示し、エンドユーザーがバックアップを復元するために使用できるバックアップを可視化しています。以下のシナリオでは、24時間ごとにバックアップが取得され、2日間保持されます。 - -1日目には、バックアップチェーンを開始するためにフルバックアップが取得されます。2日目には増分バックアップが取得され、フルバックアップと増分バックアップが復元のために利用可能になります。7日目には、チェーンに1つのフルバックアップと6つの増分バックアップがあり、最近の2つの増分バックアップはユーザーに見えます。8日目には、新しいフルバックアップを取得し、9日目には新しいチェーンに2つのバックアップがあるため、前のチェーンは破棄されます。 - -ClickHouse Cloudのバックアップチェーンの例 - -*Clickhouse Cloudのバックアップシナリオの例* - -## デフォルトのバックアップポリシー {#default-backup-policy} - -Basic、Scale、Enterpriseティアでは、バックアップは計測され、ストレージとは別に請求されます。すべてのサービスはデフォルトで1つのバックアップを持ち、ScaleティアからはCloud Consoleの設定タブを介してさらに構成できます。 - -## バックアップステータスのリスト {#backup-status-list} - 
-サービスは、デフォルトの毎日スケジュールまたは自分で選んだ[カスタムスケジュール](./configurable-backups.md)に基づいてバックアップされます。利用可能なすべてのバックアップはサービスの**バックアップ**タブから見ることができます。ここでは、バックアップのステータス、持続時間、およびバックアップのサイズを確認できます。また、**アクション**コラムを使用して特定のバックアップを復元することもできます。 - -ClickHouse Cloudのバックアップステータスのリスト - -## バックアップコストの理解 {#understanding-backup-cost} - -デフォルトのポリシーに従い、ClickHouse Cloudは毎日バックアップを行い、24時間保持します。データをより多く保持する必要があるスケジュールを選択したり、バックアップをより頻繁に行うことで、追加のストレージ料金が発生する可能性があります。 - -バックアップコストを理解するには、使用画面からサービスごとのバックアップコストを表示できます(以下に示す通り)。カスタマイズされたスケジュールで数日間バックアップを実行している状態から、コストの概念を把握し、バックアップの月額コストを外挿することができます。 - -ClickHouse Cloudのバックアップ使用量チャート - -バックアップの総コストを推定するには、スケジュールを設定する必要があります。また、スケジュールを設定する前に月額コストの見積もりを得るために、私たちは[料金計算機](https://clickhouse.com/pricing)の更新にも取り組んでいます。コストを見積もるために、以下の入力が必要になります: -- 完全バックアップと増分バックアップのサイズ -- 希望する頻度 -- 希望する保持 -- クラウドプロバイダーと地域 - -:::note -サービス内のデータが時間の経過とともに増加するにつれて、バックアップの推定コストが変化することに注意してください。 -::: - -## バックアップを復元する {#restore-a-backup} - -バックアップは、バックアップが取得された既存のサービスではなく、新しいClickHouse Cloudサービスに復元されます。 - -**バックアップ**アイコンをクリックした後、新しいサービスの名前を指定してから、このバックアップを復元できます: - -ClickHouse Cloudでのバックアップの復元 - -新しいサービスは準備ができるまでサービスリストに「プロビジョニング」と表示されます: - -プロビジョニングサービスの進行中 - -## 復元されたサービスの操作 {#working-with-your-restored-service} - -バックアップが復元された後、似たような2つのサービスがあります:復元が必要だった**元のサービス**と、元のバックアップから復元された新しい**復元サービス**です。 - -バックアップの復元が完了したら、次のいずれかを実行する必要があります: -- 新しい復元サービスを使用し、元のサービスを削除します。 -- 新しい復元サービスから元のサービスにデータを移行し、新しい復元サービスを削除します。 - -### **新しい復元サービス**を使用する {#use-the-new-restored-service} - -新しいサービスを使用するには、以下の手順を実行します: - -1. ニーズに必要なIPアクセスリストのエントリが新しいサービスに存在することを確認します。 -1. 新しいサービスに必要なデータが含まれていることを確認します。 -1. 元のサービスを削除します。 - -### **新しく復元されたサービス**から**元のサービス**にデータを移行する {#migrate-data-from-the-newly-restored-service-back-to-the-original-service} - -新しく復元されたサービスを何らかの理由で使用できない場合(たとえば、まだ既存のサービスに接続しているユーザーやアプリケーションがある場合)、新しく復元されたデータを元のサービスに移行することを決定するかもしれません。以下の手順で移行が可能です: - -**新しく復元されたサービスへのリモートアクセスを許可する** - -新しいサービスは、元のサービスと同じIP許可リストのバックアップから復元される必要があります。他のClickHouse Cloudサービスへの接続は、**Anywhere**からのアクセスが許可されていない限り許可されません。許可リストを変更し、一時的に**Anywhere**からのアクセスを許可してください。詳細は[IPアクセスリスト](/cloud/security/setting-ip-filters)のドキュメントを参照してください。 - -**新しく復元されたClickHouseサービス(復元されたデータをホストするシステム)で** - -:::note -アクセスするには、新しいサービスのパスワードをリセットする必要があります。それはサービスリストの**設定**タブから行うことができます。 -::: - -ソーステーブル(この例では`db.table`)を読み取ることができる読み取り専用ユーザーを追加します: - - ```sql - CREATE USER exporter - IDENTIFIED WITH SHA256_PASSWORD BY 'password-here' - SETTINGS readonly = 1; - ``` - - ```sql - GRANT SELECT ON db.table TO exporter; - ``` - -テーブル定義をコピーします: - - ```sql - SELECT create_table_query - FROM system.tables - WHERE database = 'db' AND table = 'table' - ``` - -**損傷したテーブルを持つ先行ClickHouse Cloudシステムにて:** - -宛先データベースを作成します: - ```sql - CREATE DATABASE db - ``` - -ソースの`CREATE TABLE`ステートメントを使用して宛先を作成します: - -:::tip -`CREATE`ステートメントを実行する際には、`ENGINE`を`ReplicatedMergeTree`に変更してください。ClickHouse Cloudは常にテーブルをレプリケートし、正しいパラメータを提供します。 -::: - - ```sql - CREATE TABLE db.table ... - ENGINE = ReplicatedMergeTree - ORDER BY ... 
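-- 補足(仮の例): ソーステーブルの定義が次のような MergeTree だったと仮定します:
--   CREATE TABLE db.table (id UInt64, ts DateTime, value String)
--   ENGINE = MergeTree ORDER BY (ts, id)
-- この場合、宛先側では列定義と ORDER BY キーをそのまま写し、ENGINE だけを置き換えます:
--   CREATE TABLE db.table (id UInt64, ts DateTime, value String)
--   ENGINE = ReplicatedMergeTree
--   ORDER BY (ts, id)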
- ``` - -新しく復元されたClickHouse Cloudサービスからデータを元のサービスに取得するために、`remoteSecure`関数を使用します: - - ```sql - INSERT INTO db.table - SELECT * - FROM remoteSecure('source-hostname', db, table, 'exporter', 'password-here') - ``` - -元のサービスにデータを成功裏に挿入した後、サービス内のデータを検証してください。データが確認された後には、新しいサービスを削除することも忘れないでください。 - -## テーブルの復元または未削除 {#undeleting-or-undropping-tables} - - - -ClickHouse Cloudでは`UNDROP`コマンドはサポートされていません。誤ってテーブルを`DROP`した場合、最善の策は最後のバックアップを復元し、バックアップからテーブルを再作成することです。 - -ユーザーが誤ってテーブルを削除するのを防ぐために、特定のユーザーまたはロールに対して[`DROP TABLE`コマンド](/sql-reference/statements/drop#drop-table)の権限を取り消すために[`GRANT`ステートメント](/sql-reference/statements/grant)を使用できます。 - -:::note -デフォルトでは、ClickHouse Cloudでは1TBを超えるサイズのテーブルを削除することはできないことに注意してください。これを超えるサイズのテーブルを削除したい場合には、次の設定`max_table_size_to_drop`を使用して実行できます: - -```sql -DROP TABLE IF EXISTS table_to_drop -SYNC SETTINGS max_table_size_to_drop=2097152 -- 限度を2TBに増やす -``` -::: - -## カスタマイズ可能なバックアップ {#configurable-backups} - -デフォルトのバックアップスケジュールとは異なるバックアップスケジュールを設定したい場合は、[カスタマイズ可能なバックアップ](./configurable-backups.md)を参照してください。 - -## 自分のクラウドアカウントにバックアップをエクスポート {#export-backups-to-your-own-cloud-account} - -バックアップを自分のクラウドアカウントにエクスポートしたいユーザーは、[こちら](./export-backups-to-own-cloud-account.md)をご覧ください。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/backups/overview.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/backups/overview.md.hash deleted file mode 100644 index dfe9a56f620..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/backups/overview.md.hash +++ /dev/null @@ -1 +0,0 @@ -a85f0c762c56a7c5 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/billing.md b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/billing.md deleted file mode 100644 index ce398b5af41..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/billing.md +++ /dev/null @@ -1,412 +0,0 @@ ---- -sidebar_label: '概要' -slug: '/cloud/manage/billing/overview' -title: 'Pricing' -description: 'ClickHouse Cloud の価格に関する概要ページ' ---- - - - -For pricing information, see the [ClickHouse Cloud Pricing](https://clickhouse.com/pricing#pricing-calculator) page. -ClickHouse Cloud bills based on the usage of compute, storage, [data transfer](/cloud/manage/network-data-transfer) (egress over the internet and cross-region), and [ClickPipes](/integrations/clickpipes). -To understand what can affect your bill, and ways that you can manage your spend, keep reading. - -## Amazon Web Services (AWS) 例 {#amazon-web-services-aws-example} - -:::note -- 価格はAWS us-east-1の価格を反映しています。 -- 適用されるデータ転送およびClickPipesの料金を[ここ](jan2025_faq/dimensions.md)で確認できます。 -::: - -### 基本プラン: 月額66.52ドルから {#basic-from-6652-per-month} - -最適な使用ケース: 硬い信頼性保証がない小規模データボリュームの部門向け使用ケース。 - -**基本ティアサービス** -- 1レプリカ x 8 GiB RAM, 2 vCPU -- 500 GBの圧縮データ -- 500 GBのデータバックアップ -- 10 GBのパブリックインターネットのデータ転送 -- 5 GBのクロスリージョンデータ転送 - -この例の価格内訳: - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
| | 1日6時間稼働 | 1日12時間稼働 | 1日24時間稼働 |
|---|---|---|---|
| コンピュート | \$39.91 | \$79.83 | \$159.66 |
| ストレージ | \$25.30 | \$25.30 | \$25.30 |
| パブリックインターネットのデータ転送 | \$1.15 | \$1.15 | \$1.15 |
| クロスリージョンデータ転送 | \$0.16 | \$0.16 | \$0.16 |
| 合計 | \$66.52 | \$106.44 | \$186.27 |
- -### スケール (常時稼働、自動スケーリング): 月額499.38ドルから {#scale-always-on-auto-scaling-from-49938-per-month} - -最適な使用ケース: 強化されたSLA(2つ以上のレプリカサービス)、スケーラビリティ、および高度なセキュリティが必要なワークロード。 - -**スケールティアサービス** -- アクティブなワークロード(稼働時間 約100%) -- 自動スケーリングの上限を設定でき、請求額が急増しないように防止 -- 100 GBのパブリックインターネットのデータ転送 -- 10 GBのクロスリージョンデータ転送 - -この例の価格内訳:
| | 例1 | 例2 | 例3 |
|---|---|---|---|
| コンピュート | 2レプリカ x 8 GiB RAM, 2 vCPU<br/>\$436.95 | 2レプリカ x 16 GiB RAM, 4 vCPU<br/>\$873.89 | 3レプリカ x 16 GiB RAM, 4 vCPU<br/>\$1,310.84 |
| ストレージ | 1TBのデータ + 1バックアップ<br/>\$50.60 | 2TBのデータ + 1バックアップ<br/>\$101.20 | 3TBのデータ + 1バックアップ<br/>\$151.80 |
| パブリックインターネットのデータ転送 | \$11.52 | \$11.52 | \$11.52 |
| クロスリージョンデータ転送 | \$0.31 | \$0.31 | \$0.31 |
| 合計 | \$499.38 | \$986.92 | \$1,474.47 |
- -### エンタープライズ: 開始価格は異なります {#enterprise-starting-prices-vary} - -最適な使用ケース: 厳格なセキュリティおよびコンプライアンスのニーズを備えた大規模でミッションクリティカルな展開 - -**エンタープライズティアサービス** -- アクティブなワークロード(稼働時間 約100%) -- 1 TBのパブリックインターネットのデータ転送 -- 500 GBのクロスリージョンデータ転送 - -この例の価格内訳:
| | 例1 | 例2 | 例3 |
|---|---|---|---|
| コンピュート | 2レプリカ x 32 GiB RAM, 8 vCPU<br/>\$2,285.60 | 2レプリカ x 64 GiB RAM, 16 vCPU<br/>\$4,571.19 | 2 x 120 GiB RAM, 30 vCPU<br/>\$8,570.99 |
| ストレージ | 5TB + 1バックアップ<br/>\$253.00 | 10TB + 1バックアップ<br/>\$506.00 | 20TB + 1バックアップ<br/>\$1,012.00 |
| パブリックインターネットのデータ転送 | \$115.20 | \$115.20 | \$115.20 |
| クロスリージョンデータ転送 | \$15.60 | \$15.60 | \$15.60 |
| 合計 | \$2,669.40 | \$5,207.99 | \$9,713.79 |
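上記の各例の「ストレージ」項目を自分のデータに当てはめたい場合のために、以下は既存の ClickHouse サービスで圧縮後のテーブルサイズ(課金の目安)と圧縮前のサイズを `system.parts` から集計する最小限のスケッチです。データベース名 `db` は仮の値で、実際の環境に合わせて置き換える前提です。

```sql
-- 仮の例: アクティブなパートについて、テーブルごとの圧縮前後のサイズと圧縮率を確認する
SELECT
    database,
    table,
    formatReadableSize(sum(data_compressed_bytes))   AS compressed,    -- 圧縮後(課金の目安)
    formatReadableSize(sum(data_uncompressed_bytes)) AS uncompressed,  -- 圧縮前
    round(sum(data_uncompressed_bytes) / sum(data_compressed_bytes), 2) AS ratio
FROM system.parts
WHERE active AND database = 'db'
GROUP BY database, table
ORDER BY sum(data_compressed_bytes) DESC;
```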
- -## よくある質問 {#faqs} - -### コンピュートはどのようにメータリングされていますか? {#how-is-compute-metered} - -ClickHouse Cloudは、コンピュートを1分単位で測定し、8G RAMの増分で課金します。 -コンピュートコストはティア、リージョン、クラウドサービスプロバイダによって異なります。 - -### ディスク上のストレージはどのように計算されますか? {#how-is-storage-on-disk-calculated} - -ClickHouse Cloudはクラウドオブジェクトストレージを使用し、利用はClickHouseテーブルに保存されているデータの圧縮サイズで測定されます。 -ストレージコストはティアに関わらず同じで、リージョンやクラウドサービスプロバイダーによって変動します。 - -### バックアップはストレージの合計にカウントされますか? {#do-backups-count-toward-total-storage} - -ストレージおよびバックアップはストレージコストにカウントされ、別途請求されます。 -すべてのサービスはデフォルトで1日保持される1つのバックアップを持ちます。 -追加のバックアップが必要なユーザーは、Cloud Consoleの設定タブで追加の[バックアップ](backups/overview.md)を構成できます。 - -### 圧縮をどのように推定しますか? {#how-do-i-estimate-compression} - -圧縮はデータセットによってかなり異なる可能性があります。 -データがどれだけ圧縮可能か(高カーディナリティフィールド対低カーディナリティフィールドの数)に依存しますし、ユーザーがスキーマをどのように設定するか(オプショナルコーデックを使用するかどうかなど)にも依存します。 -一般的な種類の分析データの場合、10倍ほど圧縮されることがありますが、実際にはそれよりも少ないか多い場合もあります。 -ガイダンスについては[最適化ドキュメント](/optimize/asynchronous-inserts)を参照し、詳細なログ使用例についてはこの[Uberブログ](https://www.uber.com/blog/logging/)をご覧ください。 -正確に知る唯一の実用的な方法は、データセットをClickHouseにインジェストし、データセットのサイズとClickHouseに保存されたサイズを比較することです。 - -以下のクエリを使用できます: - -```sql title="Estimating compression" -SELECT formatReadableSize(total_bytes) -FROM system.tables -WHERE name = -``` - -### セルフマネージドデプロイメントがある場合、ClickHouseがクラウドでサービスを実行するコストを推定するためのツールは何ですか? {#what-tools-does-clickhouse-offer-to-estimate-the-cost-of-running-a-service-in-the-cloud-if-i-have-a-self-managed-deployment} - -ClickHouseクエリログは、ClickHouse Cloud内のワークロードを実行するコストを推定するために使用できる[主要なメトリクス](/operations/system-tables/query_log)をキャプチャします。 -セルフマネージドからClickHouse Cloudへの移行の詳細については、[移行ドキュメント](/cloud/migration/clickhouse-to-cloud)を参照し、さらなる質問がある場合は[ClickHouse Cloudサポート](https://console.clickhouse.cloud/support)にお問い合わせください。 - -### ClickHouse Cloudにはどのような請求オプションがありますか? {#what-billing-options-are-available-for-clickhouse-cloud} - -ClickHouse Cloudは以下の請求オプションをサポートしています: - -- 自己サービスの月額(USD、クレジットカードによる)。 -- 直接販売の年次 / 複数年(先払いの"ClickHouse Credits"を通じて、USD、追加の支払いオプションあり)。 -- AWS、GCP、Azureのマーケットプレイスを通じて(ペイ・アズ・ユー・ゴー(PAYG)またはマーケットプレイスを通じてClickHouse Cloudと契約する)。 - -### 請求サイクルはどのくらいですか? {#how-long-is-the-billing-cycle} - -請求は月額サイクルに従い、開始日はClickHouse Cloud組織が作成された日として追跡されます。 - -### スケールおよびエンタープライズサービスのコストを管理するためにClickHouse Cloudが提供する制御は何ですか? {#what-controls-does-clickhouse-cloud-offer-to-manage-costs-for-scale-and-enterprise-services} - -- トライアルおよび年次契約の顧客は、消費が特定の閾値に達すると、自動的にメールで通知されます:`50%`、`75%`、`90%`。これにより、ユーザーは使用を積極的に管理できます。 -- ClickHouse Cloudでは、[高度なスケーリング管理](/manage/scaling)を使用して、コンピュートに最大自動スケーリング制限を設定でき、これは分析ワークロードにとって重要なコスト要因です。 -- [高度なスケーリング管理](/manage/scaling)を使用すると、非アクティブ中の一時停止/idlingの挙動を制御するオプションがあるメモリ制限を設定できます。 - -### 基本サービスのコストを管理するためにClickHouse Cloudが提供する制御は何ですか? {#what-controls-does-clickhouse-cloud-offer-to-manage-costs-for-basic-services} - -- [高度なスケーリング管理](/manage/scaling)を使用して、非アクティブ中の一時停止/idlingの挙動を制御できます。基本サービスでは、メモリ割り当ての調整はサポートされていません。 -- デフォルト設定では、一時的な非アクティブ期間後にサービスが一時停止します。 - -### 複数のサービスがある場合、サービスごとに請求書が発行されますか、それとも統合請求書が発行されますか? {#if-i-have-multiple-services-do-i-get-an-invoice-per-service-or-a-consolidated-invoice} - -特定の請求期間に対する組織内のすべてのサービスに対して、統合請求書が生成されます。 - -### トライアル期間とクレジットが失効する前にクレジットカードを追加してアップグレードすると、請求されますか? {#if-i-add-my-credit-card-and-upgrade-before-my-trial-period-and-credits-expire-will-i-be-charged} - -ユーザーが30日間のトライアル期間の終了前にトライアルから有料に変換し、トライアルクレジットが残っている場合、 -初期30日間のトライアル期間中はトライアルクレジットから継続して引き落とされ、その後クレジットカードに請求されます。 - -### 自分の支出を追跡する方法は? 
{#how-can-i-keep-track-of-my-spending} - -ClickHouse Cloudコンソールには、サービスごとの使用詳細を表示するUsage表示が用意されています。この内訳は、使用次元に整理されており、それぞれの計測ユニットに関連するコストを理解するのに役立ちます。 - -### ClickHouse Cloudサービスのマーケットプレイスサブスクリプションの請求書にアクセスするにはどうすればよいですか? {#how-do-i-access-my-invoice-for-my-marketplace-subscription-to-the-clickhouse-cloud-service} - -すべてのマーケットプレイスサブスクリプションは、マーケットプレイスによって請求および請求書が発行されます。請求書は、各クラウドプロバイダーのマーケットプレイスを通じて直接表示できます。 - -### 使用状況明細書の日付がマーケットプレイスの請求書と一致しないのはなぜですか? {#why-do-the-dates-on-the-usage-statements-not-match-my-marketplace-invoice} - -AWS Marketplaceの請求はカレンダーの月のサイクルに従います。 -たとえば、2024年12月1日から2025年1月1日までの使用の場合、 -請求書は2025年1月3日から5日までの間に発行されます。 - -ClickHouse Cloudの使用状況明細書は、異なる請求サイクルに従い、使用状況はサインアップの日から始まり30日間測定されて報告されます。 - -これらの日付が異なる場合、使用状況および請求の日付は異なります。使用状況明細書は、特定のサービスの使用を日ごとに追跡するため、コストの内訳を確認するために明細書を信頼できます。 - -### 前払いクレジットの使用に制限はありますか? {#are-there-any-restrictions-around-the-usage-of-prepaid-credits} - -ClickHouse Cloudの前払いクレジット(ClickHouseを通じて直接、またはクラウドプロバイダーのマーケットプレイス経由)は -契約の条件に基づいてのみ利用可能です。 -これは、受け入れ日または将来の日に適用でき、過去の期間に対しては適用できないことを意味します。 -前払いクレジットでカバーされないオーバーは、クレジットカード支払いまたはマーケットプレイスの月額請求でカバーされる必要があります。 - -### クラウドプロバイダーのマーケットプレイスを通じて支払う場合と直接ClickHouseに支払う場合で、ClickHouse Cloudの価格に違いはありますか? {#is-there-a-difference-in-clickhouse-cloud-pricing-whether-paying-through-the-cloud-provider-marketplace-or-directly-to-clickhouse} - -マーケットプレイスの請求とClickHouseに直接サインアップする場合の価格には違いはありません。 -いずれの場合も、ClickHouse Cloudの使用はClickHouse Cloud Credits (CHCs)として追跡され、 -同じ方法で計測され、請求されます。 - -### コンピュート-コンピュート分離の請求はどうなりますか? {#how-is-compute-compute-separation-billed} - -既存のサービスに加えて新しいサービスを作成する場合、 -この新しいサービスが既存のサービスとデータを共有すべきかどうかを選択できます。 -はいの場合、これら2つのサービスは[ウェアハウス](../reference/warehouses.md)を形成します。 -ウェアハウスにはデータが1回のみ保存され、複数のコンピュートサービスがこのデータにアクセスします。 - -データが1回だけ保存されるため、複数のサービスがアクセスしていても、データの複製に対してのみ支払います。 -コンピュートに関しては通常通り支払いが発生し、コンピュート-コンピュート分離/ウェアハウスに対する追加料金はありません。 -このデプロイメントでは共有ストレージを活用することで、ストレージとバックアップのコスト削減の恩恵を得ることができます。 - -コンピュート-コンピュート分離は、場合によっては大量のClickHouse Creditsを節約できます。 -良い例は以下のようなセットアップです: - -1. 24時間体制でデータを取り込むETLジョブがあります。これらのETLジョブはあまりメモリを必要としないため、例えば、32 GiBのRAMの小さなインスタンスで実行できます。 - -2. 同じチームのデータサイエンティストが突発的なレポーティング要件があり、 significant amount of memory - 236 GiBが必要ですが、高い可用性は必要とせず、最初の実行が失敗した場合は待って再実行できます。 - -この例では、データベースの管理者として、次のことを行えます: - -1. 2つのレプリカを持つ小さなサービスを作成します。それぞれ16 GiB - これがETLジョブを満たし、高い可用性を提供します。 - -2. データサイエンティストのために、同じウェアハウス内に236 GiBの1レプリカのみの2番目のサービスを作成できます。このサービスに対してアイリングを有効にすることで、データサイエンティストが使用していないときはこのサービスに対して支払わないようにします。 - -この例の**スケールティア**に関するコスト見積り(毎月): -- 親サービスは24時間稼働:2レプリカ x 16 GiB 4 vCPU(各レプリカ) -- 子サービス:1レプリカ x 236 GiB 59 vCPU(各レプリカ) -- 3 TBの圧縮データ + 1バックアップ -- 100 GBのパブリックインターネットのデータ転送 -- 50 GBのクロスリージョンデータ転送 - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
| | 子サービス<br/>1日1時間稼働 | 子サービス<br/>1日2時間稼働 | 子サービス<br/>1日4時間稼働 |
|---|---|---|---|
| コンピュート | \$1,142.43 | \$1,410.97 | \$1,948.05 |
| ストレージ | \$151.80 | \$151.80 | \$151.80 |
| パブリックインターネットのデータ転送 | \$11.52 | \$11.52 | \$11.52 |
| クロスリージョンデータ転送 | \$1.56 | \$1.56 | \$1.56 |
| 合計 | \$1,307.31 | \$1,575.85 | \$2,112.93 |
- -ウェアハウスがない場合、データエンジニアがクエリに必要とするメモリの量に対して支払わなければなりませんでした。 -しかし、2つのサービスをウェアハウスで結合し、一方をアイリングすることでお金を節約できます。 - -## ClickPipes料金 {#clickpipes-pricing} - -### ClickPipesの料金構成はどのようになりますか? {#what-does-the-clickpipes-pricing-structure-look-like} - -2つの次元から構成されています。 - -- **コンピュート**: 1時間当たりの単価 - コンピュートは、ClickPipesレプリカポッドがデータを取り込むかどうかに関わらず、実行するコストを示します。 - すべてのClickPipesタイプに適用されます。 -- **取り込まれたデータ**: GB当たりの価格設定 - 取り込まれたデータレートは、すべてのストリーミングClickPipes - (Kafka、Confluent、Amazon MSK、Amazon Kinesis、Redpanda、WarpStream、Azure Event Hubs) - のレプリカポッドを介して転送されたデータに適用されます。取り込まれたデータサイズ(GB)は、ソースから受信したバイト数に基づいて請求されます(圧縮されていてもされていなくても)。 - -### ClickPipesレプリカとは何ですか? {#what-are-clickpipes-replicas} - -ClickPipesは、ClickHouse Cloudサービスとは独立して実行およびスケールする専用インフラストラクチャを介してリモートデータソースからデータを取り込みます。 -このため、専用のコンピュートレプリカを使用します。 - -### レプリカのデフォルト数とサイズは何ですか? {#what-is-the-default-number-of-replicas-and-their-size} - -各ClickPipeは、2 GiBのRAMと0.5 vCPUが提供される1レプリカがデフォルトです。 -これは**0.25** ClickHouseコンピュートユニット(1ユニット = 8 GiB RAM、2 vCPUs)に相当します。 - -### ClickPipesの公表価格は何ですか? {#what-are-the-clickpipes-public-prices} - -- コンピュート: \$0.20 /単位 /時間(\$0.05 /レプリカ /時間) -- 取り込まれたデータ: \$0.04 /GB - -### 例としてはどのようになりますか? {#how-does-it-look-in-an-illustrative-example} - -以下の例では、明示的に記載されていない限り、単一のレプリカを仮定します。 - - - - - - - - - - - - - - - - - - - - - - -
| | 24時間で100 GB | 24時間で1 TB | 24時間で10 TB |
|---|---|---|---|
| ストリーミングClickPipe | (0.25 x 0.20 x 24) + (0.04 x 100) = \$5.20 | (0.25 x 0.20 x 24) + (0.04 x 1000) = \$41.20 | 4レプリカの場合:<br/>(0.25 x 0.20 x 24 x 4) + (0.04 x 10000) = \$404.80 |
| オブジェクトストレージClickPipe $^1$ | (0.25 x 0.20 x 24) = \$1.20 | (0.25 x 0.20 x 24) = \$1.20 | (0.25 x 0.20 x 24) = \$1.20 |
- -$^1$ _オーケストレーション用のClickPipesコンピュートのみ。 -実際のデータ転送は基盤となるClickhouseサービスによって想定されています_ diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/billing.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/billing.md.hash deleted file mode 100644 index 6b0f436f93b..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/billing.md.hash +++ /dev/null @@ -1 +0,0 @@ -6f8b002f7214bf00 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/billing/index.md b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/billing/index.md deleted file mode 100644 index 8822b1f195c..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/billing/index.md +++ /dev/null @@ -1,21 +0,0 @@ ---- -slug: '/cloud/manage/billing' -title: '請求' -description: '請求の目次ページ。' -keywords: -- 'billing' -- 'payment thresholds' -- 'trouble shooting' -- 'marketplace' ---- - - - -このドキュメントのこのセクションは、請求に関連するトピックをカバーしており、以下のページが含まれています: - -| ページ | 説明 | -|---------------------------------------------------|---------------------------------------------------------------------| -| [概要](/cloud/marketplace/marketplace-billing) | マーケットプレイス請求の概要とFAQページ。 | -| [支払いの閾値](/cloud/billing/payment-thresholds) | 支払いの閾値がどのように機能するか、そしてそれらを調整する方法について学びます。 | -| [請求問題のトラブルシューティング](/manage/troubleshooting-billing-issues) | 一般的な請求問題をトラブルシューティングします。 | -| [マーケットプレイス](/cloud/manage/) | その他のマーケットプレイス関連トピックのランディングページ。 | diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/billing/index.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/billing/index.md.hash deleted file mode 100644 index f5f25464bb5..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/billing/index.md.hash +++ /dev/null @@ -1 +0,0 @@ -fbab6a1653f93ac7 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/billing/marketplace/aws-marketplace-committed.md b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/billing/marketplace/aws-marketplace-committed.md deleted file mode 100644 index 9da9c6c0dd3..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/billing/marketplace/aws-marketplace-committed.md +++ /dev/null @@ -1,99 +0,0 @@ ---- -slug: '/cloud/billing/marketplace/aws-marketplace-committed-contract' -title: 'AWS Marketplace Committed Contract' -description: 'Subscribe to ClickHouse Cloud through the AWS Marketplace (Committed - Contract)' -keywords: -- 'aws' -- 'amazon' -- 'marketplace' -- 'billing' -- 'committed' -- 'committed contract' ---- - -import Image from '@theme/IdealImage'; -import aws_marketplace_committed_1 from '@site/static/images/cloud/manage/billing/marketplace/aws-marketplace-committed-1.png'; -import aws_marketplace_payg_6 from '@site/static/images/cloud/manage/billing/marketplace/aws-marketplace-payg-6.png'; -import aws_marketplace_payg_7 from '@site/static/images/cloud/manage/billing/marketplace/aws-marketplace-payg-7.png'; -import aws_marketplace_payg_8 from '@site/static/images/cloud/manage/billing/marketplace/aws-marketplace-payg-8.png'; -import aws_marketplace_payg_9 from '@site/static/images/cloud/manage/billing/marketplace/aws-marketplace-payg-9.png'; -import aws_marketplace_payg_10 from '@site/static/images/cloud/manage/billing/marketplace/aws-marketplace-payg-10.png'; -import aws_marketplace_payg_11 from '@site/static/images/cloud/manage/billing/marketplace/aws-marketplace-payg-11.png'; -import aws_marketplace_payg_12 from 
'@site/static/images/cloud/manage/billing/marketplace/aws-marketplace-payg-12.png'; - -Get started with ClickHouse Cloud on the [AWS Marketplace](https://aws.amazon.com/marketplace) via a committed contract. A committed contract, also known as a Private Offer, allows customers to commit to spending a certain amount on ClickHouse Cloud over a period of time. - -## Prerequisites {#prerequisites} - -- ClickHouseからの特定の契約条件に基づいたPrivate Offer。 - -## Steps to sign up {#steps-to-sign-up} - -1. プライベートオファーの確認と受け入れのためのリンクが含まれたメールを受け取っているはずです。 - -
- -AWS Marketplace private offer email - -
- -2. メール内の **Review Offer** リンクをクリックします。これによりプライベートオファーの詳細が記載されたAWS Marketplaceのページに移動します。プライベートオファーを受け入れる際には、契約オプションのプルダウンリストで単位数を1に設定してください。 - -3. AWSポータルでのサブスクリプション手続きが完了したら、 **Set up your account** をクリックします。この時点でClickHouse Cloudにリダイレクトされ、新しいアカウントを登録するか、既存のアカウントでサインインする必要があります。このステップを完了しないと、AWS MarketplaceのサブスクリプションをClickHouse Cloudにリンクすることができません。 - -4. ClickHouse Cloudにリダイレクトされたら、既存のアカウントでログインするか、新しいアカウントを登録します。このステップは非常に重要で、あなたのClickHouse Cloud組織をAWS Marketplaceの請求に結びつけることができます。 - -
- -ClickHouse Cloud sign in page - -
- -新しいClickHouse Cloudユーザーの場合は、ページの下部で **Register** をクリックします。新しいユーザーを作成するように求められ、メールの確認を行います。メールを確認した後、ClickHouse Cloudのログインページを離れ、[https://console.clickhouse.cloud](https://console.clickhouse.cloud) で新しいユーザー名を使用してログインできます。 - -
- -ClickHouse Cloud sign up page - -
- -新しいユーザーの場合は、ビジネスに関する基本情報をいくつか提供する必要があることに注意してください。以下のスクリーンショットを参照してください。 - -
- -ClickHouse Cloud sign up info form - -
- -
- -ClickHouse Cloud sign up info form 2 - -
- -既存のClickHouse Cloudユーザーであれば、単に資格情報を使用してログインしてください。 - -5. ログインに成功すると、新しいClickHouse Cloud組織が作成されます。この組織はあなたのAWS請求アカウントに接続され、すべての使用量はAWSアカウントを通じて請求されます。 - -6. ログイン後、請求が実際にAWS Marketplaceに関連付けられていることを確認し、ClickHouse Cloudリソースの設定を開始できます。 - -
- -ClickHouse Cloud view AWS Marketplace billing - -
- -ClickHouse Cloud new services page - -
- -6. サインアップが確認された旨のメールを受け取るはずです: - -
- -AWS Marketplace confirmation email - -
- -問題が発生した場合は、[サポートチームにお問い合わせ](https://clickhouse.com/support/program)ください。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/billing/marketplace/aws-marketplace-committed.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/billing/marketplace/aws-marketplace-committed.md.hash deleted file mode 100644 index 03465181c10..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/billing/marketplace/aws-marketplace-committed.md.hash +++ /dev/null @@ -1 +0,0 @@ -75faa9566e51f2b8 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/billing/marketplace/aws-marketplace-payg.md b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/billing/marketplace/aws-marketplace-payg.md deleted file mode 100644 index 52a10c1a7f3..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/billing/marketplace/aws-marketplace-payg.md +++ /dev/null @@ -1,140 +0,0 @@ ---- -slug: '/cloud/billing/marketplace/aws-marketplace-payg' -title: 'AWS Marketplace PAYG' -description: 'AWS Marketplaceを通じてClickHouse Cloudに登録(PAYG)します。' -keywords: -- 'aws' -- 'marketplace' -- 'billing' -- 'PAYG' ---- - -import aws_marketplace_payg_1 from '@site/static/images/cloud/manage/billing/marketplace/aws-marketplace-payg-1.png'; -import aws_marketplace_payg_2 from '@site/static/images/cloud/manage/billing/marketplace/aws-marketplace-payg-2.png'; -import aws_marketplace_payg_3 from '@site/static/images/cloud/manage/billing/marketplace/aws-marketplace-payg-3.png'; -import aws_marketplace_payg_4 from '@site/static/images/cloud/manage/billing/marketplace/aws-marketplace-payg-4.png'; -import aws_marketplace_payg_5 from '@site/static/images/cloud/manage/billing/marketplace/aws-marketplace-payg-5.png'; -import aws_marketplace_payg_6 from '@site/static/images/cloud/manage/billing/marketplace/aws-marketplace-payg-6.png'; -import aws_marketplace_payg_7 from '@site/static/images/cloud/manage/billing/marketplace/aws-marketplace-payg-7.png'; -import aws_marketplace_payg_8 from '@site/static/images/cloud/manage/billing/marketplace/aws-marketplace-payg-8.png'; -import aws_marketplace_payg_9 from '@site/static/images/cloud/manage/billing/marketplace/aws-marketplace-payg-9.png'; -import aws_marketplace_payg_10 from '@site/static/images/cloud/manage/billing/marketplace/aws-marketplace-payg-10.png'; -import aws_marketplace_payg_11 from '@site/static/images/cloud/manage/billing/marketplace/aws-marketplace-payg-11.png'; -import aws_marketplace_payg_12 from '@site/static/images/cloud/manage/billing/marketplace/aws-marketplace-payg-12.png'; -import Image from '@theme/IdealImage'; - -Get started with ClickHouse Cloud on the [AWS Marketplace](https://aws.amazon.com/marketplace) via a PAYG (Pay-as-you-go) Public Offer. - -## Prerequisites {#prerequisites} - -- 購入権限が付与されたAWSアカウントが必要です。 -- 購入するには、このアカウントでAWSマーケットプレイスにログインしている必要があります。 - -## Steps to sign up {#steps-to-sign-up} - -1. [AWS Marketplace](https://aws.amazon.com/marketplace) に移動し、ClickHouse Cloudを検索します。 - -
- -AWS Marketplace home page - -
- -2. [リスティング](https://aws.amazon.com/marketplace/pp/prodview-jettukeanwrfc)をクリックし、次に**購入オプションを見る**をクリックします。 - -
- -AWS Marketplace search for ClickHouse - -
- -3. 次の画面で契約を構成します: -- **契約期間** - PAYG契約は月単位で行われます。 -- **更新設定** - 契約を自動更新するかどうか設定できます。 -自動更新を有効にしない場合、請求サイクルの終了時に組織は自動的に猶予期間に入ります。 - -- **契約オプション** - このテキストボックスには任意の数字(または1)を入力できます。これは、公共のオファーの単価が$0であるため、支払う価格には影響しません。これらの単位は通常、ClickHouse Cloudからのプライベートオファーを受け入れる際に使用されます。 - -- **発注書** - これはオプションであり、無視して構いません。 - -
- -AWS Marketplace configure contract - -
- -上記の情報を入力したら、**契約を作成**をクリックします。表示された契約価格がゼロドルであることを確認でき、これは実質的に支払いがなく、使用に基づいて請求されることを意味します。 - -
- -AWS Marketplace confirm contract - -
- -4. **契約を作成**をクリックすると、確認と支払い($0が未払い)を行うためのモーダルが表示されます。 - -5. **今すぐ支払う**をクリックすると、AWSマーケットプレイスのClickHouse Cloudオファーに購読したことを確認するメッセージが表示されます。 - -
- -AWS Marketplace payment confirmation - -
- -6. この時点では、セットアップはまだ完了していないことに注意してください。**アカウントを設定する**をクリックしてClickHouse Cloudにリダイレクトし、ClickHouse Cloudにサインアップする必要があります。 - -7. ClickHouse Cloudにリダイレクトされたら、既存のアカウントでログインするか、新しいアカウントで登録できます。このステップは非常に重要で、あなたのClickHouse Cloud組織をAWSマーケットプレイスの請求に結びつけるために必要です。 - -
- -ClickHouse Cloud sign in page - -
- -新しいClickHouse Cloudユーザーの場合は、ページの下部にある**登録**をクリックします。新しいユーザーを作成し、メールを確認するように求められます。メールを確認した後、ClickHouse Cloudのログインページを離れ、新しいユーザー名を使って[https://console.clickhouse.cloud](https://console.clickhouse.cloud)にログインできます。 - -
- -ClickHouse Cloud sign up page - -
- -新しいユーザーの場合、ビジネスに関する基本情報も提供する必要があることに注意してください。以下のスクリーンショットを参照してください。 - -
- -ClickHouse Cloud sign up info form - -
- -
- -ClickHouse Cloud sign up info form 2 - -
- -既存のClickHouse Cloudユーザーの場合は、単に資格情報を使ってログインしてください。 - -8. ログインが成功すると、新しいClickHouse Cloud組織が作成されます。この組織はあなたのAWS請求アカウントに接続され、すべての使用はあなたのAWSアカウントを通じて請求されます。 - -9. ログインすると、請求が実際にAWSマーケットプレイスに結びついていることを確認でき、ClickHouse Cloudリソースの設定を開始できます。 - -
- -ClickHouse Cloud view AWS Marketplace billing - -
- -ClickHouse Cloud new services page - -
- -10. サインアップ確認のメールが届くはずです: - -
- -AWS Marketplace confirmation email - -
- -問題が発生した場合は、[サポートチーム](https://clickhouse.com/support/program)にお気軽にお問い合わせください。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/billing/marketplace/aws-marketplace-payg.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/billing/marketplace/aws-marketplace-payg.md.hash deleted file mode 100644 index 7d890aba13d..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/billing/marketplace/aws-marketplace-payg.md.hash +++ /dev/null @@ -1 +0,0 @@ -772895e4efa284cf diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/billing/marketplace/azure-marketplace-committed.md b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/billing/marketplace/azure-marketplace-committed.md deleted file mode 100644 index 4c9f9b8fe6f..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/billing/marketplace/azure-marketplace-committed.md +++ /dev/null @@ -1,143 +0,0 @@ ---- -slug: '/cloud/billing/marketplace/azure-marketplace-committed-contract' -title: 'Azure Marketplace Committed Contract' -description: 'Azure Marketplace (Committed Contract) を通じて ClickHouse Cloud に登録する' -keywords: -- 'Microsoft' -- 'Azure' -- 'marketplace' -- 'billing' -- 'committed' -- 'committed contract' ---- - -import Image from '@theme/IdealImage'; -import azure_marketplace_committed_1 from '@site/static/images/cloud/manage/billing/marketplace/azure-marketplace-committed-1.png'; -import azure_marketplace_committed_2 from '@site/static/images/cloud/manage/billing/marketplace/azure-marketplace-committed-2.png'; -import azure_marketplace_committed_3 from '@site/static/images/cloud/manage/billing/marketplace/azure-marketplace-committed-3.png'; -import azure_marketplace_committed_4 from '@site/static/images/cloud/manage/billing/marketplace/azure-marketplace-committed-4.png'; -import azure_marketplace_committed_5 from '@site/static/images/cloud/manage/billing/marketplace/azure-marketplace-committed-5.png'; -import azure_marketplace_committed_6 from '@site/static/images/cloud/manage/billing/marketplace/azure-marketplace-committed-6.png'; -import azure_marketplace_committed_7 from '@site/static/images/cloud/manage/billing/marketplace/azure-marketplace-committed-7.png'; -import azure_marketplace_committed_8 from '@site/static/images/cloud/manage/billing/marketplace/azure-marketplace-committed-8.png'; -import azure_marketplace_committed_9 from '@site/static/images/cloud/manage/billing/marketplace/azure-marketplace-committed-9.png'; -import aws_marketplace_payg_8 from '@site/static/images/cloud/manage/billing/marketplace/aws-marketplace-payg-8.png'; -import aws_marketplace_payg_9 from '@site/static/images/cloud/manage/billing/marketplace/aws-marketplace-payg-9.png'; -import azure_marketplace_payg_11 from '@site/static/images/cloud/manage/billing/marketplace/azure-marketplace-payg-11.png'; -import azure_marketplace_payg_12 from '@site/static/images/cloud/manage/billing/marketplace/azure-marketplace-payg-12.png'; - -Get started with ClickHouse Cloud on the [Azure Marketplace](https://azuremarketplace.microsoft.com/en-us/marketplace/apps) via a committed contract. A committed contract, also known as a a Private Offer, allows customers to commit to spending a certain amount on ClickHouse Cloud over a period of time. - -## Prerequisites {#prerequisites} - -- ClickHouseからの特定の契約条件に基づくプライベートオファー。 - -## Steps to sign up {#steps-to-sign-up} - -1. プライベートオファーをレビューして受け入れるためのリンクを含むメールを受け取っているはずです。 - -
- -Azure Marketplace private offer email - -
- -2. メール内の**Review Private Offer**リンクをクリックします。これにより、プライベートオファーの詳細を含むAzure Marketplaceページに移動します。 - -
- -Azure Marketplace private offer details - -
- -3. オファーを受け入れると、**Private Offer Management**画面に移動します。Azureが購入用にオファーを準備するまでに少し時間がかかる場合があります。 - -
- -Azure Marketplace Private Offer Management page - -
- -Azure Marketplace Private Offer Management page loading - -
- -4. 数分後、ページをリフレッシュします。オファーは**Purchase**のために準備完了しているはずです。 - -
- -Azure Marketplace Private Offer Management page purchase enabled - -
- -5. **Purchase**をクリックします - フライアウトが開きます。以下を完了します: - -
- -- サブスクリプションとリソースグループ -- SaaSサブスクリプションの名前を提供します -- プライベートオファーのための請求プランを選択します。プライベートオファーが作成された期間(例えば、1年)のみ金額が表示されます。他の請求期間オプションは$0の金額となります。 -- 定期請求を希望するかどうかを選択します。定期請求が選択されていない場合、契約は請求期間の終了時に終了し、リソースは廃止されます。 -- **Review + subscribe**をクリックします。 - -
- -Azure Marketplace subscription form - -
- -6. 次の画面で、すべての詳細を確認し、**Subscribe**をクリックします。 - -
- -Azure Marketplace subscription confirmation - -
- -7. 次の画面には、**Your SaaS subscription in progress**が表示されます。 - -
- -Azure Marketplace subscription submitting page - -
- -8. 準備ができたら、**Configure account now**をクリックします。このステップは、AzureサブスクリプションをClickHouse Cloudの組織にバインドする重要なステップです。このステップなしでは、あなたのMarketplaceサブスクリプションは完了しません。 - -
- -Azure Marketplace configure account now button - -
- -9. ClickHouse Cloudのサインアップまたはサインインページにリダイレクトされます。新しいアカウントを使用してサインアップするか、既存のアカウントでサインインできます。サインインすると、新しい組織が作成され、Azure Marketplaceを通じて使用され、請求される準備が整います。 - -10. 進む前に、いくつかの質問 - 住所と会社の詳細 - に答える必要があります。 - -
- -ClickHouse Cloud sign up info form - -
- -ClickHouse Cloud sign up info form 2 - -
- -11. **Complete sign up**をクリックすると、ClickHouse Cloud内の組織に移動し、Azure Marketplaceを通じて請求されることを確認するための請求画面を見ることができます。 - -
- -
- -ClickHouse Cloud sign up info form - -
- -
- -ClickHouse Cloud sign up info form - -
- -問題が発生した場合は、[サポートチームに連絡する](https://clickhouse.com/support/program)ことをためらわないでください。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/billing/marketplace/azure-marketplace-committed.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/billing/marketplace/azure-marketplace-committed.md.hash deleted file mode 100644 index 75de449c4cd..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/billing/marketplace/azure-marketplace-committed.md.hash +++ /dev/null @@ -1 +0,0 @@ -1911c3fe90af480b diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/billing/marketplace/azure-marketplace-payg.md b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/billing/marketplace/azure-marketplace-payg.md deleted file mode 100644 index a4c618491f1..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/billing/marketplace/azure-marketplace-payg.md +++ /dev/null @@ -1,151 +0,0 @@ ---- -slug: '/cloud/billing/marketplace/azure-marketplace-payg' -title: 'Azure Marketplace PAYG' -description: 'Subscribe to ClickHouse Cloud through the Azure Marketplace (PAYG).' -keywords: -- 'azure' -- 'marketplace' -- 'billing' -- 'PAYG' ---- - -import Image from '@theme/IdealImage'; -import azure_marketplace_payg_1 from '@site/static/images/cloud/manage/billing/marketplace/azure-marketplace-payg-1.png'; -import azure_marketplace_payg_2 from '@site/static/images/cloud/manage/billing/marketplace/azure-marketplace-payg-2.png'; -import azure_marketplace_payg_3 from '@site/static/images/cloud/manage/billing/marketplace/azure-marketplace-payg-3.png'; -import azure_marketplace_payg_4 from '@site/static/images/cloud/manage/billing/marketplace/azure-marketplace-payg-4.png'; -import azure_marketplace_payg_5 from '@site/static/images/cloud/manage/billing/marketplace/azure-marketplace-payg-5.png'; -import azure_marketplace_payg_6 from '@site/static/images/cloud/manage/billing/marketplace/azure-marketplace-payg-6.png'; -import azure_marketplace_payg_7 from '@site/static/images/cloud/manage/billing/marketplace/azure-marketplace-payg-7.png'; -import azure_marketplace_payg_8 from '@site/static/images/cloud/manage/billing/marketplace/azure-marketplace-payg-8.png'; -import azure_marketplace_payg_9 from '@site/static/images/cloud/manage/billing/marketplace/azure-marketplace-payg-9.png'; -import azure_marketplace_payg_10 from '@site/static/images/cloud/manage/billing/marketplace/azure-marketplace-payg-10.png'; -import aws_marketplace_payg_8 from '@site/static/images/cloud/manage/billing/marketplace/aws-marketplace-payg-8.png'; -import aws_marketplace_payg_9 from '@site/static/images/cloud/manage/billing/marketplace/aws-marketplace-payg-9.png'; -import azure_marketplace_payg_11 from '@site/static/images/cloud/manage/billing/marketplace/azure-marketplace-payg-11.png'; -import azure_marketplace_payg_12 from '@site/static/images/cloud/manage/billing/marketplace/azure-marketplace-payg-12.png'; - -ClickHouse Cloudを[Azure Marketplace](https://azuremarketplace.microsoft.com/en-us/marketplace/apps)でPAYG(従量課金)パブリックオファーを通じて始めましょう。 - -## 前提条件 {#prerequisites} - -- 購入権限を持つ請求管理者によって有効化されたAzureプロジェクト。 -- Azure MarketplaceでClickHouse Cloudに登録するには、購入権限を持つアカウントでログインし、適切なプロジェクトを選択する必要があります。 - -1. [Azure Marketplace](https://azuremarketplace.microsoft.com/en-us/marketplace/apps)にアクセスし、ClickHouse Cloudを検索します。市場でオファーを購入できるように、ログインしていることを確認してください。 - -
- -ClickHouse Cloud sign up info form - -
- -2. 商品リストページで、**Get It Now**をクリックします。 - -
- -ClickHouse Cloud sign up info form - -
- -3. 次の画面で、名前、メール、および所在地情報を提供する必要があります。 - -
- -ClickHouse Cloud sign up info form - -
- -4. 次の画面で、**Subscribe**をクリックします。 - -
- -ClickHouse Cloud sign up info form - -
- -5. 次の画面で、サブスクリプション、リソースグループ、およびリソースグループの位置を選択します。リソースグループの位置は、ClickHouse Cloudでサービスを起動する予定の位置と同じである必要はありません。 - -
- -ClickHouse Cloud sign up info form - -
- -6. サブスクリプションの名前を提供する必要があり、利用可能なオプションから請求条件を選択する必要があります。「**Recurring billing**」をオンまたはオフに設定することができます。"オフ"に設定すると、請求期間が終了した後に契約が終了し、リソースは廃止されます。 - -
- -ClickHouse Cloud sign up info form - -
- -7. **"Review + subscribe"**をクリックします。 - -8. 次の画面で、すべてが正しいことを確認し、**Subscribe**をクリックします。 - -
- -ClickHouse Cloud sign up info form - -
- -9. この時点で、ClickHouse CloudのAzureサブスクリプションに登録されていますが、まだClickHouse Cloudでアカウントを設定していません。次のステップは、請求がAzure Marketplaceを通じて正しく行われるために、ClickHouse CloudがあなたのAzureサブスクリプションにバインドできるようにするために必要不可欠です。 - -
- -ClickHouse Cloud sign up info form - -
- -10. Azureのセットアップが完了すると、**Configure account now**ボタンがアクティブになります。 - -
- -ClickHouse Cloud sign up info form - -
- -11. **Configure account now**をクリックします。 - -
- -以下のようなメールが届き、アカウントの構成に関する詳細が記載されています: - -
- -ClickHouse Cloud sign up info form - -
- -12. ClickHouse Cloudのサインアップまたはサインインページにリダイレクトされます。新しいアカウントを使用してサインアップするか、既存のアカウントを使用してサインインできます。サインインすると、新しい組織が作成され、Azure Marketplaceを通じて使用および請求される準備が整います。 - -13. 先に進む前に、いくつかの質問 - 住所や会社の詳細 - に回答する必要があります。 - -
- -ClickHouse Cloud sign up info form - -
- -ClickHouse Cloud sign up info form 2 - -
- -14. **Complete sign up**をクリックすると、ClickHouse Cloud内の組織に移動され、請求画面を確認してAzure Marketplaceを通じて請求されていることを確認し、サービスを作成できるようになります。 - -
- -
- -ClickHouse Cloud sign up info form - -
- -
- -ClickHouse Cloud sign up info form - -
- -15. 問題が発生した場合は、[サポートチームに連絡する](https://clickhouse.com/support/program)ことをためらわないでください。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/billing/marketplace/azure-marketplace-payg.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/billing/marketplace/azure-marketplace-payg.md.hash deleted file mode 100644 index 230cd13426f..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/billing/marketplace/azure-marketplace-payg.md.hash +++ /dev/null @@ -1 +0,0 @@ -54eeaae68b329348 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/billing/marketplace/gcp-marketplace-committed.md b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/billing/marketplace/gcp-marketplace-committed.md deleted file mode 100644 index 278d67b2d9b..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/billing/marketplace/gcp-marketplace-committed.md +++ /dev/null @@ -1,146 +0,0 @@ ---- -slug: '/cloud/billing/marketplace/gcp-marketplace-committed-contract' -title: 'GCP Marketplace Committed Contract' -description: 'Subscribe to ClickHouse Cloud through the GCP Marketplace (Committed - Contract)' -keywords: -- 'gcp' -- 'google' -- 'marketplace' -- 'billing' -- 'committed' -- 'committed contract' ---- - -import Image from '@theme/IdealImage'; -import gcp_marketplace_committed_1 from '@site/static/images/cloud/manage/billing/marketplace/gcp-marketplace-committed-1.png'; -import gcp_marketplace_committed_2 from '@site/static/images/cloud/manage/billing/marketplace/gcp-marketplace-committed-2.png'; -import gcp_marketplace_committed_3 from '@site/static/images/cloud/manage/billing/marketplace/gcp-marketplace-committed-3.png'; -import gcp_marketplace_committed_4 from '@site/static/images/cloud/manage/billing/marketplace/gcp-marketplace-committed-4.png'; -import gcp_marketplace_committed_5 from '@site/static/images/cloud/manage/billing/marketplace/gcp-marketplace-committed-5.png'; -import gcp_marketplace_committed_6 from '@site/static/images/cloud/manage/billing/marketplace/gcp-marketplace-committed-6.png'; -import gcp_marketplace_committed_7 from '@site/static/images/cloud/manage/billing/marketplace/gcp-marketplace-committed-7.png'; -import aws_marketplace_payg_6 from '@site/static/images/cloud/manage/billing/marketplace/aws-marketplace-payg-6.png'; -import aws_marketplace_payg_7 from '@site/static/images/cloud/manage/billing/marketplace/aws-marketplace-payg-7.png'; -import aws_marketplace_payg_8 from '@site/static/images/cloud/manage/billing/marketplace/aws-marketplace-payg-8.png'; -import aws_marketplace_payg_9 from '@site/static/images/cloud/manage/billing/marketplace/aws-marketplace-payg-9.png'; -import gcp_marketplace_payg_5 from '@site/static/images/cloud/manage/billing/marketplace/gcp-marketplace-payg-5.png'; -import aws_marketplace_payg_11 from '@site/static/images/cloud/manage/billing/marketplace/aws-marketplace-payg-11.png'; -import gcp_marketplace_payg_6 from '@site/static/images/cloud/manage/billing/marketplace/gcp-marketplace-payg-6.png'; - -ClickHouse Cloud を [GCP Marketplace](https://console.cloud.google.com/marketplace) で利用開始するには、コミット契約を通じて行います。コミット契約は、プライベートオファーとも呼ばれ、顧客が一定の金額を ClickHouse Cloud に対して一定期間内に支払うことを約束するものです。 - -## 前提条件 {#prerequisites} - -- 特定の契約条件に基づく ClickHouse からのプライベートオファー。 - -## サインアップ手順 {#steps-to-sign-up} - -1. プライベートオファーを確認し、受け入れるためのリンクを含むメールを受け取っているはずです。 - -
- -GCP Marketplace プライベートオファーのメール - -
- -2. メール内の **Review Offer** リンクをクリックします。これにより、プライベートオファーの詳細が表示された GCP Marketplace ページに移動します。 - -
- -GCP Marketplace オファーの概要 - -
- -GCP Marketplace 価格の概要 - -
- -3. プライベートオファーの詳細を確認し、すべてが正しい場合は **Accept** をクリックします。 - -
- -GCP Marketplace 受諾ページ - -
- -4. **Go to product page** をクリックします。 - -
- -GCP Marketplace 受諾確認 - -
- -5. **Manage on provider** をクリックします。 - -
- -GCP Marketplace ClickHouse Cloud ページ - -
- -この時点で ClickHouse Cloud にリダイレクトし、サインアップまたはサインインを行うことが重要です。このステップを完了しないと、GCP Marketplace のサブスクリプションを ClickHouse Cloud にリンクすることができません。 - -
- -GCP Marketplace ウェブサイト離脱確認モーダル - -
- -6. ClickHouse Cloud にリダイレクトされたら、既存のアカウントでログインするか、新しいアカウントを登録できます。 - -
- -ClickHouse Cloud サインインページ - -
- -新しい ClickHouse Cloud ユーザーの場合は、ページの下部にある **Register** をクリックします。新しいユーザーを作成し、メールを確認するよう促されます。メールの確認後、ClickHouse Cloud のログインページを離れ、新しいユーザー名を使用して [https://console.clickhouse.cloud](https://console.clickhouse.cloud) にログインできます。 - -
- -ClickHouse Cloud サインアップページ - -
- -新しいユーザーの場合、ビジネスに関する基本情報を提供する必要があることに注意してください。以下のスクリーンショットを参照してください。 - -
- -ClickHouse Cloud サインアップ情報フォーム - -
- -ClickHouse Cloud サインアップ情報フォーム 2 - -
- -既存の ClickHouse Cloud ユーザーの場合は、資格情報を使用してログインしてください。 - -7. ログインに成功すると、新しい ClickHouse Cloud 組織が作成されます。この組織は、あなたの GCP 請求アカウントに接続され、すべての使用量があなたの GCP アカウントを通じて請求されます。 - -8. ログイン後、あなたの請求が実際に GCP Marketplace に紐付いていることを確認し、ClickHouse Cloud リソースの設定を開始できます。 - -
- -ClickHouse Cloud サインインページ - -
- -ClickHouse Cloud 新サービスページ - -
- -9. サインアップ確認のメールを受け取るはずです: - -
-
- -GCP Marketplace 確認メール - -
- -
- -問題が発生した場合は、[サポートチーム](https://clickhouse.com/support/program) にご連絡ください。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/billing/marketplace/gcp-marketplace-committed.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/billing/marketplace/gcp-marketplace-committed.md.hash deleted file mode 100644 index c265642e980..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/billing/marketplace/gcp-marketplace-committed.md.hash +++ /dev/null @@ -1 +0,0 @@ -cd061fff5e155c0a diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/billing/marketplace/gcp-marketplace-payg.md b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/billing/marketplace/gcp-marketplace-payg.md deleted file mode 100644 index 79cc060a329..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/billing/marketplace/gcp-marketplace-payg.md +++ /dev/null @@ -1,120 +0,0 @@ ---- -slug: '/cloud/billing/marketplace/gcp-marketplace-payg' -title: 'GCP Marketplace PAYG' -description: 'Subscribe to ClickHouse Cloud through the GCP Marketplace (PAYG).' -keywords: -- 'gcp' -- 'marketplace' -- 'billing' -- 'PAYG' ---- - -import gcp_marketplace_payg_1 from '@site/static/images/cloud/manage/billing/marketplace/gcp-marketplace-payg-1.png'; -import gcp_marketplace_payg_2 from '@site/static/images/cloud/manage/billing/marketplace/gcp-marketplace-payg-2.png'; -import gcp_marketplace_payg_3 from '@site/static/images/cloud/manage/billing/marketplace/gcp-marketplace-payg-3.png'; -import gcp_marketplace_payg_4 from '@site/static/images/cloud/manage/billing/marketplace/gcp-marketplace-payg-4.png'; -import aws_marketplace_payg_6 from '@site/static/images/cloud/manage/billing/marketplace/aws-marketplace-payg-6.png'; -import aws_marketplace_payg_7 from '@site/static/images/cloud/manage/billing/marketplace/aws-marketplace-payg-7.png'; -import aws_marketplace_payg_8 from '@site/static/images/cloud/manage/billing/marketplace/aws-marketplace-payg-8.png'; -import aws_marketplace_payg_9 from '@site/static/images/cloud/manage/billing/marketplace/aws-marketplace-payg-9.png'; -import gcp_marketplace_payg_5 from '@site/static/images/cloud/manage/billing/marketplace/gcp-marketplace-payg-5.png'; -import aws_marketplace_payg_11 from '@site/static/images/cloud/manage/billing/marketplace/aws-marketplace-payg-11.png'; -import gcp_marketplace_payg_6 from '@site/static/images/cloud/manage/billing/marketplace/gcp-marketplace-payg-6.png'; -import Image from '@theme/IdealImage'; - -ClickHouse Cloudを[GCP Marketplace](https://console.cloud.google.com/marketplace)でPAYG(従量課金)公共オファーを通じて始めましょう。 - -## 必要条件 {#prerequisites} - -- 請求管理者によって購入権が有効化されているGCPプロジェクト。 -- GCP MarketplaceでClickHouse Cloudをサブスクライブするには、購入権を持つアカウントでログインし、適切なプロジェクトを選択する必要があります。 - -## サインアップの手順 {#steps-to-sign-up} - -1. [GCP Marketplace](https://cloud.google.com/marketplace)に行き、ClickHouse Cloudを検索します。正しいプロジェクトが選択されていることを確認してください。 - -GCP Marketplaceのホームページ - -2. [リスティング](https://console.cloud.google.com/marketplace/product/clickhouse-public/clickhouse-cloud)をクリックし、次に**Subscribe**をクリックします。 - -GCP MarketplaceのClickHouse Cloud - -3. 次の画面で、サブスクリプションを設定します: - -- プランはデフォルトで「ClickHouse Cloud」になります。 -- サブスクリプションの期間は「毎月」です。 -- 適切な請求アカウントを選択します。 -- 利用規約に同意し、**Subscribe**をクリックします。 - -
- -GCP Marketplaceでのサブスクリプション設定 - -
- -4. **Subscribe**をクリックすると、**Sign up with ClickHouse**のモーダルが表示されます。 - -
- -GCP Marketplaceのサインアップモーダル - -
- -5. この時点では、セットアップはまだ完了していないことに注意してください。**Set up your account**をクリックしてClickHouse Cloudにリダイレクトし、ClickHouse Cloudにサインアップする必要があります。 - -6. ClickHouse Cloudにリダイレクトされたら、既存のアカウントでログインするか、新しいアカウントを登録できます。このステップは非常に重要で、ClickHouse Cloudの組織をGCP Marketplaceの請求に結び付けることができます。 - -
- -ClickHouse Cloudのサインインページ - -
- -新しいClickHouse Cloudユーザーの場合は、ページの下部にある**Register**をクリックします。新しいユーザーを作成し、メールを確認するように求められます。メールを確認した後、ClickHouse Cloudのログインページを離れて、[https://console.clickhouse.cloud](https://console.clickhouse.cloud)で新しいユーザー名を使用してログインできます。 - -
- -ClickHouse Cloudのサインアップページ - -
- -新しいユーザーの場合、ビジネスに関する基本情報を提供する必要があることに注意してください。以下のスクリーンショットを参照してください。 - -
- -ClickHouse Cloudサインアップ情報フォーム - -
- -ClickHouse Cloudサインアップ情報フォーム2 - -
- -既存のClickHouse Cloudユーザーの場合は、単に資格情報を使用してログインします。 - -7. ログインが成功すると、新しいClickHouse Cloud組織が作成されます。この組織はあなたのGCP請求アカウントに接続され、すべての利用がGCPアカウントを通じて請求されます。 - -8. ログイン後、請求が実際にGCP Marketplaceに結び付けられていることを確認し、ClickHouse Cloudリソースの設定を開始できます。 - -
- -ClickHouse Cloudのサインインページ - -
- -ClickHouse Cloudの新しいサービスページ - -
- -9. サインアップを確認するメールを受け取るべきです: - -
-
- -GCP Marketplace確認メール - -
- -
- -問題が発生した場合は、[サポートチーム](https://clickhouse.com/support/program)にお問い合わせください。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/billing/marketplace/gcp-marketplace-payg.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/billing/marketplace/gcp-marketplace-payg.md.hash deleted file mode 100644 index 86ac0d2d692..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/billing/marketplace/gcp-marketplace-payg.md.hash +++ /dev/null @@ -1 +0,0 @@ -5a4b47d7cbaa8a2e diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/billing/marketplace/index.md b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/billing/marketplace/index.md deleted file mode 100644 index 09f1cffc474..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/billing/marketplace/index.md +++ /dev/null @@ -1,21 +0,0 @@ ---- -slug: '/cloud/manage/marketplace/' -title: 'マーケットプレイス' -description: 'マーケットプレイスの目次ページ' -keywords: -- 'Marketplace Billing' -- 'AWS' -- 'GCP' ---- - -このセクションでは、マーケットプレイスに関連する請求トピックについて詳しく説明します。 - -| ページ | 説明 | -|---------------------------------------------------------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| [マーケットプレイスの請求](/cloud/marketplace/marketplace-billing) | マーケットプレイスの請求に関するFAQ。 | -| [AWSマーケットプレイス PAYG](/cloud/billing/marketplace/aws-marketplace-payg) | PAYG(従量課金制)のパブリックオファーを通じてAWSマーケットプレイスでClickHouse Cloudの使用を開始します。 | -| [AWSマーケットプレイスのコミット契約](/cloud/billing/marketplace/aws-marketplace-committed-contract) | コミット契約を通じてAWSマーケットプレイスでClickHouse Cloudの使用を開始します。コミット契約は、プライベートオファーとも呼ばれ、顧客が特定の期間にわたってClickHouse Cloudに一定の金額を支出することを約束するものです。 | -| [GCPマーケットプレイス PAYG](/cloud/billing/marketplace/gcp-marketplace-payg) | PAYG(従量課金制)のパブリックオファーを通じてGCPマーケットプレイスでClickHouse Cloudの使用を開始します。 | -| [GCPマーケットプレイスのコミット契約](/cloud/billing/marketplace/gcp-marketplace-committed-contract) | コミット契約を通じてGCPマーケットプレイスでClickHouse Cloudの使用を開始します。コミット契約は、プライベートオファーとも呼ばれ、顧客が特定の期間にわたってClickHouse Cloudに一定の金額を支出することを約束するものです。 | -| [Azureマーケットプレイス PAYG](/cloud/billing/marketplace/azure-marketplace-payg) | PAYG(従量課金制)のパブリックオファーを通じてAzureマーケットプレイスでClickHouse Cloudの使用を開始します。 | -| [Azureマーケットプレイスのコミット契約](/cloud/billing/marketplace/azure-marketplace-committed-contract) | コミット契約を通じてAzureマーケットプレイスでClickHouse Cloudの使用を開始します。コミット契約は、プライベートオファーとも呼ばれ、顧客が特定の期間にわたってClickHouse Cloudに一定の金額を支出することを約束するものです。 | diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/billing/marketplace/index.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/billing/marketplace/index.md.hash deleted file mode 100644 index 8f65372a8b0..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/billing/marketplace/index.md.hash +++ /dev/null @@ -1 +0,0 @@ -dd80e7eb1ab38733 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/billing/marketplace/overview.md b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/billing/marketplace/overview.md deleted file mode 100644 index 6ce4ff2fd1a..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/billing/marketplace/overview.md +++ /dev/null @@ -1,98 +0,0 @@ ---- -slug: '/cloud/marketplace/marketplace-billing' -title: 'Marketplace Billing' -description: 'Subscribe to ClickHouse 
Cloud through the AWS, GCP, and Azure marketplace.' -keywords: -- 'aws' -- 'azure' -- 'gcp' -- 'google cloud' -- 'marketplace' -- 'billing' ---- - -import Image from '@theme/IdealImage'; -import marketplace_signup_and_org_linking from '@site/static/images/cloud/manage/billing/marketplace/marketplace_signup_and_org_linking.png' - -You can subscribe to ClickHouse Cloud through the AWS, GCP, and Azure marketplaces. This allows you to pay for ClickHouse Cloud through your existing cloud provider billing. - -You can either use pay-as-you-go (PAYG) or commit to a contract with ClickHouse Cloud through the marketplace. The billing will be handled by the cloud provider, and you will receive a single invoice for all your cloud services. - -- [AWS Marketplace PAYG](/cloud/billing/marketplace/aws-marketplace-payg) -- [AWS Marketplace Committed Contract](/cloud/billing/marketplace/aws-marketplace-committed-contract) -- [GCP Marketplace PAYG](/cloud/billing/marketplace/gcp-marketplace-payg) -- [GCP Marketplace Committed Contract](/cloud/billing/marketplace/gcp-marketplace-committed-contract) -- [Azure Marketplace PAYG](/cloud/billing/marketplace/azure-marketplace-payg) -- [Azure Marketplace Committed Contract](/cloud/billing/marketplace/azure-marketplace-committed-contract) - -## FAQs {#faqs} - -### How can I verify that my organization is connected to marketplace billing? {#how-can-i-verify-that-my-organization-is-connected-to-marketplace-billing} - -In the ClickHouse Cloud console, navigate to **Billing**. You should see the name of the marketplace and the link in the **Payment details** section. - -### I am an existing ClickHouse Cloud user. What happens when I subscribe to ClickHouse Cloud via AWS / GCP / Azure marketplace? {#i-am-an-existing-clickhouse-cloud-user-what-happens-when-i-subscribe-to-clickhouse-cloud-via-aws--gcp--azure-marketplace} - -Signing up for ClickHouse Cloud from the cloud provider marketplace is a two-step process: -1. You first "subscribe" to ClickHouse Cloud on the cloud provider's marketplace portal. After you have finished subscribing, you click on "Pay Now" or "Manage on Provider" (depending on the marketplace). This redirects you to ClickHouse Cloud. -2. On ClickHouse Cloud you either register for a new account, or sign in with an existing account. Either way, a new ClickHouse Cloud organization tied to your marketplace billing will be created for you. - -NOTE: Your existing services and organizations from any prior ClickHouse Cloud signups will remain and they will not be connected to the marketplace billing. ClickHouse Cloud allows you to use the same account to manage multiple organizations, each with different billing. - -You can switch between organizations from the bottom left menu of the ClickHouse Cloud console. - -### I am an existing ClickHouse Cloud user. What should I do if I want my existing services to be billed via marketplace? {#i-am-an-existing-clickhouse-cloud-user-what-should-i-do-if-i-want-my-existing-services-to-be-billed-via-marketplace} - -You will need to subscribe to ClickHouse Cloud via the cloud provider marketplace. Once you finish subscribing on the marketplace and are redirected to ClickHouse Cloud, you will have the option of linking an existing ClickHouse Cloud organization to marketplace billing. From that point on, your existing resources will be billed via the marketplace. - -Marketplace signup and org linking - -You can confirm from the organization's billing page that billing is indeed now linked to the marketplace.
Please contact [ClickHouse Cloud support](https://clickhouse.com/support/program) if you run into any issues. - -:::note -Your existing services and organizations from any prior ClickHouse Cloud signups will remain and not be connected to the marketplace billing. -::: - -### I subscribed to ClickHouse Cloud as a marketplace user. How can I unsubscribe?​ {#i-subscribed-to-clickhouse-cloud-as-a-marketplace-user-how-can-i-unsubscribe} - -Note that you can simply stop using ClickHouse Cloud and delete all existing ClickHouse Cloud services. Even though the subscription will still be active, you will not be paying anything as ClickHouse Cloud doesn't have any recurring fees. - -If you want to unsubscribe, please navigate to the Cloud Provider console and cancel the subscription renewal there. Once the subscription ends, all existing services will be stopped and you will be prompted to add a credit card. If no card was added, after two weeks all existing services will be deleted. - -### I subscribed to ClickHouse Cloud as a marketplace user, and then unsubscribed. Now I want to subscribe back, what is the process?​ {#i-subscribed-to-clickhouse-cloud-as-a-marketplace-user-and-then-unsubscribed-now-i-want-to-subscribe-back-what-is-the-process} - -In that case please subscribe to the ClickHouse Cloud as usual (see sections on subscribing to ClickHouse Cloud via the marketplace). - -- For AWS marketplace a new ClickHouse Cloud organization will be created and connected to the marketplace. -- For the GCP marketplace your old organization will be reactivated. - -If you have any trouble with reactivating your marketplace org, please contact [ClickHouse Cloud Support](https://clickhouse.com/support/program). - -### How do I access my invoice for my marketplace subscription to the ClickHouse Cloud service?​ {#how-do-i-access-my-invoice-for-my-marketplace-subscription-to-the-clickhouse-cloud-service} - -- [AWS billing Console](https://us-east-1.console.aws.amazon.com/billing/home) -- [GCP Marketplace orders](https://console.cloud.google.com/marketplace/orders) (select the billing account that you used for subscription) - -### Why do the dates on the Usage statements not match my Marketplace Invoice?​ {#why-do-the-dates-on-the-usage-statements-not-match-my-marketplace-invoice} - -Marketplace billing follows the calendar month cycle. For example, for usage between December 1st and January 1st, an invoice will be generated between January 3rd and January 5th. - -ClickHouse Cloud usage statements follow a different billing cycle where usage is metered and reported over 30 days starting from the day of sign up. - -The usage and invoice dates will differ if these dates are not the same. Since usage statements track usage by day for a given service, users can rely on statements to see the breakdown of costs. - -### Where can I find general billing information​? {#where-can-i-find-general-billing-information} - -Please see the [Billing overview page](/cloud/manage/billing). - -### Is there a difference in ClickHouse Cloud pricing, whether paying through the cloud provider marketplace or directly to ClickHouse? {#is-there-a-difference-in-clickhouse-cloud-pricing-whether-paying-through-the-cloud-provider-marketplace-or-directly-to-clickhouse} - -There is no difference in pricing between marketplace billing and signing up directly with ClickHouse. In either case, your usage of ClickHouse Cloud is tracked in terms of ClickHouse Cloud Credits (CHCs), which are metered in the same way and billed accordingly. 
- -### Can I set up multiple ClickHouse Organizations to bill to a single cloud marketplace billing account or sub account (AWS, GCP, or Azure)? {#multiple-organizations-to-bill-to-single-cloud-marketplace-account} - -A single ClickHouse organization can only be configured to bill to a single Cloud marketplace billing account or sub account. - -### If my ClickHouse Organization is billed through a cloud marketplace committed spend agreement will I automatically move to PAYG billing when I run out of credits? {#automatically-move-to-PAYG-when-running-out-of-credit} - -If your marketplace committed spend contract is active and you run out of credits we will automatically move your organization to PAYG billing. However, when your existing contract expires, you will need to link a new marketplace contract to your organization or move your organization to direct billing via credit card. diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/billing/marketplace/overview.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/billing/marketplace/overview.md.hash deleted file mode 100644 index d796d9d41a3..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/billing/marketplace/overview.md.hash +++ /dev/null @@ -1 +0,0 @@ -00f0892aa3b90de5 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/billing/payment-thresholds.md b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/billing/payment-thresholds.md deleted file mode 100644 index 20534b6f3c5..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/billing/payment-thresholds.md +++ /dev/null @@ -1,26 +0,0 @@ ---- -sidebar_label: 'Payment Thresholds' -slug: '/cloud/billing/payment-thresholds' -title: 'Payment Thresholds' -description: 'Payment thresholds and automatic invoicing for ClickHouse Cloud.' 
-keywords: -- 'billing' -- 'payment thresholds' -- 'automatic invoicing' -- 'invoice' ---- - - - - -# 支払いの閾値 - -ClickHouse Cloud の請求期間において、未払い額が $10,000 USD、またはその相当額に達すると、お客様の支払い方法が自動的に請求されます。請求が失敗した場合、猶予期間の後にサービスが一時停止または終了されることになります。 - -:::note -この支払いの閾値は、ClickHouse と契約した支出契約や他の交渉された契約を持つ顧客には適用されません。 -::: - -お客様の組織が支払いの閾値の90%に達し、期間の途中で支払いの閾値を超える見込みがある場合、組織に関連付けられた請求メールに通知が送信されます。支払いの閾値を超えた際には、通知のメールと請求書も送信されます。 - -これらの支払いの閾値は、顧客からのリクエストや ClickHouse の財務チームによって $10,000 未満に調整可能です。ご質問がある場合は、support@clickhouse.com にお問い合わせください。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/billing/payment-thresholds.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/billing/payment-thresholds.md.hash deleted file mode 100644 index f579328e279..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/billing/payment-thresholds.md.hash +++ /dev/null @@ -1 +0,0 @@ -f38b52780b999ee1 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/cloud-tiers.md b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/cloud-tiers.md deleted file mode 100644 index d64391600e7..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/cloud-tiers.md +++ /dev/null @@ -1,210 +0,0 @@ ---- -sidebar_label: 'ClickHouse Cloud Tiers' -slug: '/cloud/manage/cloud-tiers' -title: 'ClickHouse Cloud Tiers' -description: 'Cloud tiers available in ClickHouse Cloud' ---- - - - - -# ClickHouse Cloud Tiers - -ClickHouse Cloudには、いくつかのティアが用意されています。 -ティアは、任意の組織レベルで割り当てられます。したがって、組織内のサービスは同じティアに属します。 -このページでは、特定のユースケースに適したティアについて説明します。 - -**クラウドティアの概要:** - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|   | [Basic](#basic) | [Scale (推奨)](#scale) | [Enterprise](#enterprise) |
|---|-----------------|------------------------|---------------------------|
| **サービス機能** |   |   |   |
| サービスの数 | ✓ 無制限 | ✓ 無制限 | ✓ 無制限 |
| ストレージ | ✓ 最大 1 TB / サービス | ✓ 無制限 | ✓ 無制限 |
| メモリ | ✓ 合計メモリ 8-12 GiB | ✓ 設定可能 | ✓ 設定可能 |
| 可用性 | ✓ 1 ゾーン | ✓ 2 つ以上のゾーン | ✓ 2 つ以上のゾーン |
| バックアップ | ✓ 24時間ごとに1回のバックアップ、1日保存 | ✓ 設定可能 | ✓ 設定可能 |
| 垂直スケーリング |   | ✓ 自動スケーリング | ✓ 標準プロファイルの自動、カスタムプロファイルの手動 |
| 横方向スケーリング |   | ✓ 手動スケーリング | ✓ 手動スケーリング |
| ClickPipes | ✓ | ✓ | ✓ |
| 早期アップグレード |   | ✓ | ✓ |
| コンピュートの分離 |   | ✓ | ✓ |
| バックアップを自分のクラウドアカウントにエクスポート |   |   | ✓ |
| スケジュールアップグレード |   |   | ✓ |
| カスタムハードウェアプロファイル |   |   | ✓ |
| **セキュリティ** |   |   |   |
| SAML/SSO |   |   | ✓ |
| MFA | ✓ | ✓ | ✓ |
| SOC 2 Type II | ✓ | ✓ | ✓ |
| ISO 27001 | ✓ | ✓ | ✓ |
| プライベートネットワーク |   | ✓ | ✓ |
| S3ロールベースのアクセス |   | ✓ | ✓ |
| 透過的データ暗号化 (CMEK for TDE) |   |   | ✓ |
| HIPAA |   |   | ✓ |
- -## Basic {#basic} - -- 単一レプリカデプロイメントをサポートするコスト効率の高いオプションです。 -- 確固たる信頼性保証が必要ない小規模なデータボリュームの部門やユースケースに最適です。 - -:::note -Basicティアのサービスは、サイズが固定されていることを意図しており、自動および手動のスケーリングは許可されていません。 -ユーザーは、サービスをスケールするためにScaleまたはEnterpriseティアにアップグレードできます。 -::: - -## Scale {#scale} - -強化されたSLA(2つ以上のレプリカデプロイメント)、スケーラビリティ、および高度なセキュリティを必要とするワークロード向けに設計されています。 - -- 次のような機能のサポートを提供します: - - [プライベートネットワーキングのサポート](../security/private-link-overview.md). - - [コンピュートの分離](../reference/warehouses#what-is-compute-compute-separation). - - [柔軟なスケーリング](../manage/scaling.md) オプション(スケールアップ/ダウン、イン/アウト)。 - -## Enterprise {#enterprise} - -厳格なセキュリティおよびコンプライアンス要件を持つ大規模なミッションクリティカルなデプロイに対応します。 - -- Scaleのすべて、**さらに** -- 柔軟なスケーリング: 標準プロファイル(`1:4 vCPU:メモリ比`)、および`HighMemory (1:8比)`や`HighCPU (1:2比)`のカスタムプロファイル。 -- 最高レベルのパフォーマンスと信頼性の保証を提供します。 -- エンタープライズグレードのセキュリティをサポートします: - - シングルサインオン(SSO) - - 強化された暗号化: AWSおよびGCPサービスに対して。サービスはデフォルトで私たちのキーによって暗号化され、顧客管理暗号化キー(CMEK)を有効にするためにキーを回転させることができます。 -- スケジュールアップグレードを許可: ユーザーは、データベースおよびクラウドリリースのアップグレードのための週の曜日/時間ウィンドウを選択できます。 -- [HIPAA](../security/compliance-overview.md/#hipaa-since-2024) 遵守を提供します。 -- バックアップをユーザーのアカウントにエクスポートします。 - -:::note -3つのティアすべてにおける単一レプリカのサービスサイズは固定されることを意図しています(`8 GiB`、`12 GiB`)。 -::: - -## 別のティアへのアップグレード {#upgrading-to-a-different-tier} - -いつでもBasicからScale、またはScaleからEnterpriseにアップグレードできます。 - -:::note -ティアのダウングレードは不可能です。 -::: - ---- - -サービスの種類について質問がある場合は、[価格ページ](https://clickhouse.com/pricing)を参照するか、support@clickhouse.comにお問い合わせください。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/cloud-tiers.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/cloud-tiers.md.hash deleted file mode 100644 index 2ab9e9daa0e..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/cloud-tiers.md.hash +++ /dev/null @@ -1 +0,0 @@ -3fe46184738b67bf diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/dashboards.md b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/dashboards.md deleted file mode 100644 index 3af57e5ced6..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/dashboards.md +++ /dev/null @@ -1,106 +0,0 @@ ---- -sidebar_label: 'ダッシュボード' -slug: '/cloud/manage/dashboards' -title: 'ダッシュボード' -description: 'SQLコンソールのダッシュボード機能を使用すると、保存されたクエリからの可視化情報を収集および共有できます。' ---- - -import BetaBadge from '@theme/badges/BetaBadge'; -import Image from '@theme/IdealImage'; -import dashboards_2 from '@site/static/images/cloud/dashboards/2_dashboards.png'; -import dashboards_3 from '@site/static/images/cloud/dashboards/3_dashboards.png'; -import dashboards_4 from '@site/static/images/cloud/dashboards/4_dashboards.png'; -import dashboards_5 from '@site/static/images/cloud/dashboards/5_dashboards.png'; -import dashboards_6 from '@site/static/images/cloud/dashboards/6_dashboards.png'; -import dashboards_7 from '@site/static/images/cloud/dashboards/7_dashboards.png'; -import dashboards_8 from '@site/static/images/cloud/dashboards/8_dashboards.png'; -import dashboards_9 from '@site/static/images/cloud/dashboards/9_dashboards.png'; -import dashboards_10 from '@site/static/images/cloud/dashboards/10_dashboards.png'; -import dashboards_11 from '@site/static/images/cloud/dashboards/11_dashboards.png'; - - -# ダッシュボード - - - -SQLコンソールのダッシュボード機能を使うと、保存されたクエリから視覚化を収集して共有できます。まずはクエリを保存して視覚化し、ダッシュボードにクエリの視覚化を追加し、クエリパラメータを使ってダッシュボードをインタラクティブにするところから始めましょう。 - -## 基本概念 {#core-concepts} - -### クエリの共有 {#query-sharing} - 
-ダッシュボードを同僚と共有するには、基盤となる保存されたクエリも共有してください。視覚化を表示するには、ユーザーは少なくとも基盤となる保存されたクエリに対して読み取り専用アクセス権を持っている必要があります。 - -### インタラクティビティ {#interactivity} - -[クエリパラメータ](/sql-reference/syntax#defining-and-using-query-parameters) を使用してダッシュボードをインタラクティブにします。たとえば、`WHERE`句にクエリパラメータを追加してフィルターとして機能させることができます。 - -視覚化設定で「フィルター」タイプを選択することで、**Global**フィルターサイドペインからクエリパラメータ入力を切り替えることができます。また、ダッシュボード上の別のオブジェクト(テーブルなど)とリンクすることでクエリパラメータ入力を切り替えることもできます。以下のクイックスタートガイドの「[フィルターを設定する](/cloud/manage/dashboards#configure-a-filter)」セクションもご覧ください。 - -## クイックスタート {#quick-start} - -[query_log](/operations/system-tables/query_log) システムテーブルを使用して、ClickHouseサービスを監視するダッシュボードを作成しましょう。 - -## クイックスタート {#quick-start-1} - -### 保存されたクエリを作成する {#create-a-saved-query} - -視覚化するための保存されたクエリがすでにある場合は、このステップをスキップできます。 - -新しいクエリタブを開いて、ClickHouseのシステムテーブルを使用してサービスごとの日毎にクエリボリュームをカウントするクエリを記述しましょう: - -保存されたクエリを作成する - -クエリの結果はテーブル形式で表示することもでき、チャートビューから視覚化を構築し始めることもできます。次のステップでは、クエリを `queries over time` として保存します: - -クエリを保存 - -保存されたクエリに関する詳細なドキュメントは、[クエリを保存するセクション](/cloud/get-started/sql-console#saving-a-query)を参照してください。 - -別のクエリ `query count by query kind` を作成して、クエリの種類ごとのクエリ数をカウントすることもできます。以下は、SQLコンソールにおけるデータのバーグラフ視覚化です。 - -クエリ結果のバーグラフ視覚化 - -2つのクエリができたので、これらのクエリを視覚化し収集するダッシュボードを作成しましょう。 - -### ダッシュボードを作成する {#create-a-dashboard} - -ダッシュボードパネルに移動し、「新しいダッシュボード」をクリックします。名前を付けたら、最初のダッシュボードを正常に作成できました! - -新しいダッシュボードを作成 - -### 視覚化を追加する {#add-a-visualization} - -保存された2つのクエリ、`queries over time` と `query count by query kind` があります。最初のクエリを折れ線グラフとして視覚化してみましょう。視覚化にタイトルとサブタイトルを付け、視覚化するクエリを選択します。次に、「ライン」チャートタイプを選択し、x軸とy軸を割り当てます。 - -視覚化を追加 - -ここでは、数値フォーマット、凡例のレイアウト、および軸ラベルなど、さらにスタイルの変更を行うことができます。 - -次に、2つ目のクエリをテーブルとして視覚化し、折れ線グラフの下に配置しましょう。 - -クエリ結果をテーブルとして視覚化 - -2つの保存されたクエリを視覚化することにより、最初のダッシュボードを作成しました! - -### フィルターを設定する {#configure-a-filter} - -クエリの種類に基づくフィルターを追加して、Insertクエリに関連するトレンドのみを表示できるように、このダッシュボードをインタラクティブにしましょう。この作業は、[クエリパラメータ](/sql-reference/syntax#defining-and-using-query-parameters)を使用して実現します。 - -折れ線グラフの隣にある3つのドットをクリックし、クエリの横にあるペンのボタンをクリックしてインラインクエリエディタを開きます。ここで、ダッシュボードから直接基盤となる保存されたクエリを編集できます。 - -基盤となるクエリを編集 - -今、黄色の実行クエリボタンを押すと、先ほどのクエリがInsertクエリのみにフィルタリングされて表示されます。クエリを更新するために保存ボタンをクリックしてください。チャート設定に戻ると、折れ線グラフをフィルタリングできるようになります。 - -今、上部のリボンにあるGlobal Filtersを使用して、入力を変更することでフィルターを切り替えることができます。 - -グローバルフィルターを調整 - -折れ線グラフのフィルターをテーブルにリンクしたい場合は、視覚化設定に戻り、`query_kind`クエリパラメータの値ソースをテーブルに変更し、リンクするフィールドとして`query_kind`カラムを選択します。 - -クエリパラメータを変更 - -これで、クエリの種類テーブルから折れ線グラフのフィルターを直接制御できるようになり、ダッシュボードをインタラクティブにできます。 - -折れ線グラフのフィルターを制御 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/dashboards.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/dashboards.md.hash deleted file mode 100644 index fc81eb3daa0..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/dashboards.md.hash +++ /dev/null @@ -1 +0,0 @@ -91878617913c19ed diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/index.md b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/index.md deleted file mode 100644 index d1f5e7f7701..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/index.md +++ /dev/null @@ -1,37 +0,0 @@ ---- -slug: '/cloud/manage' -keywords: -- 'AWS' -- 'Cloud' -- 'serverless' -- 'management' -title: '概要' -hide_title: true -description: 'クラウドの管理ページの概要' ---- - - - - -# Managing Cloud - -このセクションでは、ClickHouseクラウドの管理に必要なすべての情報が提供されています。このセクションには以下のページが含まれています: - -| ページ | 説明 | 
-|-----------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------| -| [ClickHouse Cloud Tiers](/cloud/manage/cloud-tiers) | 様々なクラウドティア、その特徴、および適切なものを選択するための考慮事項を説明します。 | -| [Integrations](/manage/integrations) | ClickHouse Cloudの組み込みインテグレーション、カスタムインテグレーション、およびサポートされていないインテグレーションに関する情報。 | -| [Backups](/cloud/manage/backups) | ClickHouse Cloudにおけるバックアップの動作、サービスのためにバックアップを構成するオプション、およびバックアップからの復元方法を説明します。| -| [Monitoring](/integrations/prometheus) | ClickHouseクラウドを監視する方法としてPrometheusを統合する方法。 | -| [Billing](/cloud/manage/billing/overview) | ClickHouse Cloudの価格モデル、コストに影響を与える要因を説明します。 | -| [Configuring Settings](/manage/settings) | ClickHouse Cloudの設定を構成する方法を説明します。 | -| [Replica-aware Routing](/manage/replica-aware-routing) | ClickHouse Cloudにおけるレプリカ認識ルーティングとは何か、その制限、及び構成方法を説明します。 | -| [Automatic Scaling](/manage/scaling) | ClickHouse Cloudサービスがリソースのニーズに応じて手動または自動でスケールアップ/ダウンする方法を説明します。 | -| [Service Uptime and SLA](/cloud/manage/service-uptime) | 本番インスタンスに提供されるサービスの稼働時間とサービスレベル契約に関する情報。 | -| [Notifications](/cloud/notifications) | ClickHouse Cloudの通知を受け取る方法、およびそれをカスタマイズする方法を示しています。 | -| [Upgrades](/manage/updates) | ClickHouse Cloudでのアップグレードの展開方法に関する情報。 | -| [Delete Account](/cloud/manage/close_account) | 必要に応じてアカウントを閉じるまたは削除する方法に関する情報。 | -| [Programmatic API Access with Postman](/cloud/manage/postman) | Postmanを使用してClickHouse APIをテストするためのガイド。 | -| [Troubleshooting](/faq/troubleshooting) | よくある問題のコレクションとそれらをトラブルシューティングする方法。 | -| [Data Transfer](./network-data-transfer.mdx) | ClickHouse Cloudがデータ転送の入出力をどのように計測するかについての詳細。 | -| [Jan 2025 Changes FAQ](./jan2025_faq/index.md) | 2025年1月に導入されたクラウドに関する変更点についての詳細。 | diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/index.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/index.md.hash deleted file mode 100644 index 45d161f70e8..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/index.md.hash +++ /dev/null @@ -1 +0,0 @@ -abc77200856ec1c0 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/integrations.md b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/integrations.md deleted file mode 100644 index 6f4f027379f..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/integrations.md +++ /dev/null @@ -1,34 +0,0 @@ ---- -sidebar_label: 'Integrations' -slug: '/manage/integrations' -title: 'Integrations' -description: 'Integrations for ClickHouse' ---- - - - -To see a full list of integrations for ClickHouse, please see [this page](/integrations). 
- -## Proprietary Integrations for ClickHouse Cloud {#proprietary-integrations-for-clickhouse-cloud} - -Besides the dozens of integrations available for ClickHouse, there are also some proprietary integrations only available for ClickHouse Cloud: - -### ClickPipes {#clickpipes} - -[ClickPipes](/integrations/clickpipes)は、シンプルなWebベースのUIを使用してClickHouse Cloudにデータを取り込む管理された統合プラットフォームです。現在、Apache Kafka、S3、GCS、Amazon Kinesisをサポートしており、さらに多くの統合が近日中に登場予定です。 - -### Looker Studio for ClickHouse Cloud {#looker-studio-for-clickhouse-cloud} - -[Looker Studio](https://lookerstudio.google.com/)は、Googleが提供する人気のビジネスインテリジェンスツールです。Looker Studioは現在ClickHouseコネクタを提供しておらず、代わりにMySQLワイヤプロトコルに依存してClickHouseに接続します。 - -Looker Studioは、[MySQLインターフェース](/interfaces/mysql)を有効にすることでClickHouse Cloudに接続できます。Looker StudioをClickHouse Cloudに接続する方法の詳細については、[こちらのページ](/interfaces/mysql#enabling-the-mysql-interface-on-clickhouse-cloud)をご覧ください。 - -### MySQL Interface {#mysql-interface} - -現在、一部のアプリケーションはClickHouseワイヤプロトコルをサポートしていません。これらのアプリケーションでClickHouse Cloudを使用するには、Cloud Consoleを通じてMySQLワイヤプロトコルを有効にすることができます。MySQLワイヤプロトコルをCloud Consoleを通じて有効にする方法の詳細については、[こちらのページ](/interfaces/mysql#enabling-the-mysql-interface-on-clickhouse-cloud)をご覧ください。 - -## Unsupported Integrations {#unsupported-integrations} - -次の統合機能は、現時点でClickHouse Cloudでは利用できません。これらは実験的機能です。アプリケーションでこれらの機能をサポートする必要がある場合は、support@clickhouse.comまでお問い合わせください。 - -- [MaterializedPostgreSQL](/engines/table-engines/integrations/materialized-postgresql) diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/integrations.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/integrations.md.hash deleted file mode 100644 index d57de49d713..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/integrations.md.hash +++ /dev/null @@ -1 +0,0 @@ -edb5a54525d091ab diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/jan2025_faq/backup.md b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/jan2025_faq/backup.md deleted file mode 100644 index 357f381abc0..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/jan2025_faq/backup.md +++ /dev/null @@ -1,24 +0,0 @@ ---- -title: 'バックアップポリシー' -slug: '/cloud/manage/jan-2025-faq/backup' -keywords: -- 'new tiers' -- 'plans' -- 'pricing' -- 'backups' -description: '新しい階層のバックアップポリシー' ---- - - - -## バックアップポリシーとは何ですか? {#what-is-the-backup-policy} -Basic、Scale、およびEnterpriseティアでは、バックアップがメーター制であり、ストレージとは別に請求されます。 -すべてのサービスは、デフォルトで1日1回のバックアップが設定されており、ScaleティアからはCloud Consoleの設定タブを介して追加のバックアップを構成することができます。各バックアップは少なくとも24時間保持されます。 - -## ユーザーがデフォルトのバックアップとは別に設定した現在の構成はどうなりますか? {#what-happens-to-current-configurations-that-users-have-set-up-separate-from-default-backups} - -顧客特有のバックアップ構成は引き継がれます。ユーザーは新しいティアで自由に変更できます。 - -## ティアによってバックアップ料金は異なりますか? 
{#are-backups-charged-differently-across-tiers} - -バックアップのコストはすべてのティアで同じです。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/jan2025_faq/backup.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/jan2025_faq/backup.md.hash deleted file mode 100644 index 34cab564c01..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/jan2025_faq/backup.md.hash +++ /dev/null @@ -1 +0,0 @@ -f8155d394fd70e53 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/jan2025_faq/billing.md b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/jan2025_faq/billing.md deleted file mode 100644 index dfaabe852bb..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/jan2025_faq/billing.md +++ /dev/null @@ -1,51 +0,0 @@ ---- -title: '課金' -slug: '/cloud/manage/jan-2025-faq/billing' -keywords: -- 'new pricing' -- 'billing' -description: '新しい価格層の課金詳細' ---- - - - -## Billing {#billing} - -### 使用量の測定と請求に変更はありますか? {#are-there-any-changes-to-how-usage-is-metered-and-charged} - -計算およびストレージの次元ごとの単価が変更され、データ転送および ClickPipes 使用量を考慮するための二つの追加の次元があります。 - -いくつかの注目すべき変更点: - -- TB あたりのストレージ価格が引き下げられ、ストレージコストにバックアップは含まれなくなります(バックアップは別途請求し、一つのバックアップのみが必要になります)。ストレージコストは全てのティアにおいて同じで、地域やクラウドサービスプロバイダーによって異なります。 -- 計算コストはティア、地域、クラウドサービスプロバイダーによって異なります。 -- データ転送に対する新しい料金次元は、地域間および公共インターネット上でのデータエグレスにのみ適用されます。 -- ClickPipes 使用に対する新しい料金次元があります。 - -### 既存のコミットされた支出契約を持つユーザーには何が起こりますか? {#what-happens-to-users-with-existing-committed-spend-contracts} - -アクティブなコミットされた支出契約を持つユーザーは、契約が終了するまで、新しい次元ごとの単価に影響を受けません。ただし、データ転送および ClickPipes に対する新しい料金次元は 2025 年 3 月 24 日から適用されます。ほとんどの顧客は、これらの新しい次元から月次請求が大幅に増加することはありません。 - -### ClickHouse とのコミットされた支出契約のあるユーザーは、古いプランでサービスを起動し続けることができますか? {#can-users-on-a-committed-spend-agreement-with-clickhouse-continue-to-launch-services-on-the-old-plan} - -はい、ユーザーは契約の終了日まで開発および生産サービスを起動し続けることができます。更新時には新しい料金プランが反映されます。 - -契約を変更する必要がある場合や、これらの変更が将来どのように影響するかについて質問がある場合は、サポートチームまたは営業担当者にお問い合わせください。 - -### ユーザーが契約の終了前にクレジットを使い果たし PAYG に移行した場合はどうなりますか? {#what-happens-if-users-exhaust-their-credits-before-the-end-of-the-contract-and-go-to-payg} - -コミット支出契約でクレジットが更新日より前に使い果たされた場合、私たちは現在の料金で更新まで請求します(現在のポリシーに従って)。 - -### 月次 PAYG のユーザーには何が起こりますか? {#what-happens-to-users-on-the-monthly-payg} - -月次 PAYG プランのユーザーは、開発および生産サービスに対して古い料金プランを使用して請求され続けます。彼らは 2025 年 7 月 23 日までに新しいプランへ自己移行することができます。そうでない場合、この日にすべてがスケール構成に移行され、新しいプランに基づいて請求されます。 - -### 過去のプランを参照するにはどこを見ることができますか? {#where-can-i-reference-legacy-plans} - -過去のプランは [こちら](https://clickhouse.com/pricing?legacy=true) で参照可能です。 - -## Marketplaces {#marketplaces} - -### CSP マーケットプレイスを通じたユーザーへの請求方法に変更はありますか? 
{#are-there-changes-to-how-users-are-charged-via-the-csp-marketplaces} - -CSP マーケットプレイスを通じて ClickHouse Cloud にサインアップしたユーザーは、CHCs (ClickHouse Cloud Credits) という形で使用量が発生します。この挙動は変更されていません。ただし、クレジット使用の基盤となる構成は、ここで概説された料金およびパッケージの変更に沿うものとなり、データ転送の使用量と ClickPipes に対する請求が含まれますが、これらが稼働した後です。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/jan2025_faq/billing.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/jan2025_faq/billing.md.hash deleted file mode 100644 index 700d3b03de4..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/jan2025_faq/billing.md.hash +++ /dev/null @@ -1 +0,0 @@ -8a24b55dce023917 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/jan2025_faq/dimensions.md b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/jan2025_faq/dimensions.md deleted file mode 100644 index de6c17954fb..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/jan2025_faq/dimensions.md +++ /dev/null @@ -1,107 +0,0 @@ ---- -title: 'New Pricing Dimensions' -slug: '/cloud/manage/jan-2025-faq/pricing-dimensions' -keywords: -- 'new pricing' -- 'dimensions' -description: 'Pricing dimensions for data transfer and ClickPipes' ---- - -import Image from '@theme/IdealImage'; -import clickpipesPricingFaq1 from '@site/static/images/cloud/manage/jan2025_faq/external_clickpipes_pricing_faq_1.png'; -import clickpipesPricingFaq2 from '@site/static/images/cloud/manage/jan2025_faq/external_clickpipes_pricing_faq_2.png'; -import clickpipesPricingFaq3 from '@site/static/images/cloud/manage/jan2025_faq/external_clickpipes_pricing_faq_3.png'; -import NetworkPricing from '@site/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/_snippets/_network_transfer_rates.md'; - -以下の次元が新しい ClickHouse Cloud の料金に追加されました。 - -:::note -データ転送および ClickPipes の料金は、2025年3月24日まではレガシープラン(開発、プロダクション、および専用)には適用されません。 -::: - -## データ転送料金 {#data-transfer-pricing} - -### ユーザーはどのようにデータ転送の料金を支払い、これは組織のティアや地域によって異なりますか? {#how-are-users-charged-for-data-transfer-and-will-this-vary-across-organization-tiers-and-regions} - -- ユーザーはデータ転送に対して、パブリックインターネットの出口および地域間の出口の2つの次元に沿って料金を支払います。地域内のデータ転送やプライベートリンク/プライベートサービスコネクトの使用とデータ転送に対しては料金は発生しません。ただし、ユーザーに適切に料金を請求する能力に影響を与える使用パターンを確認した場合、追加のデータ転送料金の次元を実装する権利を留保します。 -- データ転送料金は、クラウドサービスプロバイダー(CSP)および地域によって異なります。 -- データ転送料金は**組織のティアの間では**異ならないでしょう。 -- パブリック出口の料金は、発信地域のみに基づいています。地域間(またはクロスリージョン)の料金は、発信地域および宛先地域の両方に依存します。 - - - -### データ転送料金は使用量の増加に伴って段階的になりますか? {#will-data-transfer-pricing-be-tiered-as-usage-increases} - -データ転送の料金は使用量の増加に伴って**段階的にはなりません**。料金は地域やクラウドサービスプロバイダーによって異なることに注意してください。 - -## ClickPipes 料金 FAQ {#clickpipes-pricing-faq} - -### なぜ今 ClickPipes の料金モデルを導入するのですか? {#why-are-we-introducing-a-pricing-model-for-clickpipes-now} - -最初は ClickPipes を無料で起動することを決定し、フィードバックを収集し、機能を洗練し、ユーザーのニーズを満たすことを目的としています。GA プラットフォームが成長し、何兆行ものデータを処理する中で効果的にテストをクリアしたため、料金モデルを導入することでサービスの改善を続け、インフラを維持し、専用サポートと新しいコネクタを提供することが可能になります。 - -### ClickPipes のレプリカとは何ですか? 
{#what-are-clickpipes-replicas} - -ClickPipes は、ClickHouse Cloud サービスとは独立して実行され、スケールする専用のインフラを介してリモートデータソースからデータを取り込みます。この理由から、専用のコンピュートレプリカを使用します。以下の図は、簡略化されたアーキテクチャを示しています。 - -ストリーミング ClickPipes の場合、ClickPipes のレプリカはリモートデータソース(例えば、Kafka ブローカー)にアクセスし、データを取り込み、処理して宛先 ClickHouse サービスに取り込みます。 - -ClickPipes Replicas - Streaming ClickPipes - -オブジェクトストレージ ClickPipes の場合、ClickPipes のレプリカはデータロードタスクをオーケストレーションします(コピーするファイルを特定し、状態を維持し、パーティションを移動)し、データは ClickHouse サービスから直接取り込まれます。 - -ClickPipes Replicas - Object Storage ClickPipes - -### レプリカのデフォルト数とそのサイズは何ですか? {#what-is-the-default-number-of-replicas-and-their-size} - -各 ClickPipe は、2 GiB の RAM と 0.5 vCPU が提供される 1 レプリカがデフォルトです。これは、**0.25** ClickHouse コンピュートユニット(1 ユニット = 8 GiB RAM、2 vCPU)に相当します。 - -### ClickPipes のレプリカをスケールできますか? {#can-clickpipes-replicas-be-scaled} - -現在、ストリーミング用の ClickPipes のみが、基本ユニットとして **0.25** ClickHouse コンピュートユニットを持つ複数のレプリカを追加することで水平にスケール可能です。特定のユースケースに応じて垂直スケーリングも利用可能です(レプリカごとにもっと多くの CPU と RAM を追加)。 - -### どれだけの ClickPipes レプリカが必要ですか? {#how-many-clickpipes-replicas-do-i-need} - -これは、ワークロードのスループットとレイテンシ要件によって異なります。デフォルトで 1 レプリカから始め、レイテンシを測定し、必要に応じてレプリカを追加することをお勧めします。Kafka ClickPipes の場合、Kafka ブローカーのパーティションもそれに応じてスケールする必要があります。スケーリングコントロールは、各ストリーミング ClickPipe の「設定」の下にあります。 - -ClickPipes Replicas - How many ClickPipes replicas do I need? - -### ClickPipes の料金構造はどのようになっていますか? {#what-does-the-clickpipes-pricing-structure-look-like} - -料金は2つの次元で構成されています: -- **コンピュート**:ユニットあたりの時間単価 - コンピュートは、ClickPipes レプリカポッドがデータを積極的に取り込むかどうかに関わらず、実行コストを表します。すべての ClickPipes タイプに適用されます。 -- **取り込まれたデータ**:GB あたりの料金 - 取り込まれたデータレートは、すべてのストリーミング ClickPipes(Kafka、Confluent、Amazon MSK、Amazon Kinesis、Redpanda、WarpStream、Azure Event Hubs)に適用され、レプリカポッドを介して転送されたデータに対して適用されます。取り込まれたデータサイズ(GB)は、ソースから受信したバイトに基づいて請求されます(非圧縮または圧縮)。 - -### ClickPipes の公開料金は何ですか? {#what-are-the-clickpipes-public-prices} - -- コンピュート:\$0.20 per unit per hour(\$0.05 per replica per hour) -- 取り込まれたデータ:\$0.04 per GB - -### イラスト例での例はどのようになりますか? {#how-does-it-look-in-an-illustrative-example} - -例えば、1 TB のデータを 24 時間の間、単一のレプリカ(0.25 コンピュートユニット)を使用して Kafka コネクタ経由で取り込む場合、費用は以下のようになります: - -$$ -(0.25 \times 0.20 \times 24) + (0.04 \times 1000) = \$41.2 -$$ -
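For reference, a minimal sketch of this calculation in Python, using only the published rates above ($0.20 per compute unit per hour, $0.04 per ingested GB, 0.25 compute units per replica). The constant and function names are illustrative assumptions, not part of any ClickHouse API or billing tool:

```python
# Illustrative ClickPipes cost estimate based on the published rates above.
# These names are assumptions for illustration only.
COMPUTE_PRICE_PER_UNIT_HOUR = 0.20  # $ per ClickHouse compute unit per hour
INGEST_PRICE_PER_GB = 0.04          # $ per GB ingested (streaming ClickPipes only)
UNITS_PER_REPLICA = 0.25            # each ClickPipes replica = 0.25 compute units


def estimate_clickpipes_cost(replicas: int, hours: float, ingested_gb: float) -> float:
    """Return the estimated ClickPipes cost in USD for the given usage."""
    compute = replicas * UNITS_PER_REPLICA * COMPUTE_PRICE_PER_UNIT_HOUR * hours
    ingest = ingested_gb * INGEST_PRICE_PER_GB
    return compute + ingest


# The worked example above: 1 replica for 24 hours ingesting 1 TB (1000 GB) via Kafka.
print(estimate_clickpipes_cost(replicas=1, hours=24, ingested_gb=1000))  # 41.2
```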
- -オブジェクトストレージコネクタ(S3 と GCS)の場合、ClickPipes ポッドはデータを処理することはなく、転送をオーケストレーションしているだけであるため、ClickPipes のコンピュートコストのみが発生します: - -$$ -0.25 \times 0.20 \times 24 = \$1.2 -$$ - -### 新しい料金モデルはいつ発効しますか? {#when-does-the-new-pricing-model-take-effect} - -新しい料金モデルは、2025年1月27日以降に作成されたすべての組織に適用されます。 - -### 現在のユーザーにはどうなりますか? {#what-happens-to-current-users} - -既存のユーザーには、ClickPipes サービスが引き続き無料で提供される **60日間の猶予期間** が設けられます。既存のユーザーへの ClickPipes の請求は **2025年3月24日** に自動的に開始されます。 - -### ClickPipes の料金は市場とどのように比較されますか? {#how-does-clickpipes-pricing-compare-to-the-market} - -ClickPipes の料金の背後にある哲学は、プラットフォームの運営コストをカバーし、ClickHouse Cloud へのデータ移動を簡単かつ信頼性の高い方法で提供することです。この観点から、我々の市場分析では競争力のある位置にあることが明らかになりました。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/jan2025_faq/dimensions.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/jan2025_faq/dimensions.md.hash deleted file mode 100644 index e2633d0b1b2..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/jan2025_faq/dimensions.md.hash +++ /dev/null @@ -1 +0,0 @@ -b79aed6d9df58248 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/jan2025_faq/index.md b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/jan2025_faq/index.md deleted file mode 100644 index 8965b6b9cb6..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/jan2025_faq/index.md +++ /dev/null @@ -1,25 +0,0 @@ ---- -title: 'Jan 2025 Changes FAQ' -slug: '/cloud/manage/jan-2025-faq' -description: '新しい価格設定FAQのインデックスページ' -keywords: -- 'new pricing' -- 'faq' ---- - - - - -| ページ | 説明 | -|-----|-----| -| [要約](/cloud/manage/jan-2025-faq/summary) | 新しい ClickHouse Cloud タイアの要約 | -| [新しいタイアの説明](/cloud/manage/jan-2025-faq/new-tiers) | 新しいタイアと機能の説明 | -| [請求](/cloud/manage/jan-2025-faq/billing) | 新しい価格タイアの請求の詳細 | -| [新しい価格次元](/cloud/manage/jan-2025-faq/pricing-dimensions) | データ転送と ClickPipes の価格次元 | -| [スケーリング](/cloud/manage/jan-2025-faq/scaling) | 新しい価格タイアのスケーリング動作 | -| [バックアップポリシー](/cloud/manage/jan-2025-faq/backup) | 新しいタイアのバックアップポリシー | -| [新プランへの移行](/cloud/manage/jan-2025-faq/plan-migrations) | 新しいプラン、タイア、価格への移行、決定方法、コストの見積もり | diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/jan2025_faq/index.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/jan2025_faq/index.md.hash deleted file mode 100644 index 11508e8b69f..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/jan2025_faq/index.md.hash +++ /dev/null @@ -1 +0,0 @@ -a0fba25160bcb7f0 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/jan2025_faq/new_tiers.md b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/jan2025_faq/new_tiers.md deleted file mode 100644 index 6b4bd586247..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/jan2025_faq/new_tiers.md +++ /dev/null @@ -1,70 +0,0 @@ ---- -title: '新しい階層の説明' -slug: '/cloud/manage/jan-2025-faq/new-tiers' -keywords: -- 'new tiers' -- 'features' -- 'pricing' -- 'description' -description: '新しい階層と機能の説明' ---- - - - -## 主要な変更の概要 {#summary-of-key-changes} - -### 機能とティアのマッピングに関する重要な変更とは何か? 
{#what-key-changes-to-expect-with-regard-to-features-to-tier-mapping} - -- **Private Link/Private Service Connect:** プライベート接続は、ScaleおよびEnterpriseティアのすべてのサービスタイプでサポートされるようになりました(シングルレプリカサービスを含む)。これにより、プロダクション(大規模)環境と開発(小規模)環境の両方でPrivate Linkを利用できるようになります。 -- **バックアップ:** すべてのサービスは、デフォルトで1つのバックアップが提供され、追加のバックアップは別途料金が発生します。ユーザーは、追加のバックアップを管理するために構成可能なバックアップコントロールを活用できます。これは、バックアップ要件が少ないサービスが高いバンドル価格を支払う必要がないことを意味します。詳細については、Backup FAQを参照してください。 -- **強化された暗号化:** この機能は、AWSおよびGCPにおけるシングルレプリカサービスを含むEnterpriseティアサービスで利用可能です。サービスはデフォルトで私たちのキーによって暗号化されており、顧客管理暗号化キー(CMEK)を可能にするために、そのキーにローテーションすることができます。 -- **シングルサインオン(SSO):** この機能はEnterpriseティアで提供されており、組織の有効化にはサポートチケットが必要です。複数の組織を持つユーザーは、各組織でSSOを使用するためにすべての組織がEnterpriseティアにあることを確認する必要があります。 - -## ベーシックティア {#basic-tier} - -### ベーシックティアに関する考慮事項は何ですか? {#what-are-the-considerations-for-the-basic-tier} - -ベーシックティアは、小規模なワークロード向けに設計されています。ユーザーは、高可用性を必要とせず、プロトタイプに取り組む小規模な分析アプリケーションを展開したいと考えています。このティアは、スケール、信頼性(DR/HA)、およびデータ耐久性が必要なワークロードには適していません。このティアは、固定サイズのシングルレプリカサービス(1x8GiBまたは1x12GiB)をサポートします。詳細については、ドキュメントと[サポートポリシー](https://clickhouse.com/support/program)を参照してください。 - -### ベーシックティアのユーザーは、Private LinkおよびPrivate Service Connectにアクセスできますか? {#can-users-on-the-basic-tier-access-private-link-and-private-service-connect} - -いいえ、ユーザーはこの機能にアクセスするためにScaleまたはEnterpriseにアップグレードする必要があります。 - -### ベーシックおよびScaleティアのユーザーは、組織のためにSSOを設定できますか? {#can-users-on-the-basic-and-scale-tiers-set-up-sso-for-the-organization} - -いいえ、ユーザーはこの機能にアクセスするためにEnterpriseティアにアップグレードする必要があります。 - -### ユーザーはすべてのティアでシングルレプリカサービスを起動できますか? {#can-users-launch-single-replica-services-in-all-tiers} - -はい、シングルレプリカサービスはすべての3つのティアでサポートされています。ユーザーはスケールアウトできますが、シングルレプリカへのスケールインは許可されていません。 - -### ベーシックティアでユーザーはスケールアップ/ダウンやレプリカを追加できますか? {#can-users-scale-updown-and-add-more-replicas-on-the-basic-tier} - -いいえ、このティアのサービスは、小規模で固定サイズのワークロード(シングルレプリカ `1x8GiB` または `1x12GiB`)をサポートするためのものです。ユーザーがスケールアップ/ダウンやレプリカを追加する必要がある場合、ScaleまたはEnterpriseティアへのアップグレードを促されます。 - -## スケールティア {#scale-tier} - -### 新しいプラン(ベーシック/スケール/エンタープライズ)のどのティアがコンピュート-コンピュート分離をサポートしていますか? {#which-tiers-on-the-new-plans-basicscaleenterprise-support-compute-compute-separation} - -コンピュート-コンピュート分離は、スケールおよびエンタープライズティアのみサポートされています。この機能は、少なくとも2つのレプリカの親サービスを実行する必要があることにも注意してください。 - -### 従来のプラン(プロダクション/開発)のユーザーは、コンピュート-コンピュート分離にアクセスできますか? {#can-users-on-the-legacy-plans-productiondevelopment-access-compute-compute-separation} - -コンピュート-コンピュート分離は、既存の開発およびプロダクションサービスではサポートされていません。ただし、プライベートプレビューおよびベータに参加したユーザーを除きます。追加の質問がある場合は、[サポート](https://clickhouse.com/support/program)までお問い合わせください。 - -## エンタープライズティア {#enterprise-tier} - -### エンタープライズティアでサポートされている異なるハードウェアプロファイルは何ですか? {#what-different-hardware-profiles-are-supported-for-the-enterprise-tier} - -エンタープライズティアは、標準プロファイル(1:4 vCPU:メモリ比)、および`highMem (1:8比)`と`highCPU (1:2比)`の**カスタムプロファイル**をサポートし、ユーザーが最適な構成を選択する柔軟性を提供します。エンタープライズティアは、ベーシックおよびスケールティアと並んで展開された共有コンピュートリソースを使用します。 - -### エンタープライズティアで専用に提供される機能は何ですか? 
{#what-are-the-features-exclusively-offered-on-the-enterprise-tier} - -- **カスタムプロファイル:** インスタンスタイプ選択のオプションとして、標準プロファイル(1:4 vCPU:メモリ比)および`highMem (1:8比)`、`highCPU (1:2比)`のカスタムプロファイルがあります。 -- **エンタープライズグレードのセキュリティ:** - - **シングルサインオン(SSO)** - - **強化された暗号化:** AWSおよびGCPサービス用。サービスはデフォルトで私たちのキーによって暗号化されており、顧客管理暗号化キー(CMEK)を可能にするために、そのキーにローテーションすることができます。 -- **スケジュールされたアップグレード:** ユーザーは、データベースおよびクラウドリリースのためのアップグレードの日および時間ウィンドウを選択できます。 -- **HIPAA準拠:** 顧客は、HIPAA準拠のリージョンを有効にする前に、法務を通じてビジネスアソシエイト契約(BAA)に署名する必要があります。 -- **プライベートリージョン:** セルフサービス有効ではなく、ユーザーはリクエストを営業部門(sales@clickhouse.com)を通じてルーティングする必要があります。 -- **バックアップを顧客のクラウドアカウントにエクスポート**します。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/jan2025_faq/new_tiers.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/jan2025_faq/new_tiers.md.hash deleted file mode 100644 index 4ccbcbd2ee7..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/jan2025_faq/new_tiers.md.hash +++ /dev/null @@ -1 +0,0 @@ -23817a9d9793c6b3 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/jan2025_faq/plan_migrations.md b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/jan2025_faq/plan_migrations.md deleted file mode 100644 index 57cfd7c5b41..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/jan2025_faq/plan_migrations.md +++ /dev/null @@ -1,113 +0,0 @@ ---- -title: '新しいプランへの移行' -slug: '/cloud/manage/jan-2025-faq/plan-migrations' -keywords: -- 'migration' -- 'new tiers' -- 'pricing' -- 'cost' -- 'estimation' -description: '新プラン、階層、価格への移行、決定方法とコストの見積もり' ---- - - - -## 新しいプランの選択 {#choosing-new-plans} - -### 新しく作成された組織は古い(レガシー)プランでサービスを開始できますか? {#can-new-organizations-launch-services-on-the-old-legacy-plan} - -いいえ、新しく作成された組織は発表後に古いプランへのアクセスを持ちません。 - -### ユーザーは新しい価格プランにセルフサービスで移行できますか? {#can-users-migrate-to-the-new-pricing-plan-self-serve} - -はい、以下にセルフサービス移行のガイダンスがあります: - -| 現行プラン | 新プラン | セルフサービス移行 | -|--------------|--------------------------|------------------------------------------------------------------------------------------------------------------------------------------------| -| 開発 | 基本 | 組織内のすべてのサービスが開発をサポートしている場合にサポート | -| 開発 | スケール(2レプリカ以上) | :white_check_mark: | -| 開発 | エンタープライズ(2レプリカ以上) | :white_check_mark: | -| 本番 | スケール(3レプリカ以上) | :white_check_mark: | -| 本番 | エンタープライズ(3レプリカ以上) | :white_check_mark: | -| 専用 | [サポート](https://clickhouse.com/support/program)にお問い合わせください | - -### 開発および本番サービスを試用中のユーザーはどのような体験をしますか? {#what-will-the-experience-be-for-users-in-trial-running-development-and-production-services} - -ユーザーは試用中にアップグレードし、新しいサービス階層とそれがサポートする機能を評価するために試用クレジットを引き続き使用できます。ただし、同じ開発および本番サービスを引き続き使用することを選択した場合、PAYGにアップグレードできます。2025年7月23日前に移行する必要があります。 - -### ユーザーは階層をアップグレードできますか?たとえば、基本 → スケール、スケール → エンタープライズなど? {#can-users-upgrade-their-tiers-ie-basic--scale-scale--enterprise-etc} - -はい、ユーザーはセルフサービスでアップグレードでき、アップグレード後の価格は階層の選択を反映します。 - -### ユーザーは高コスト階層から低コスト階層に移動できますか?たとえば、エンタープライズ → スケール、スケール → 基本、エンタープライズ → 基本のセルフサービスなど? {#can-users-move-from-a-higher-to-a-lower-cost-tier-eg-enterprise--scale-scale--basic-enterprise--basic-self-serve} - -いいえ、階層のダウングレードは許可されていません。 - -### 組織内に開発サービスのみがあるユーザーは基本階層に移行できますか? {#can-users-with-only-development-services-in-the-organization-migrate-to-the-basic-tier} - -はい、これは許可されます。ユーザーには過去の使用に基づいて推奨が与えられ、基本 `1x8GiB` または `1x12GiB` を選択できます。 - -### 同じ組織内に開発と本番サービスがあるユーザーは基本階層に移動できますか? 
{#can-users-with-a-development-and-production-service-in-the-same-organization-move-to-the-basic-tier} - -いいえ、ユーザーが同じ組織に開発と本番サービスの両方を持っている場合、セルフサービスでスケールまたはエンタープライズ階層にのみ移行できます。基本に移行したい場合、すべての既存の本番サービスを削除する必要があります。 - -### 新しい階層でのスケーリングの動作に関する変更はありますか? {#are-there-any-changes-related-to-the-scaling-behavior-with-the-new-tiers} - -我々は、コンピュートレプリカ用の新しい垂直スケーリングメカニズムを導入します。これを「Make Before Break」(MBB)と呼びます。このアプローチでは、古いレプリカを削除する前に新しいサイズのレプリカを1つ以上追加し、スケーリング操作中に容量の損失を防ぎます。既存のレプリカの削除と新しいレプリカの追加の間のギャップを解消することで、MBBはよりシームレスで中断の少ないスケーリングプロセスを実現します。リソースの高い使用率が追加の容量を必要とするスケールアップシナリオでは、レプリカを前もって削除することはリソース制約を悪化させるだけなので、特に有益です。 - -この変更の一環として、過去のシステムテーブルデータはスケーリングイベントの一部として最大30日間保持されます。さらに、AWSまたはGCPでのサービスに関しては2024年12月19日以前のシステムテーブルデータ、Azureでのサービスに関しては2025年1月14日以前のデータは新しい組織階層への移行の一部として保持されません。 - -## コストの推定 {#estimating-costs} - -### ユーザーは移行中にどのようにガイドされ、自分のニーズに最適な階層を理解しますか? {#how-will-users-be-guided-during-migration-understanding-what-tier-best-fits-their-needs} - -コンソールは、サービスがある場合に過去の使用に基づいて各サービスの推奨オプションを提示します。新しいユーザーは、詳細に記載された機能と機能を確認し、自分のニーズに最適な階層を決定できます。 - -### ユーザーは新しい価格で「ウェアハウス」をどのようにサイズ設定し、コストを推定しますか? {#how-do-users-size-and-estimate-the-cost-of-warehouses-in-the-new-pricing} - -[ Pricing](https://clickhouse.com/pricing) ページにある価格計算機を参照してください。これにより、ワークロードのサイズと階層選択に基づいてコストを推定できます。 - -## 移行の実施 {#undertaking-the-migration} - -### 移行を実施するためのサービスバージョンの前提条件は何ですか? {#what-are-service-version-pre-requisites-to-undertaking-the-migration} - -サービスはバージョン24.8以降であり、SharedMergeTreeに移行されている必要があります。 - -### 現行の開発および本番サービスのユーザーの移行体験はどのようなものですか?ユーザーはサービスが利用できないメンテナンスウィンドウを計画する必要がありますか? {#what-is-the-migration-experience-for-users-of-the-current-development-and-production-services-do-users-need-to-plan-for-a-maintenance-window-where-the-service-is-unavailable} - -開発および本番サービスを新しい価格階層に移行する際、サーバーの再起動がトリガーされる可能性があります。専用サービスを移行するには、[サポート](https://clickhouse.com/support/program)にお問い合わせください。 - -### 移行後、ユーザーが取るべき他のアクションは何ですか? {#what-other-actions-should-a-user-take-after-the-migration} - -APIアクセスパターンは異なります。 - -新しいサービスを作成するためにOpenAPIを使用するユーザーは、サービス作成の`POST`リクエストから`tier`フィールドを削除する必要があります。 - -`tier`フィールドはサービスオブジェクトから削除され、もはやサービス階層は存在しません。 -これは`POST`、`GET`、および`PATCH`サービスリクエストによって返されるオブジェクトに影響を及ぼします。したがって、これらのAPIを消費するコードは、これらの変更を処理するように調整する必要があります。 - -各サービスは、スケールおよびエンタープライズ階層でのデフォルトのレプリカ数は3、基本階層では1です。 -スケールおよびエンタープライズ階層では、サービス作成リクエストで`numReplicas`フィールドを渡すことにより調整できます。 -ウェアハウス内の最初のサービスの`numReplicas`フィールドの値は2から20の範囲内である必要があります。既存のウェアハウス内で作成されたサービスは、最低1のレプリカ数を持つことができます。 - -### 自動化のために既存のTerraformプロバイダーを使用している場合、ユーザーはどのような変更を行う必要がありますか? {#what-changes-should-the-users-make-if-using-the-existing-terraform-provider-for-automation} - -組織が新しいプランのいずれかに移行した後、ユーザーはTerraformプロバイダーのバージョン2.0.0以上を使用する必要があります。 - -新しいTerraformプロバイダーは、サービスの`tier`属性の変更を処理するために必要です。 - -移行後、`tier`フィールドは受け付けられなくなりますので、これへの参照は削除する必要があります。 - -ユーザーはサービスリソースのプロパティとして`num_replicas`フィールドを指定できるようになります。 - -各サービスは、スケールおよびエンタープライズ階層でのデフォルトのレプリカ数は3、基本階層では1です。 -スケールおよびエンタープライズ階層では、サービス作成リクエストで`numReplicas`フィールドを渡すことで調整できます。 -ウェアハウス内の最初のサービスの`num_replicas`フィールドの値は2から20の範囲内である必要があります。既存のウェアハウス内で作成されたサービスは、最低1のレプリカ数を持つことができます。 - -### ユーザーはデータベースアクセスに変更を加える必要がありますか? {#will-users-have-to-make-any-changes-to-the-database-access} - -いいえ、データベースのユーザー名/パスワードは以前と同じように機能します。 - -### ユーザーはプライベートネットワーキング機能を再構成する必要がありますか? 
{#will-users-have-to-reconfigure-private-networking-features} - -いいえ、ユーザーは本番サービスをスケールまたはエンタープライズに移動した後、既存のプライベートネットワーキング(プライベートリンク、PSCなど)構成を使用できます。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/jan2025_faq/plan_migrations.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/jan2025_faq/plan_migrations.md.hash deleted file mode 100644 index 3c46b8ad14c..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/jan2025_faq/plan_migrations.md.hash +++ /dev/null @@ -1 +0,0 @@ -0a040aac8ced5f00 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/jan2025_faq/scaling.md b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/jan2025_faq/scaling.md deleted file mode 100644 index 84407a9d2a9..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/jan2025_faq/scaling.md +++ /dev/null @@ -1,41 +0,0 @@ ---- -title: 'スケーリング' -slug: '/cloud/manage/jan-2025-faq/scaling' -keywords: -- 'new pricing' -- 'faq' -- 'scaling' -description: '新しい価格体系におけるスケーリング動作' ---- - - - -ClickHouse Cloudでは、垂直方向(レプリカサイズの増加)と水平方向(レプリカの追加)の両方でスケーリングが可能です。 - -## 各ティアに対してどのようなスケーリングオプションが利用可能ですか? {#what-scaling-options-will-be-available-for-each-tier} - -各ティアのスケーリングの動作は次のとおりです: - -* **Basic**: Basicティアは、単一レプリカサービスのみをサポートします。これらのサービスはサイズが固定であり、垂直または水平方向のスケーリングは許可されません。ユーザーは、サービスをスケールまたはエンタープライズティアにアップグレードしてスケーリングすることができます。 -* **Scale**: Scaleティアは単一および複数レプリカサービスをサポートします。複数レプリカサービスのスケーリングが許可されます。 - * サービスは、複数レプリカセットアップにスケーリングした後に、CSP/地域がサポートする最大レプリカサイズに垂直スケーリングできます。2つ以上のレプリカのみが垂直スケーリング可能です。 - * 手動の水平方向のスケーリングが可能です。 -* **Enterprise**: Enterpriseティアは単一および複数レプリカサービスをサポートし、複数レプリカサービスのスケーリングが許可されます。 - * サービスはCSP/地域がサポートする最大レプリカサイズに垂直スケーリングできます。 - * 標準プロファイル(1:4 CPU対メモリ比)は垂直的な自動スケーリングをサポートします。 - * カスタムプロファイル(`highMemory`および`highCPU`)は、サポートチケットを通じて垂直スケーリングできます。 - * 手動の水平方向のスケーリングが可能です。 - -:::note -サービスは最大20レプリカに水平方向でスケーリングできます。追加のレプリカが必要な場合は、サポートチームにお問い合わせください。 -::: - -## ユーザーはサービスをスケールできますか? {#can-users-scale-in-their-service} - -スケーリングは2つ以上のレプリカに制限されます。スケールアウト後、ユーザーは単一レプリカにスケールダウンすることは許可されません。これは、安定性の低下やデータ損失の可能性をもたらす可能性があるためです。 - -## 新しいティアに関連するスケーリングの動作に変更はありますか? 
{#are-there-any-changes-related-to-the-scaling-behavior-with-the-new-tiers} - -コンピュートレプリカのために「Make Before Break」(MBB)と呼ばれる新しい垂直スケーリングのメカニズムを導入します。このアプローチでは、古いレプリカを削除する前に新しいサイズの1つ以上のレプリカが追加され、スケーリング操作中の容量の損失を防ぎます。既存のレプリカを削除することと新しいレプリカを追加することの間のギャップをなくすことで、MBBはよりシームレスで中断の少ないスケーリングプロセスを実現します。特に、リソースの高い利用が追加のキャパシティの必要性を引き起こすスケールアップシナリオにおいて有益です。レプリカを早急に削除すると、リソースの制約がさらに悪化する可能性があります。 - -この変更の一環として、スケーリングイベントの一部として、過去のシステムテーブルデータは最大30日間保持されることに注意してください。さらに、AWSまたはGCP上のサービスでは2024年12月19日以前のシステムテーブルデータ、およびAzure上のサービスでは2025年1月14日以前のデータは、新しい組織ティアへの移行の一環として保持されません。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/jan2025_faq/scaling.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/jan2025_faq/scaling.md.hash deleted file mode 100644 index e6030df36b1..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/jan2025_faq/scaling.md.hash +++ /dev/null @@ -1 +0,0 @@ -f9d028e52cee5294 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/jan2025_faq/summary.md b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/jan2025_faq/summary.md deleted file mode 100644 index 33ae92cdade..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/jan2025_faq/summary.md +++ /dev/null @@ -1,99 +0,0 @@ ---- -title: '概要' -slug: '/cloud/manage/jan-2025-faq/summary' -keywords: -- 'new tiers' -- 'packaging' -- 'pricing faq' -- 'summary' -description: '新しいClickHouse Cloud Tierの概要' ---- - - - -The following FAQ summarizes common questions with respect to new tiers introduced in ClickHouse Cloud starting in January 2025. - -## What has changed with ClickHouse Cloud tiers? {#what-has-changed-with-clickhouse-cloud-tiers} - -At ClickHouse, we are dedicated to adapting our products to meet the ever-changing requirements of our customers. Since its introduction in GA over the past two years, ClickHouse Cloud has evolved substantially, and we've gained invaluable insights into how our customers leverage our cloud offerings. - -We are introducing new features to optimize the sizing and cost-efficiency of ClickHouse Cloud services for your workloads. These include compute-compute separation, high-performance machine types, and single-replica services. We are also evolving automatic scaling and managed upgrades to execute in a more seamless and reactive fashion. - -We are adding a new Enterprise tier to serve the needs of the most demanding customers and workloads, with focus on industry-specific security and compliance features, even more controls over underlying hardware and upgrades, and advanced disaster recovery features. - -You can read about these and other functional changes in this [blog](https://clickhouse.com/blog/evolution-of-clickhouse-cloud-new-features-superior-performance-tailored-offerings). - -## What action is required? {#what-action-is-required} - -To support these changes, we are restructuring our current tiers to more closely match how our evolving customer base is using our offerings, and you need to take action to select a new plan. - -Details and timelines for making these selections are described below. - -## How are tiers changing? {#how-are-tiers-changing} - -We are transitioning from a model that organizes paid tiers purely by "service types" which are delineated by both capacity and features (namely, these are Development, Production, and Dedicated tiers) to one that organizes paid tiers by feature availability. 
These new tiers are called Basic, Scale, and Enterprise and are described in more detail below. - -This change brings several key benefits: - -* **Consistent Feature Access**: 機能は、すべてのサイズのサービスで利用可能であり、またその上のすべてのティアでも利用できます。たとえば、以前はProductionサービスタイプでのみ利用可能だったプライベートネットワーキングは、Scaleティアからすべてのサービスにアクセスできるようになるため、開発と本番のワークロードに応じてデプロイできます。 - -* **Organizational-Level Features**: 適切なプランとともに組織レベルで構築された機能を提供できるようになり、顧客が必要とするツールを適切なレベルのサービスで受け取れるようになります。たとえば、SSO(シングルサインオン)およびCMEK(顧客管理暗号化キー)へのアクセスはEnterpriseティアで利用可能です。 - -* **Optimized Support Plans**: 新しいパッケージ構造は、サポート応答時間を有料ティアと一致させることができ、さまざまな顧客のニーズを効果的に満たします。たとえば、Enterpriseティアの顧客には専任のサポートエンジニアが提供されます。 - -以下では、新しいティアの概要を提供し、ユースケースとの関連性を説明し、主要機能を概説します。 - -**Basic: A taste of ClickHouse** - -* Basicティアは、データ量が少なく、要求の厳しくないワークロードを持つ組織向けの手頃なオプションを提供するように設計されています。このティアでは、最大12GBのメモリと\< 1TBのストレージを持つ単一レプリカデプロイメントを実行可能であり、信頼性保証を必要としない小規模なユースケースに最適です。 - -**Scale: Enhanced SLAs and scalability** - -* Scaleティアは、強化されたSLA、高いスケーラビリティおよび高度なセキュリティ対策を必要とするワークロードに適しています。 -* 任意のレプリケーションファクターで無制限のコンピュートとストレージを提供し、コンピュート-コンピュート分離へのアクセス、そして自動的な垂直および水平方向のスケーリングを提供します。 -* 主な機能には次のものが含まれます: - * プライベートネットワーキング、カスタマイズバックアップコントロール、多要素認証などのサポート - * 最適化されたリソース使用のためのコンピュート-コンピュート分離 - * 変化する需要に応じた柔軟なスケーリングオプション(垂直および水平の両方) - -**Enterprise: Mission-critical deployments** - -* Enterpriseティアは、大規模でミッションクリティカルなClickHouseデプロイメントを実行するための最適な場所です。 -* 最も厳格なセキュリティおよびコンプライアンスのニーズを持つ組織に最適で、最高レベルのパフォーマンスと信頼性を必要とします。 -* 主な機能には次のものが含まれます: - * HIPAAなどの業界特有のコンプライアンス認証 - * SSO(シングルサインオン)およびCMEK(顧客管理暗号化キー)へのセルフサービスアクセス - * 最小限の中断を保証するスケジュールされたアップグレード - * 高メモリ、高CPUオプションおよびプライベートリージョンを含むカスタム構成のサポート - -新しいティアの詳細は、私たちの[ウェブサイト](https://clickhouse.com/pricing)で説明されています。 - -## How is pricing changing? {#how-is-pricing-changing} - -In addition to evolving our paid tiers, we are making the following adjustments to our overall pricing structure and price points: - -* **Storage**: ストレージのTBあたりの価格が引き下げられ、ストレージコストにバックアップは含まれなくなります。 -* **Backups**: バックアップは別途料金が発生し、1つのバックアップのみが必須となります。 -* **Compute**: コンピュートコストは増加し、ティアとリージョンによって異なります。この増加は、コンピュート-コンピュート分離および単一レプリカサービスの導入によりバランスが取られる場合があります。 -* **Data Transfer**: インターネット経由のデータ転送および地域を越えたデータ転送に対して料金を導入します。私たちの分析に基づくと、ほとんどの顧客はこの新しい次元に基づいて月額料金が大幅に増加しないと考えています。 -* **ClickPipes**: 管理されたインジェストサービスは、導入期間中は無料で提供されていましたが、現在はコンピュートと取り込まれたデータに基づいて料金が発生します。私たちの分析に基づくと、ほとんどの顧客はこの新しい次元に基づいて月額料金が大幅に増加しないと考えています。 - -## When will these changes take effect? {#when-will-these-changes-take-effect} - -While changes are effective immediately for new customers, existing customers will have from 6 months to a year to transition to new plans. - -Detailed breakdown of effective dates is below: - -* **New Customers**: 新しいプランは、ClickHouse Cloudの新規顧客に対して**2025年1月27日**に発効します。 -* **Existing PAYG Customers**: 従量課金制(PAYG)の顧客は、**2025年7月23日**までの6ヶ月間に新しいプランに移行する必要があります。 -* **Existing Committed Spend Customers**: コミットメント契約のある顧客は、現在の契約の終了時に条件を再交渉できます。 -* **New usage dimensions** for Data Transfer and ClickPipes are effective for both PAYG and Committed Spend customers 8 weeks following this announcement on **2025年3月24日**. - -## What actions should you take? {#what-actions-should-you-take} - -If you are a **pay-as-you-go (PAYG) customer**, you can migrate to a new plan through the self-service options available in your ClickHouse Cloud console. - -If you are a **committed spend customer**, please reach out to your account representative to discuss your custom migration plan and timeline. - -**Need assistance?** -We're here to support you through this transition. 
If you have any questions or need personalized help, please reach out to your account representative or contact our support team. diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/jan2025_faq/summary.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/jan2025_faq/summary.md.hash deleted file mode 100644 index ea268081632..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/jan2025_faq/summary.md.hash +++ /dev/null @@ -1 +0,0 @@ -68a5a5b3824ebd73 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/network-data-transfer.mdx b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/network-data-transfer.mdx deleted file mode 100644 index 42bc2384b5e..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/network-data-transfer.mdx +++ /dev/null @@ -1,37 +0,0 @@ ---- -'sidebar_label': 'データ転送' -'slug': '/cloud/manage/network-data-transfer' -'title': 'Data Transfer' -'description': 'ClickHouse Cloud がデータ転送の送信および受信のデータを計測する方法について詳しく学ぶ' ---- - -import NetworkPricing from '@site/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/_snippets/_network_transfer_rates.md'; - -ClickHouse Cloudは、データの転送量をIngressとEgressで計測します。 -これには、ClickHouse Cloudへのデータの出入りや、地域内および地域間のデータ転送が含まれます。 -この使用量はサービスレベルで追跡されます。この使用量に基づいて、顧客はデータ転送料金を負担し、それが月々の請求書に追加されます。 - -ClickHouse Cloudは以下に対して料金を請求します: -- ClickHouse Cloudから公衆インターネットへのデータEgress、他のクラウドプロバイダーの他の地域への転送を含む。 -- 同じクラウドプロバイダーの別の地域へのデータEgress。 - -地域内のデータ転送やPrivate Link/Private Service Connectの使用およびデータ転送には料金が発生しません。 -ただし、ユーザーに適切に料金を請求する能力に影響を与える使用パターンが見られた場合、追加のデータ転送料金計算の次元を実施する権利を留保します。 - -データ転送料金は、クラウドサービスプロバイダー(CSP)や地域によって異なります。 -公衆インターネットEgressの料金は、発信元地域のみに基づいています。 -地域間(またはクロス地域)の料金は、発信元と宛先の両方の地域に依存します。 - -**データ転送コストを最小化するためのベストプラクティス** - -データをIngressおよびEgressする際に、データ転送コストを最小化するために考慮すべきいくつかのパターンがあります。 -1. ClickHouse CloudからデータをIngressまたはEgressする際には、データ転送量と関連コストを最小化するために可能な限り圧縮を使用してください。 -2. 
非インライン値(例:INSERT INTO [TABLE] FROM INFILE [FILE] FORMAT NATIVE)を使用してネイティブプロトコル経由でINSERTを行う場合、ClickHouseクライアントはデータをパックするためにサーバーからメタデータを取得します。メタデータがINSERTペイロードより大きい場合、サーバーの視点からは受信量以上のEgressが見られることがあります。これが受け入れられない場合は、VALUES構文でデータをインライン化するか、HTTPプロトコルを使用してみてください。 - -以下の表は、クラウドプロバイダーと地域によって公衆インターネットまたは地域間のEgressに対するデータ転送料金がどのように異なるかを示しています。 - -:::note -ClickHouse Cloudは、発信元と宛先の地域に応じて、Tier 1からTier 4までの段階で地域間の使用量を計測します。以下の表は地域間データ転送の各組み合わせのティアを示しています。ClickHouse Cloudの請求使用画面では、ティアごとに区分されたデータ転送使用量を見ることができます。 -::: - - diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/network-data-transfer.mdx.hash b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/network-data-transfer.mdx.hash deleted file mode 100644 index 741af24988b..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/network-data-transfer.mdx.hash +++ /dev/null @@ -1 +0,0 @@ -3af7c4d1ff2933e5 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/notifications.md b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/notifications.md deleted file mode 100644 index 8e0f72cc7dc..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/notifications.md +++ /dev/null @@ -1,48 +0,0 @@ ---- -title: 'Notifications' -slug: '/cloud/notifications' -description: 'ClickHouse Cloud サービス用の通知' -keywords: -- 'cloud' -- 'notifications' ---- - -import Image from '@theme/IdealImage'; -import notifications_1 from '@site/static/images/cloud/manage/notifications-1.png'; -import notifications_2 from '@site/static/images/cloud/manage/notifications-2.png'; -import notifications_3 from '@site/static/images/cloud/manage/notifications-3.png'; -import notifications_4 from '@site/static/images/cloud/manage/notifications-4.png'; - -ClickHouse Cloud は、サービスや組織に関連する重要なイベントについての通知を送信します。通知がどのように送信され、構成されるかを理解するために、いくつかの概念を把握しておく必要があります。 - -1. **通知のカテゴリ**: 課金通知やサービス関連の通知など、通知のグループを指します。各カテゴリ内には、配信モードを設定できる複数の通知があります。 -2. **通知の重要度**: 通知の重要度は、通知がどれほど重要であるかに応じて `info`、`warning`、または `critical` のいずれかとなります。これは構成できません。 -3. 
**通知のチャネル**: チャネルは、通知が受信される方法を指します。たとえば、UI、メール、Slack などです。ほとんどの通知はこれを構成可能です。 - -## 通知の受信 {#receiving-notifications} - -通知はさまざまなチャネルを介して受信できます。現時点では、ClickHouse Cloud はメール、ClickHouse Cloud UI、Slack を通じて通知を受信することをサポートしています。左上のメニューのベルアイコンをクリックすると、現在の通知が表示され、フライアウトが開きます。フライアウトの下部にある **すべて表示** ボタンをクリックすると、すべての通知のアクティビティログを表示するページに移動します。 - -ClickHouse Cloud notifications flyout - -ClickHouse Cloud notifications activity log - -## 通知のカスタマイズ {#customizing-notifications} - -各通知について、通知の受け取り方法をカスタマイズできます。通知のフライアウトまたは通知アクティビティログの2番目のタブから設定画面にアクセスできます。 - -Cloud ユーザーは、Cloud UI を介して配信される通知をカスタマイズでき、これらのカスタマイズは各ユーザーに反映されます。Cloud ユーザーは、自分のメールに配信される通知もカスタマイズできますが、カスタムメールに配信される通知や Slack チャンネルに送信される通知をカスタマイズできるのは、管理者権限を持つユーザーのみです。 - -特定の通知の配信を構成するには、鉛筆アイコンをクリックして通知の配信チャネルを変更します。 - -ClickHouse Cloud notifications settings screen - -ClickHouse Cloud notification delivery settings - -:::note -**支払い失敗**などの特定の **必須** 通知は構成不可です。 -::: - -## サポートされている通知 {#supported-notifications} - -現在、課金に関連する通知(支払い失敗、使用量が閾値を超えた等)やスケーリングイベントに関連する通知(スケーリング完了、スケーリングブロック等)を送信しています。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/notifications.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/notifications.md.hash deleted file mode 100644 index 524fa5a156e..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/notifications.md.hash +++ /dev/null @@ -1 +0,0 @@ -b2da1b2f0129acd4 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/openapi.md b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/openapi.md deleted file mode 100644 index 99ea0f4fc5e..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/openapi.md +++ /dev/null @@ -1,65 +0,0 @@ ---- -sidebar_label: 'Managing API Keys' -slug: '/cloud/manage/openapi' -title: 'Managing API Keys' -description: 'ClickHouse Cloud provides an API utilizing OpenAPI that allows you - to programmatically manage your account and aspects of your services.' ---- - -import image_01 from '@site/static/images/cloud/manage/openapi1.png'; -import image_02 from '@site/static/images/cloud/manage/openapi2.png'; -import image_03 from '@site/static/images/cloud/manage/openapi3.png'; -import image_04 from '@site/static/images/cloud/manage/openapi4.png'; -import image_05 from '@site/static/images/cloud/manage/openapi5.png'; -import Image from '@theme/IdealImage'; - - -# APIキーの管理 - -ClickHouse Cloudは、アカウントやサービスの側面をプログラム的に管理するためのAPIを提供しており、OpenAPIを利用しています。 - -:::note -このドキュメントはClickHouse Cloud APIについて説明します。データベースAPIエンドポイントについては、[Cloud Endpoints API](/cloud/get-started/query-endpoints.md)をご覧ください。 -::: - -1. 左メニューの**API Keys**タブを使用して、APIキーを作成および管理できます。 - - API Keys tab - -2. **API Keys**ページでは、最初のAPIキーを作成するためのプロンプトが最初に表示されます。最初のキーが作成された後は、右上の`New API Key`ボタンを使用して新しいキーを作成できます。 - - API Keys page - -3. APIキーを作成するには、キー名、キーの権限、有効期限を指定し、`Generate API Key`をクリックします。 -
-:::note -権限は、ClickHouse Cloudの[定義済みロール](/cloud/security/cloud-access-management/overview#console-users-and-roles)に準拠しています。開発者ロールは、割り当てられたサービスに対して読み取り専用の権限を持ち、管理者ロールは完全な読み書き権限を持ちます。 -::: - - Create API key form - -4. 次の画面には、Key IDとKey secretが表示されます。これらの値をコピーして、安全な場所に保存してください(たとえば、ボールトなど)。この画面から離れると、値は再表示されません。 - - API key details - -5. ClickHouse Cloud APIは、[HTTP Basic Authentication](https://developer.mozilla.org/en-US/docs/Web/HTTP/Authentication)を使用してAPIキーの有効性を確認します。以下は、`curl`を使用してClickHouse Cloud APIにリクエストを送信する方法の例です: - -```bash -$ KEY_ID=mykeyid -$ KEY_SECRET=mykeysecret - -$ curl --user $KEY_ID:$KEY_SECRET https://api.clickhouse.cloud/v1/organizations -``` - -6. **API Keys**ページに戻ると、キー名、Key IDの最後の4文字、権限、ステータス、有効期限、作成者が表示されます。この画面からキー名、権限、有効期限を編集することができます。また、ここからキーを無効にしたり削除したりすることも可能です。 -
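上記の `curl` の例の続きとして、同じ Key ID / Key secret はベース URL `https://api.clickhouse.cloud/v1` 配下の他のエンドポイントにもそのまま使えます。以下は、組織 ID を取得してからその組織のサービス一覧を取得する最小限のスケッチです（`jq` の利用、およびレスポンスが `result` 配列でラップされているという点はこの例での前提です。正確なレスポンス形式は [APIリファレンス](https://clickhouse.com/docs/cloud/manage/api/swagger) を参照してください）:

```bash
KEY_ID=mykeyid
KEY_SECRET=mykeysecret

# /v1/organizations のレスポンスから最初の組織 ID を取り出す（jq が必要）
ORG_ID=$(curl --silent --user $KEY_ID:$KEY_SECRET \
  https://api.clickhouse.cloud/v1/organizations | jq -r '.result[0].id')

# その組織に属するサービスの一覧を取得する
curl --silent --user $KEY_ID:$KEY_SECRET \
  "https://api.clickhouse.cloud/v1/organizations/$ORG_ID/services" | jq .
```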
-:::note -APIキーを削除することは、永久的なアクションです。このキーを使用しているサービスは、ClickHouse Cloudへのアクセスを直ちに失います。 -::: - - API Keys management page - -## エンドポイント {#endpoints} - -エンドポイントの詳細については、[APIリファレンス](https://clickhouse.com/docs/cloud/manage/api/swagger)をご覧ください。 -APIキーとAPIシークレットを使って、ベースURL `https://api.clickhouse.cloud/v1`にアクセスしてください。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/openapi.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/openapi.md.hash deleted file mode 100644 index 38f906e2b88..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/openapi.md.hash +++ /dev/null @@ -1 +0,0 @@ -ce3e9dc8fd39764b diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/postman.md b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/postman.md deleted file mode 100644 index b6318cc794f..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/postman.md +++ /dev/null @@ -1,117 +0,0 @@ ---- -slug: '/cloud/manage/postman' -sidebar_label: 'Programmatic API access with Postman' -title: 'Programmatic API access with Postman' -description: 'This guide will help you test the ClickHouse Cloud API using Postman' ---- - -import Image from '@theme/IdealImage'; -import postman1 from '@site/static/images/cloud/manage/postman/postman1.png'; -import postman2 from '@site/static/images/cloud/manage/postman/postman2.png'; -import postman3 from '@site/static/images/cloud/manage/postman/postman3.png'; -import postman4 from '@site/static/images/cloud/manage/postman/postman4.png'; -import postman5 from '@site/static/images/cloud/manage/postman/postman5.png'; -import postman6 from '@site/static/images/cloud/manage/postman/postman6.png'; -import postman7 from '@site/static/images/cloud/manage/postman/postman7.png'; -import postman8 from '@site/static/images/cloud/manage/postman/postman8.png'; -import postman9 from '@site/static/images/cloud/manage/postman/postman9.png'; -import postman10 from '@site/static/images/cloud/manage/postman/postman10.png'; -import postman11 from '@site/static/images/cloud/manage/postman/postman11.png'; -import postman12 from '@site/static/images/cloud/manage/postman/postman12.png'; -import postman13 from '@site/static/images/cloud/manage/postman/postman13.png'; -import postman14 from '@site/static/images/cloud/manage/postman/postman14.png'; -import postman15 from '@site/static/images/cloud/manage/postman/postman15.png'; -import postman16 from '@site/static/images/cloud/manage/postman/postman16.png'; -import postman17 from '@site/static/images/cloud/manage/postman/postman17.png'; - -このガイドでは、[Postman](https://www.postman.com/product/what-is-postman/)を使用してClickHouse Cloud APIをテストする方法を説明します。 -Postmanアプリケーションは、Webブラウザ内で使用できるほか、デスクトップにダウンロードすることもできます。 - -### アカウントを作成する {#create-an-account} -* 無料アカウントは[https://www.postman.com](https://www.postman.com)で利用できます。 - -Postman site - -### ワークスペースを作成する {#create-a-workspace} -* ワークスペースに名前を付け、可視性レベルを設定します。 - -Create workspace - -### コレクションを作成する {#create-a-collection} -* 左上のメニューの「Explore」の下で「Import」をクリックします: - -Explore > Import - -* モーダルが表示されます: - -API URL entry - -* APIアドレス「https://api.clickhouse.cloud/v1」を入力し、「Enter」を押します: - -Import - -* 「Import」ボタンをクリックして「Postman Collection」を選択します: - -Collection > Import - -### ClickHouse Cloud API仕様とのインターフェース {#interface-with-the-clickhouse-cloud-api-spec} -* 「ClickHouse Cloud用API仕様」が「Collections」(左ナビゲーション)内に表示されます。 - -Import your API - -* 「ClickHouse Cloud用API仕様」をクリックします。中間ペインから「Authorization」タブを選択します: - -Import complete - -### 
認証を設定する {#set-authorization} -* ドロップダウンメニューを切り替えて「Basic Auth」を選択します: - -Basic auth - -* ClickHouse Cloud APIキーをセットアップした際に受け取ったユーザー名とパスワードを入力します: - -credentials - -### 変数を有効にする {#enable-variables} -* [変数](https://learning.postman.com/docs/sending-requests/variables/)を使用すると、Postman内で値を保存および再利用でき、APIテストが容易になります。 -#### Organization IDとService IDを設定する {#set-the-organization-id-and-service-id} -* 「Collection」内で、中央ペインの「Variable」タブをクリックします(Base URLは前のAPIインポートによって設定されているはずです)。 -* `baseURL`の下の「新しい値を追加」をクリックし、あなたの組織IDとサービスIDに置き換えます: - -Organization ID and Service ID - -## ClickHouse Cloud API機能をテストする {#test-the-clickhouse-cloud-api-functionalities} -### 「GET 利用可能な組織のリスト」をテストする {#test-get-list-of-available-organizations} -* 「ClickHouse Cloud用OpenAPI仕様」の下で、フォルダーを展開 > V1 > organizations -* 「GET 利用可能な組織のリスト」をクリックし、右側の青い「Send」ボタンを押します: - -Test retrieval of organizations - -* 返された結果は、「status": 200」と共に組織の詳細を返すはずです(「status」が400で、組織情報が表示されない場合は、設定が正しくありません)。 - -Status - -### 「GET 組織の詳細」をテストする {#test-get-organizational-details} -* `organizationid`フォルダーの下に移動し、「GET 組織の詳細」へ: -* 中央フレームのメニューのParamsに`organizationid`が必要です。 - -Test retrieval of organization details - -* この値を波括弧内の`orgid`で編集します `{{orgid}}`(この値を設定したことで、メニューが表示され、値が表示されます): - -Submit test - -* 「Save」ボタンを押した後、画面右上の青い「Send」ボタンを押します。 - -Return value - -* 返された結果は、「status": 200」と共に組織の詳細を返すはずです(「status」が400で、組織情報が表示されない場合は、設定が正しくありません)。 - -### 「GET サービスの詳細」をテストする {#test-get-service-details} -* 「GET サービスの詳細」をクリックします。 -* `organizationid`と`serviceid`の値をそれぞれ`{{orgid}}`と`{{serviceid}}`に編集します。 -* 「Save」を押し、次に右の青い「Send」ボタンを押します。 - -List of services - -* 返された結果は、「status": 200」と共にサービスのリストとその詳細を返すはずです(「status」が400で、サービス情報が表示されない場合は、設定が正しくありません)。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/postman.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/postman.md.hash deleted file mode 100644 index 2d3a25ae06d..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/postman.md.hash +++ /dev/null @@ -1 +0,0 @@ -ba64d5e36ffe7fc6 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/replica-aware-routing.md b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/replica-aware-routing.md deleted file mode 100644 index d47ca798117..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/replica-aware-routing.md +++ /dev/null @@ -1,46 +0,0 @@ ---- -title: 'レプリカ意識型ルーティング' -slug: '/manage/replica-aware-routing' -description: 'キャッシュ再利用を増やすためのレプリカ意識型ルーティングの使用方法' -keywords: -- 'cloud' -- 'sticky endpoints' -- 'sticky' -- 'endpoints' -- 'sticky routing' -- 'routing' -- 'replica aware routing' ---- - - - - -# レプリカ対応ルーティング (プライベートプレビュー) - -レプリカ対応ルーティング(スティッキーセッション、スティッキールーティング、またはセッションアフィニティとも呼ばれる)は、[Envoyプロキシのリングハッシュ負荷分散](https://www.envoyproxy.io/docs/envoy/latest/intro/arch_overview/upstream/load_balancing/load_balancers#ring-hash)を利用しています。レプリカ対応ルーティングの主な目的は、キャッシュ再利用の機会を増やすことです。それは隔離を保証するものではありません。 - -サービスのレプリカ対応ルーティングを有効にすると、サービスホスト名の上にワイルドカードサブドメインを許可します。ホスト名が `abcxyz123.us-west-2.aws.clickhouse.cloud` のサービスの場合、`*.sticky.abcxyz123.us-west-2.aws.clickhouse.cloud` に一致する任意のホスト名を使用してサービスにアクセスできます: - -|例のホスト名| -|---| -|`aaa.sticky.abcxyz123.us-west-2.aws.clickhouse.cloud`| -|`000.sticky.abcxyz123.us-west-2.aws.clickhouse.cloud`| -|`clickhouse-is-the-best.sticky.abcxyz123.us-west-2.aws.clickhouse.cloud`| - -Envoyがそのようなパターンに一致するホスト名を受け取ると、ホスト名に基づいてルーティングハッシュを計算し、計算されたハッシュに基づいてハッシュリング上の対応するClickHouseサーバーを見つけます。サービスに対する変更がないと仮定すると(例: サーバーの再起動、スケールアウト/ 
イン)、Envoyは常に同じClickHouseサーバーを選択して接続します。 - -元のホスト名は、デフォルトのルーティングアルゴリズムである `LEAST_CONNECTION` 負荷分散を引き続き使用することに注意してください。 - -## レプリカ対応ルーティングの制限 {#limitations-of-replica-aware-routing} - -### レプリカ対応ルーティングは隔離を保証しません {#replica-aware-routing-does-not-guarantee-isolation} - -サービスへのいかなる中断、例えばサーバーポッドの再起動(バージョンアップグレード、クラッシュ、縦型スケーリングなどによる理由で)、サーバーのスケールアウト/インなどが、ルーティングハッシュリングを中断させます。これにより、同じホスト名の接続が異なるサーバーポッドに到達することになります。 - -### レプリカ対応ルーティングはプライベートリンクでそのまま動作しません {#replica-aware-routing-does-not-work-out-of-the-box-with-private-link} - -顧客は新しいホスト名パターンの名前解決を機能させるために、手動でDNSエントリを追加する必要があります。これを不適切に使用すると、サーバーロードの不均衡を引き起こす可能性があります。 - -## レプリカ対応ルーティングの設定 {#configuring-replica-aware-routing} - -レプリカ対応ルーティングを有効にするには、[サポートチームにお問い合わせください](https://clickhouse.com/support)。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/replica-aware-routing.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/replica-aware-routing.md.hash deleted file mode 100644 index d5f3da22468..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/replica-aware-routing.md.hash +++ /dev/null @@ -1 +0,0 @@ -7281c75a23ca0bfe diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/scaling.md b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/scaling.md deleted file mode 100644 index 7b6650695f5..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/scaling.md +++ /dev/null @@ -1,147 +0,0 @@ ---- -sidebar_position: 1 -sidebar_label: '自動スケーリング' -slug: '/manage/scaling' -description: 'ClickHouse Cloud での自動スケーリングの設定' -keywords: -- 'autoscaling' -- 'auto scaling' -- 'scaling' -- 'horizontal' -- 'vertical' -- 'bursts' -title: 'Automatic Scaling' ---- - -import Image from '@theme/IdealImage'; -import auto_scaling from '@site/static/images/cloud/manage/AutoScaling.png'; -import scaling_patch_request from '@site/static/images/cloud/manage/scaling-patch-request.png'; -import scaling_patch_response from '@site/static/images/cloud/manage/scaling-patch-response.png'; -import scaling_configure from '@site/static/images/cloud/manage/scaling-configure.png'; -import scaling_memory_allocation from '@site/static/images/cloud/manage/scaling-memory-allocation.png'; -import ScalePlanFeatureBadge from '@theme/badges/ScalePlanFeatureBadge' - - -# 自動スケーリング - -スケーリングは、クライアントの需要に応じて利用可能なリソースを調整する能力です。Scale および Enterprise (標準 1:4 プロファイル) 階層のサービスは、APIをプログラム的に呼び出したり、UIで設定を変更することで水平にスケーリングできます。あるいは、これらのサービスはアプリケーションの需要に応じて **自動的に** 垂直スケーリングすることもできます。 - - - -## ClickHouse Cloud におけるスケーリングの仕組み {#how-scaling-works-in-clickhouse-cloud} - -現在、ClickHouse Cloud は、Scale 階層サービス向けに垂直自動スケーリングと手動の水平スケーリングをサポートしています。 - -Enterprise 階層サービスにおけるスケーリングの動作は以下の通りです: - -- **水平スケーリング**: 手動の水平スケーリングは、エンタープライズ層の全ての標準およびカスタムプロファイルで利用可能です。 -- **垂直スケーリング**: - - 標準プロファイル (1:4) は垂直自動スケーリングをサポートします。 - - カスタムプロファイルは、立ち上げ時には垂直自動スケーリングや手動の垂直スケーリングをサポートしません。ただし、サポートに連絡することで垂直にスケーリングできます。 - -:::note -我々は、コンピュートレプリカ用の新しい垂直スケーリングメカニズムを導入しています。これを「Make Before Break」(MBB) と呼んでいます。このアプローチでは、古いレプリカを削除する前に新しいサイズのレプリカを1つ以上追加することで、スケーリング操作中のキャパシティの損失を防ぎます。既存のレプリカを削除することと新しいレプリカを追加することの間のギャップを排除することで、MBBはよりシームレスで中断の少ないスケーリングプロセスを実現します。これは特にスケールアップのシナリオで有益であり、高リソース利用率が追加キャパシティの必要性を引き起こす場合において、早すぎるレプリカの削除はリソース制約を悪化させるだけです。 - -この変更の一環として、スケーリングイベントの一部として、過去のシステムテーブルデータが最大30日間保持されることに注意してください。さらに、AWS または GCP 上のサービスでは2024年12月19日以前の、Azure 上のサービスでは2025年1月14日以前のシステムテーブルデータは新しい組織階層への移行の一部として保持されません。 -::: - -### 垂直自動スケーリング {#vertical-auto-scaling} - - - -Scale および Enterprise サービスは、CPU 
とメモリの使用状況に基づいた自動スケーリングをサポートします。我々は、サービスの過去30時間の使用状況を常に監視して、スケーリングの決定を行います。使用状況が特定の閾値を超えたり下回ったりした場合、需要に応じてサービスを適切にスケーリングします。 - -CPUベースの自動スケーリングは、CPU使用率が50-75%の範囲で上限閾値を超えると発動します(実際の閾値はクラスターのサイズに依存します)。この時点で、クラスターへのCPUの割り当ては倍増します。CPU使用率が上限閾値の半分(例えば、上限閾値が50%の場合、25%に)以下に下がると、CPUの割り当ては半減します。 - -メモリベースの自動スケーリングは、最大メモリ使用量の125%まで、または OOM (Out Of Memory) エラーが発生した場合には150%までスケールします。 - -**CPU** または **メモリ** の推奨のうち大きい方が選ばれ、サービスに割り当てられるCPU とメモリは `1` CPU と `4 GiB` メモリの単位で同時にスケールされます。 - -### 垂直自動スケーリングの設定 {#configuring-vertical-auto-scaling} - -ClickHouse Cloud Scale または Enterprise サービスのスケーリングは、**Admin** ロールを持つ組織メンバーによって調整できます。垂直自動スケーリングを設定するには、サービスの **設定** タブに移動し、以下のように最小および最大メモリ、CPU 設定を調整します。 - -:::note -単一のレプリカサービスは、すべての階層でスケーリングできません。 -::: - -Scaling settings page - -レプリカの **最大メモリ** を **最小メモリ** より高い値に設定してください。これにより、サービスはその範囲内で必要に応じてスケールします。これらの設定は、初期サービス作成フロー中にも利用可能です。サービス内の各レプリカには、同じメモリおよびCPUリソースが割り当てられます。 - -これらの値を同じに設定することもでき、実質的にサービスを特定の構成に「ピン留め」することが可能です。そうすることで、選択したサイズに即座にスケーリングを強制します。 - -これを行うと、クラスター内の自動スケーリングが無効になり、これらの設定を超えるCPU またはメモリ使用量の増加からサービスを保護することができなくなります。 - -:::note -Enterprise 階層サービスでは、標準 1:4 プロファイルが垂直自動スケーリングをサポートします。カスタムプロファイルは、立ち上げ時には垂直自動スケーリングや手動の垂直スケーリングをサポートしません。ただし、サポートに連絡することでこれらのサービスを垂直にスケーリングできます。 -::: - -## 手動水平スケーリング {#manual-horizontal-scaling} - - - -ClickHouse Cloud の [公開API](https://clickhouse.com/docs/cloud/manage/api/swagger#/paths/~1v1~1organizations~1:organizationId~1services~1:serviceId~1scaling/patch) を使用して、サービスのスケーリング設定を更新したり、クラウドコンソールからレプリカの数を調整したりできます。 - -**Scale** および **Enterprise** 階層は、単一レプリカサービスをサポートしています。ただし、これらの階層のサービスが複数のレプリカで開始する場合、または複数のレプリカにスケールアウトする場合、最小 `2` レプリカに戻すことしかできません。 - -:::note -サービスは最大20レプリカまで水平スケーリングできます。追加のレプリカが必要な場合は、サポートチームにご連絡ください。 -::: - -### API経由の水平スケーリング {#horizontal-scaling-via-api} - -クラスターを水平にスケーリングするには、APIを介して `PATCH` リクエストを発行してレプリカの数を調整します。以下のスクリーンショットは、`3` レプリカクラスターを `6` レプリカにスケールアウトするためのAPI呼び出しを示し、対応するレスポンスを示しています。 - -Scaling PATCH request - -*`numReplicas` を更新するための `PATCH` リクエスト* - -Scaling PATCH response - -*`PATCH` リクエストのレスポンス* - -新しいスケーリングリクエストを発行するか、1つのリクエストが進行中の状態で複数のリクエストを連続して発行した場合、スケーリングサービスは中間状態を無視し、最終的なレプリカ数に収束します。 - -### UIを介した水平スケーリング {#horizontal-scaling-via-ui} - -UIからサービスを水平にスケーリングするには、サービスの **設定** ページでレプリカの数を調整することができます。 - -Scaling configuration settings - -*ClickHouse Cloudコンソールからのサービススケーリング設定* - -サービスがスケールした後、クラウドコンソールのメトリクスダッシュボードにはサービスへの正しい割り当てが表示されるべきです。以下のスクリーンショットは、クラスターが合計メモリ `96 GiB`、すなわち `6` レプリカで、各レプリカに `16 GiB` のメモリ割り当てがあることを示しています。 - -Scaling memory allocation - -## 自動アイドル状態 {#automatic-idling} -**設定** ページでは、サービスが非アクティブなときに自動アイドルを許可するかどうかを選択できます(すなわち、サービスがユーザーが送信したクエリを実行していないとき)。自動アイドルは、サービスが一時停止している間、コンピューティングリソースに対する料金が発生しないため、コストを削減します。 - -:::note -特定の特別なケース、たとえばサービスの部品が多数ある場合、自動アイドルにはならないことがあります。 - -サービスはアイドル状態に入ることがあり、その場合は [更新可能なマテリアライズドビュー](/materialized-view/refreshable-materialized-view) のリフレッシュ、[S3Queue](/engines/table-engines/integrations/s3queue) からの消費、そして新しいマージのスケジュールが一時停止されます。既存のマージ操作は、サービスがアイドル状態に移行する前に完了します。更新可能なマテリアライズドビューと S3Queue の消費が継続的に行われるようにするには、アイドル状態機能を無効にしてください。 -::: - -:::danger 自動アイドルを使用しないべき時 -自動アイドルは、クエリに応答するのに遅延を処理できるユースケースの場合のみ使用してください。サービスが一時停止している間、サービスへの接続はタイムアウトします。自動アイドルは、あまり頻繁に使用されず、遅延に耐えられる場合のサービスに最適です。顧客向けの機能を提供するサービスには推奨されません。 -::: - -## 突発的なワークロードの処理 {#handling-bursty-workloads} -今後のワークロードの急増が予想される場合は、[ClickHouse Cloud API](/cloud/manage/api/api-overview) を使用して、急増を処理するためにサービスを事前にスケールアップし、需要が収まったらスケールダウンできます。 - -各レプリカで現在使用中の CPU コアとメモリを理解するには、以下のクエリを実行できます: - -```sql -SELECT * -FROM clusterAllReplicas('default', view( - SELECT - hostname() AS server, - 
anyIf(value, metric = 'CGroupMaxCPU') AS cpu_cores, - formatReadableSize(anyIf(value, metric = 'CGroupMemoryTotal')) AS memory - FROM system.asynchronous_metrics -)) -ORDER BY server ASC -SETTINGS skip_unavailable_shards = 1 -``` diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/scaling.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/scaling.md.hash deleted file mode 100644 index a1d85821baa..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/scaling.md.hash +++ /dev/null @@ -1 +0,0 @@ -eba9287973ae49fc diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/service-uptime.md b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/service-uptime.md deleted file mode 100644 index 44ed193b737..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/service-uptime.md +++ /dev/null @@ -1,16 +0,0 @@ ---- -sidebar_label: 'サービスの稼働時間とSLA' -slug: '/cloud/manage/service-uptime' -title: 'サービスの稼働時間' -description: 'ユーザーは今、ステータスページで地域ごとの稼働時間を確認し、サービスの障害に関するアラートを購読できます。' ---- - - - -## Uptime Alerts {#uptime-alerts} - -ユーザーは現在、[ステータスページ](https://status.clickhouse.com/)で地域の稼働時間を確認し、サービスの中断に関するアラートの購読が可能です。 - -## SLA {#sla} - -選択されたコミットメント支出契約に対してサービスレベルアグリーメントも提供しています。SLAポリシーの詳細については、[sales@clickhouse.com](mailto:sales@clickhouse.com)までお問い合わせください。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/service-uptime.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/service-uptime.md.hash deleted file mode 100644 index a541b95f316..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/service-uptime.md.hash +++ /dev/null @@ -1 +0,0 @@ -df9f75d445319ad4 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/settings.md b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/settings.md deleted file mode 100644 index 0cb38973a30..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/settings.md +++ /dev/null @@ -1,20 +0,0 @@ ---- -sidebar_label: '設定の構成' -slug: '/manage/settings' -title: '設定の構成' -description: '特定のユーザーまたはロールのためにClickHouse Cloudサービスの設定を構成する方法' ---- - -import Image from '@theme/IdealImage'; -import cloud_settings_sidebar from '@site/static/images/cloud/manage/cloud-settings-sidebar.png'; - - -# 設定の構成 - -特定の [ユーザー](/operations/access-rights#user-account-management) または [ロール](/operations/access-rights#role-management) のために ClickHouse Cloud サービスの設定を指定するには、[SQL駆動の設定プロファイル](/operations/access-rights#settings-profiles-management) を使用する必要があります。 設定プロファイルを適用することで、サービスが停止したりアイドル状態になったり、アップグレードされたりしても、構成した設定が持続することが保証されます。 設定プロファイルについて詳しく知りたい方は、[こちらのページ](/operations/settings/settings-profiles.md)をご覧ください。 - -XMLベースの設定プロファイルと [構成ファイル](/operations/configuration-files.md) は、現在 ClickHouse Cloud ではサポートされていないことに注意してください。 - -ClickHouse Cloud サービスのために指定できる設定について詳しく知りたい方は、[当社のドキュメント](/operations/settings)でカテゴリごとに可能な設定をすべて確認してください。 - -Cloud settings sidebar diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/settings.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/settings.md.hash deleted file mode 100644 index 4228941c7ef..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/settings.md.hash +++ /dev/null @@ -1 +0,0 @@ -62696f3f9aa6bcf4 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/troubleshooting-billing-issues.md 
b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/troubleshooting-billing-issues.md deleted file mode 100644 index 651d0dfb1a1..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/troubleshooting-billing-issues.md +++ /dev/null @@ -1,24 +0,0 @@ ---- -sidebar_label: '請求トラブルシューティング' -slug: '/manage/troubleshooting-billing-issues' -title: '請求トラブルシューティング' -description: '一般的な請求の問題のトラブルシューティング記事' ---- - -import Image from '@theme/IdealImage'; -import trial_expired from '@site/static/images/cloud/manage/trial-expired.png'; - - -# 請求に関する問題のトラブルシューティング - -## 動作しない支払い詳細の修正 {#fixing-non-working-payment-details} - -ClickHouse Cloudの使用には、アクティブで動作するクレジットカードが必要です。試用期間が終了した後や、最後の成功した支払いの後の30日間、サービスは継続して実行されます。ただし、有効なクレジットカードに対して請求できない場合、クラウドコンソールの機能はあなたの組織に対して制限されます。 - -**試用期間が終了してから30日以内または最後の成功した支払いから30日以内に有効なクレジットカードが追加されない場合、データは削除されます。** - -支払いの詳細に問題がある場合やクレジットカードを追加できない場合は、[サポートチームにご連絡ください](https://clickhouse.com/support/program)。 - -
- -試用期間満了 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/troubleshooting-billing-issues.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/troubleshooting-billing-issues.md.hash deleted file mode 100644 index ab1077a2104..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/troubleshooting-billing-issues.md.hash +++ /dev/null @@ -1 +0,0 @@ -ffff537954b3cd1d diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/upgrades.md b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/upgrades.md deleted file mode 100644 index 13425d9a0cf..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/upgrades.md +++ /dev/null @@ -1,101 +0,0 @@ ---- -sidebar_label: 'アップグレード' -slug: '/manage/updates' -title: 'Upgrades' -description: 'ClickHouse Cloudを使用すると、パッチ適用とアップグレードの心配はありません。定期的に修正、新機能、パフォーマンスの改善を含むアップグレードを展開します。' ---- - -import Image from '@theme/IdealImage'; -import EnterprisePlanFeatureBadge from '@theme/badges/EnterprisePlanFeatureBadge' -import ScalePlanFeatureBadge from '@theme/badges/ScalePlanFeatureBadge' -import fast_release from '@site/static/images/cloud/manage/fast_release.png'; -import enroll_fast_release from '@site/static/images/cloud/manage/enroll_fast_release.png'; -import scheduled_upgrades from '@site/static/images/cloud/manage/scheduled_upgrades.png'; -import scheduled_upgrade_window from '@site/static/images/cloud/manage/scheduled_upgrade_window.png'; - - -# アップグレード - -ClickHouse Cloudでは、パッチやアップグレードについて心配する必要はありません。修正、新機能、パフォーマンスの改善を含むアップグレードを定期的に展開します。ClickHouseの新機能の完全なリストについては、[Cloudの変更履歴](/cloud/reference/changelog.md)を参照してください。 - -:::note -新しいアップグレードメカニズム「make before break」(またはMBB)を導入します。この新しいアプローチでは、アップグレード操作中に古いレプリカを削除する前に、更新されたレプリカを追加します。これにより、稼働中のワークロードに対する中断が少ない、よりシームレスなアップグレードが実現します。 - -この変更の一環として、歴史的なシステムテーブルデータは、アップグレードイベントの一環として最大30日間保持されます。また、AWSまたはGCP上のサービスにおいては2024年12月19日以前、Azure上のサービスにおいては2025年1月14日以前のシステムテーブルデータは、新しい組織ティアへの移行の一部として保持されません。 -::: - -## バージョン互換性 {#version-compatibility} - -サービスを作成すると、[`compatibility`](/operations/settings/settings#compatibility) 設定は、サービスが最初にプロビジョニングされた時点でClickHouse Cloudが提供する最新のClickHouseバージョンに設定されます。 - -`compatibility`設定を使用すると、以前のバージョンからの設定のデフォルト値を使用できます。サービスが新しいバージョンにアップグレードされるとき、`compatibility`設定に指定されているバージョンは変更されません。これは、サービスを最初に作成したときに存在した設定のデフォルト値は変更されないことを意味しています(すでにデフォルト値を上書きしている場合は、その場合でもアップグレード後に持続します)。 - -サービスの`compatibility`設定を管理することはできません。`compatibility`設定のバージョンを変更したい場合は、[サポートに連絡](https://clickhouse.com/support/program)する必要があります。 - -## メンテナンスモード {#maintenance-mode} - -時には、サービスを更新する必要があり、そのためにスケーリングやアイドルなどの特定の機能を無効にする必要がある場合があります。珍しいケースとして、問題を経験しているサービスに対してアクションを取る必要があり、サービスを健康な状態に戻す必要があります。そのようなメンテナンス中は、「メンテナンス進行中」というバナーがサービスページに表示されます。この間でもクエリとしてサービスを使用できる場合があります。 - -サービスがメンテナンスを受けている間は、料金は発生しません。_メンテナンスモード_は珍しいケースであり、通常のサービスアップグレードと混同しないでください。 - -## リリースチャネル(アップグレードスケジュール) {#release-channels-upgrade-schedule} - -特定のリリースチャネルに登録することにより、ClickHouse Cloudサービスのアップグレードスケジュールを指定できます。 - -### ファストリリースチャネル(早期アップグレード) {#fast-release-channel-early-upgrades} - - - -通常のアップグレードスケジュールに加えて、サービスが通常のリリーススケジュールの前に更新を受け取ることを希望する場合、**ファストリリース**チャネルを提供しています。 - -具体的には、サービスは以下を行います: - -- 最新のClickHouseリリースを受信する -- 新しいリリースがテストされると、より頻繁にアップグレードが行われる - -サービスのリリーススケジュールは、下記のCloudコンソールで変更できます。 - -
-[画像: プランの選択（Cloud コンソールのリリースチャネル設定画面）]
- -この**ファストリリース**チャネルは、重要でない環境で新機能をテストするために適しています。**厳格な稼働時間と信頼性の要件を持つ本番ワークロードには推奨されません。** - -### レギュラーリリースチャネル {#regular-release-channel} - -リリースチャネルやアップグレードスケジュールが設定されていないすべてのスケールおよびエンタープライズティアサービスについては、アップグレードはレギュラーチャネルリリースの一部として実施されます。これは本番環境に推奨されます。 - -レギュラーリリースチャネルへのアップグレードは、通常**ファストリリースチャネル**の2週間後に実施されます。 - -:::note -ベーシックティアのサービスは、ファストリリースチャネルの直後にアップグレードされます。 -::: - -## スケジュールされたアップグレード {#scheduled-upgrades} - - - -ユーザーは、エンタープライズティアのサービスに対してアップグレードウィンドウを設定できます。 - -アップグレードスケジュールを指定したいサービスを選択し、左側のメニューから`設定`を選択します。`スケジュールされたアップグレード`までスクロールします。 - -
-[画像: スケジュールされたアップグレード]
- -このオプションを選択すると、ユーザーはデータベースおよびクラウドのアップグレードの曜日/時間帯を選択できます。 - -
-[画像: スケジュールされたアップグレードウィンドウ]
-:::note -スケジュールされたアップグレードは定義されたスケジュールに従いますが、重要なセキュリティパッチおよび脆弱性修正については例外が適用されます。緊急のセキュリティ問題が特定された場合、スケジュールされたウィンドウ外でアップグレードが行われる場合があります。そのような例外については、必要に応じて顧客に通知されます。 -::: diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/upgrades.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/upgrades.md.hash deleted file mode 100644 index 43b560d2dca..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/manage/upgrades.md.hash +++ /dev/null @@ -1 +0,0 @@ -67cae2c42246e669 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/reference/_category_.yml b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/reference/_category_.yml deleted file mode 100644 index 79520c71142..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/reference/_category_.yml +++ /dev/null @@ -1,7 +0,0 @@ -label: 'Cloud Reference' -position: 1 -collapsible: true -collapsed: true -link: - type: generated-index - title: Cloud Reference diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/reference/architecture.md b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/reference/architecture.md deleted file mode 100644 index 48f69a059e0..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/reference/architecture.md +++ /dev/null @@ -1,54 +0,0 @@ ---- -sidebar_label: 'Architecture' -slug: '/cloud/reference/architecture' -title: 'ClickHouse Cloud Architecture' -description: 'This page describes the architecture of ClickHouse Cloud' ---- - -import Architecture from '@site/static/images/cloud/reference/architecture.svg'; - - -# ClickHouse Cloudアーキテクチャ - - - -## オブジェクトストアに支えられたストレージ {#storage-backed-by-object-store} -- 実質的に無限のストレージ -- データを手動で共有する必要がない -- 特に頻繁にアクセスされないデータの保存に対して、データの保存コストが大幅に低くなる - -## コンピュート {#compute} -- 自動スケーリングとアイドル状態: 事前にサイズを決める必要がなく、ピーク使用時に過剰にプロビジョニングする必要もない -- 自動アイドル状態と再開: 誰も使用していない間、未使用のコンピュートを稼働させる必要がない -- デフォルトでセキュアで高可用性 - -## 管理 {#administration} -- セットアップ、監視、バックアップ、請求は全て自動で行われる。 -- コスト管理機能はデフォルトで有効になっており、Cloudコンソールを通じて調整可能。 - -## サービスの隔離 {#service-isolation} - -### ネットワーク隔離 {#network-isolation} - -全てのサービスはネットワーク層で隔離されている。 - -### コンピュート隔離 {#compute-isolation} - -全てのサービスはそれぞれのKubernetesスペースの個別のポッドに展開され、ネットワークレベルでの隔離が行われている。 - -### ストレージ隔離 {#storage-isolation} - -全てのサービスは共有バケット(AWS、GCP)またはストレージコンテナ(Azure)の別々のサブパスを使用する。 - -AWSの場合、ストレージへのアクセスはAWS IAMを介して制御されており、各IAMロールはサービスごとにユニークである。エンタープライズサービスの場合、[CMEK](/cloud/security/cmek)を有効にすることで、静止データに対して高度なデータ隔離を提供できる。CMEKは現時点ではAWSサービスのみサポートされている。 - -GCPおよびAzureの場合、サービスはオブジェクトストレージの隔離を持っている(全てのサービスはそれぞれのバケットまたはストレージコンテナを持つ)。 - -## コンピュートの分離 {#compute-compute-separation} -[コンピュートの分離](/cloud/reference/warehouses)により、ユーザーはそれぞれ独自のサービスURLを持つ複数のコンピュートノードグループを作成でき、全てが同じ共有オブジェクトストレージを使用します。これにより、同じデータを共有する読み取りと書き込みといった異なるユースケースのコンピュート隔離が可能になります。また、必要に応じてコンピュートグループの独立したスケーリングを許可することで、リソースの効率的な利用も促進します。 - -## 同時実行制限 {#concurrency-limits} - -ClickHouse Cloudサービスにおいて、1秒あたりのクエリ数(QPS)には制限がありません。ただし、各レプリカに対して最大1000の同時クエリの制限があります。QPSは最終的には平均クエリ実行時間とサービス内のレプリカ数の関数です。 - -セルフマネージドのClickHouseインスタンスや他のデータベース/データウェアハウスに比べ、ClickHouse Cloudの大きな利点は、[レプリカを追加することで同時実行性を簡単に増加させることができる(水平スケーリング)](/manage/scaling#manual-horizontal-scaling)点です。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/reference/architecture.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/reference/architecture.md.hash deleted file mode 100644 index 238530d300c..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/reference/architecture.md.hash +++ /dev/null @@ 
-1 +0,0 @@ -4864ed1266505adc diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/reference/byoc.md b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/reference/byoc.md deleted file mode 100644 index 60ef8a19d15..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/reference/byoc.md +++ /dev/null @@ -1,429 +0,0 @@ ---- -title: 'BYOC (Bring Your Own Cloud) for AWS' -slug: '/cloud/reference/byoc' -sidebar_label: 'BYOC (Bring Your Own Cloud)' -keywords: -- 'BYOC' -- 'cloud' -- 'bring your own cloud' -description: 'Deploy ClickHouse on your own cloud infrastructure' ---- - -import Image from '@theme/IdealImage'; -import byoc1 from '@site/static/images/cloud/reference/byoc-1.png'; -import byoc4 from '@site/static/images/cloud/reference/byoc-4.png'; -import byoc3 from '@site/static/images/cloud/reference/byoc-3.png'; -import byoc_vpcpeering from '@site/static/images/cloud/reference/byoc-vpcpeering-1.png'; -import byoc_vpcpeering2 from '@site/static/images/cloud/reference/byoc-vpcpeering-2.png'; -import byoc_vpcpeering3 from '@site/static/images/cloud/reference/byoc-vpcpeering-3.png'; -import byoc_vpcpeering4 from '@site/static/images/cloud/reference/byoc-vpcpeering-4.png'; -import byoc_plb from '@site/static/images/cloud/reference/byoc-plb.png'; -import byoc_security from '@site/static/images/cloud/reference/byoc-securitygroup.png'; -import byoc_inbound from '@site/static/images/cloud/reference/byoc-inbound-rule.png'; - - -## 概要 {#overview} - -BYOC (Bring Your Own Cloud) を使用すると、独自のクラウドインフラストラクチャに ClickHouse Cloud をデプロイできます。これは、ClickHouse Cloud のマネージドサービスを利用することを妨げる特定の要件や制約がある場合に便利です。 - -**アクセスをご希望の場合は、[お問い合わせ](https://clickhouse.com/cloud/bring-your-own-cloud)ください。** 詳細情報については、[利用規約](https://clickhouse.com/legal/agreements/terms-of-service)をご参照ください。 - -BYOCは現在、AWS のみサポートされています。 GCP および Azure の待機リストには、[こちらから](https://clickhouse.com/cloud/bring-your-own-cloud)参加できます。 - -:::note -BYOCは大規模なデプロイメント専用に設計されており、顧客に対して契約を締結することが求められます。 -::: - -## 用語集 {#glossary} - -- **ClickHouse VPC:** ClickHouse Cloud 所有の VPC です。 -- **Customer BYOC VPC:** 顧客のクラウドアカウントが所有し、ClickHouse Cloud によってプロビジョニングおよび管理される VPC で、ClickHouse Cloud BYOC デプロイメント専用です。 -- **Customer VPC:** 顧客のクラウドアカウントによって所有され、Customer BYOC VPC に接続が必要なアプリケーション用の他の VPC です。 - -## アーキテクチャ {#architecture} - -メトリクスとログは、顧客の BYOC VPC 内に保存されます。ログは現在、EBS 内にローカルで保存されています。将来的な更新では、ログは顧客の BYOC VPC 内の ClickHouse サービスである LogHouse に保存されます。メトリクスは、顧客の BYOC VPC 内にローカルに保存された Prometheus および Thanos スタックを介して実装されます。 - -
- -BYOC アーキテクチャ - -
- -## オンボーディングプロセス {#onboarding-process} - -顧客は、[こちらから](https://clickhouse.com/cloud/bring-your-own-cloud) お問い合わせいただくことで、オンボーディングプロセスを開始できます。顧客は専用の AWS アカウントを持ち、使用するリージョンを把握している必要があります。現在、ClickHouse Cloud に対してサポートしているリージョンのみで BYOC サービスを起動できるようになっています。 - -### 専用の AWS アカウントを準備する {#prepare-a-dedicated-aws-account} - -顧客は、ClickHouse BYOC デプロイメントのホスティング用に専用の AWS アカウントを準備する必要があります。これにより、より良い分離が確保されます。これと初期の組織管理者のメールを用いて、ClickHouse サポートに連絡することができます。 - -### CloudFormation テンプレートを適用する {#apply-cloudformation-template} - -BYOC セットアップは、[CloudFormation スタック](https://s3.us-east-2.amazonaws.com/clickhouse-public-resources.clickhouse.cloud/cf-templates/byoc.yaml)を介して初期化され、これにより BYOC コントローラーがインフラストラクチャを管理できるようにするのみのロールが作成されます。ClickHouse を実行するための S3、VPC、コンピュートリソースはこのスタックには含まれていません。 - - - -### BYOC インフラストラクチャを設定する {#setup-byoc-infrastructure} - -CloudFormation スタックを作成した後、クラウドコンソールから S3、VPC、および EKS クラスターを含むインフラストラクチャの設定が求められます。この段階で特定の設定を決定する必要があります。なぜなら、後から変更することができないからです。具体的には: - -- **使用したいリージョン**: ClickHouse Cloud のために用意された任意の[公開リージョン](/cloud/reference/supported-regions)から選択できます。 -- **BYOC の VPC CIDR 範囲**: デフォルトでは、BYOC VPC CIDR 範囲には `10.0.0.0/16` を使用します。別のアカウントとの VPC ピアリングを使用する予定がある場合は、CIDR 範囲が重複しないようにしてください。BYOC 用に適切な CIDR 範囲を割り当て、必要なワークロードを収容できる最小サイズである `/22` を使用してください。 -- **BYOC VPC のアベイラビリティゾーン**: VPC ピアリングを使用する場合、ソースアカウントと BYOC アカウント間でアベイラビリティゾーンを合わせることで、クロス AZ トラフィックコストを削減できます。AWS では、アベイラビリティゾーンのサフィックス(`a, b, c`)はアカウント間で異なる物理ゾーン ID を表す場合があります。詳細は[AWS ガイド](https://docs.aws.amazon.com/prescriptive-guidance/latest/patterns/use-consistent-availability-zones-in-vpcs-across-different-aws-accounts.html)を参照してください。 - -### オプション: VPC ピアリングを設定する {#optional-setup-vpc-peering} - -ClickHouse BYOC のために VPC ピアリングを作成または削除するには、以下の手順に従います: - -#### ステップ 1 ClickHouse BYOC のためにプライベートロードバランサーを有効にする {#step-1-enable-private-load-balancer-for-clickhouse-byoc} -ClickHouse サポートに連絡してプライベートロードバランサーを有効にしてください。 - -#### ステップ 2 ピアリング接続を作成する {#step-2-create-a-peering-connection} -1. ClickHouse BYOC アカウントの VPC ダッシュボードに移動します。 -2. ピアリング接続を選択します。 -3. ピアリング接続を作成するをクリックします。 -4. VPC リクエスターを ClickHouse VPC ID に設定します。 -5. VPC アクセプターをターゲット VPC ID に設定します。(該当する場合は他のアカウントを選択してください) -6. ピアリング接続を作成するをクリックします。 - -
- -BYOC ピアリング接続の作成 - -
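コンソールでの操作が公式の手順ですが、参考として、ステップ 2 とほぼ同等の操作を AWS CLI で行う場合のスケッチを示します（VPC ID・アカウント ID・リージョンはいずれも例示用のプレースホルダーです）:

```bash
# ClickHouse BYOC VPC（リクエスター）からターゲット VPC（アクセプター）へのピアリング接続を作成する
aws ec2 create-vpc-peering-connection \
  --vpc-id vpc-0aaaaaaaaaaaaaaaa \
  --peer-vpc-id vpc-0bbbbbbbbbbbbbbbb \
  --peer-owner-id 111122223333 \
  --peer-region us-west-2
```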
- -#### ステップ 3 ピアリング接続要求を承認する {#step-3-accept-the-peering-connection-request} -ピアリングアカウントに移動し、(VPC -> ピアリング接続 -> アクション -> 要求を承認) ページで顧客はこの VPC ピアリング要求を承認できます。 - -
- -BYOC ピアリング接続の承認 - -
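ステップ 3 を CLI で行う場合は次のようになります（ピアリング接続 ID は例示用のプレースホルダーです）:

```bash
# ピアリング先（ターゲット）アカウント側で、ステップ 2 で作成したリクエストを承認する
aws ec2 accept-vpc-peering-connection \
  --vpc-peering-connection-id pcx-0123456789abcdef0
```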
- -#### ステップ 4 ClickHouse VPC ルートテーブルに宛先を追加する {#step-4-add-destination-to-clickhouse-vpc-route-tables} -ClickHouse BYOC アカウントで、 -1. VPC ダッシュボードのルートテーブルを選択します。 -2. ClickHouse VPC ID を検索します。プライベートサブネットに関連付けられた各ルートテーブルを編集します。 -3. ルートタブの下にある編集ボタンをクリックします。 -4. 別のルートを追加をクリックします。 -5. 宛先の CIDR 範囲にターゲット VPC の CIDR 範囲を入力します。 -6. 「ピアリング接続」を選択し、ターゲットのピアリング接続 ID を選択します。 - -
- -BYOC ルートテーブルに追加 - -
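ステップ 4 のルート追加を CLI で行う場合のスケッチです。ClickHouse BYOC のプライベートサブネットに関連付けられた各ルートテーブルに対して同じコマンドを繰り返します（ID と宛先 CIDR は例示用のプレースホルダーです）:

```bash
# ターゲット VPC の CIDR 宛のトラフィックをピアリング接続経由にルーティングする
aws ec2 create-route \
  --route-table-id rtb-0123456789abcdef0 \
  --destination-cidr-block 10.1.0.0/16 \
  --vpc-peering-connection-id pcx-0123456789abcdef0
```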
- -#### ステップ 5 ターゲット VPC ルートテーブルに宛先を追加する {#step-5-add-destination-to-the-target-vpc-route-tables} -ピアリングされた AWS アカウントで、 -1. VPC ダッシュボードのルートテーブルを選択します。 -2. ターゲット VPC ID を検索します。 -3. ルートタブの下にある編集ボタンをクリックします。 -4. 別のルートを追加をクリックします。 -5. 宛先に ClickHouse VPC の CIDR 範囲を入力します。 -6. 「ピアリング接続」を選択し、ターゲットのピアリング接続 ID を選択します。 - -
- -BYOC ルートテーブルに追加 - -
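ステップ 5 はステップ 4 の逆方向の設定で、ピアリング先アカウント側のルートテーブルに対して実行します。宛先には、デフォルトで `10.0.0.0/16` を使用する ClickHouse BYOC VPC の CIDR を指定します（ルートテーブル ID は例示用のプレースホルダーです）:

```bash
# ピアリング先アカウント側: ClickHouse BYOC VPC の CIDR をピアリング接続経由にルーティングする
aws ec2 create-route \
  --route-table-id rtb-0fedcba9876543210 \
  --destination-cidr-block 10.0.0.0/16 \
  --vpc-peering-connection-id pcx-0123456789abcdef0
```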
- -#### ステップ 6 ピアード VPC アクセスを許可するためにセキュリティグループを編集する {#step-6-edit-security-group-to-allow-peered-vpc-access} -ClickHouse BYOC アカウントで、 -1. ClickHouse BYOC アカウントで、EC2 に移動し、infra-xx-xxx-ingress-private のような名前のプライベートロードバランサーを見つけます。 - -
- -BYOC プライベートロードバランサー - -
- -2. 詳細ページのセキュリティタブの下に、`k8s-istioing-istioing-xxxxxxxxx` のような命名パターンに従う関連付けられたセキュリティグループを見つけます。 - -
- -BYOC プライベートロードバランサーのセキュリティグループ - -
- -3. このセキュリティグループのインバウンドルールを編集し、ピアリングされた VPC CIDR 範囲を追加します(または、必要に応じて必要な CIDR 範囲を指定します)。 - -
- -BYOC セキュリティグループのインバウンドルール - -
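ステップ 6 のインバウンドルール追加も CLI で行えます。以下は、ClickHouse Cloud が一般的に使用する 8443（HTTPS）と 9440（ネイティブプロトコル/TLS）の両ポートを、ピアリングされた VPC の CIDR に対して開放する例です（セキュリティグループ ID と CIDR は例示用のプレースホルダーで、必要なポートは構成によって異なる場合があります）:

```bash
# ピアリングされた VPC の CIDR からプライベートロードバランサーのセキュリティグループへの受信を許可する
for PORT in 8443 9440; do
  aws ec2 authorize-security-group-ingress \
    --group-id sg-0123456789abcdef0 \
    --protocol tcp \
    --port "$PORT" \
    --cidr 10.1.0.0/16
done
```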
- ---- -ClickHouse サービスは、ピアリングされた VPC からアクセス可能になるはずです。 - -ClickHouse にプライベートにアクセスするために、ユーザーのピアリングされた VPC からの安全な接続のために、プライベートロードバランサーとエンドポイントがプロビジョニングされます。プライベートエンドポイントは、`-private` サフィックスを持つ公開エンドポイントフォーマットに従います。例えば: -- **公開エンドポイント**: `h5ju65kv87.mhp0y4dmph.us-west-2.aws.byoc.clickhouse.cloud` -- **プライベートエンドポイント**: `h5ju65kv87-private.mhp0y4dmph.us-west-2.aws.byoc.clickhouse.cloud` - -オプションとして、ピアリングが正常に機能していることを確認した後、ClickHouse BYOC の公開ロードバランサーの削除をリクエストできます。 - -## アップグレードプロセス {#upgrade-process} - -私たちは定期的にソフトウェアをアップグレードしており、ClickHouse データベースバージョンのアップグレード、ClickHouse オペレーター、EKS、その他のコンポーネントが含まれます。 - -シームレスなアップグレード(例:ローリングアップグレードや再起動)を目指していますが、ClickHouse バージョンの変更や EKS ノードのアップグレードに関してはサービスに影響を与える可能性があります。顧客はメンテナンスウィンドウ(例:毎週火曜日午前1:00 PDT)を指定でき、それによりそのようなアップグレードはスケジュールされた時間のみ実施されます。 - -:::note -メンテナンスウィンドウは、セキュリティや脆弱性の修正には適用されません。これらは、オフサイクルアップグレードとして扱われ、適切な時間を調整し業務への影響を最小限に抑えるための迅速なコミュニケーションが行われます。 -::: - -## CloudFormation IAM ロール {#cloudformation-iam-roles} - -### ブートストラップ IAM ロール {#bootstrap-iam-role} - -ブートストラップ IAM ロールには以下の権限があります: - -- **EC2 および VPC 操作**: VPC および EKS クラスターの設定に必要です。 -- **S3 操作 (例:`s3:CreateBucket`)**: ClickHouse BYOC ストレージ用のバケットを作成するために必要です。 -- **`route53:*` 権限**: Route 53 にレコードを構成するための外部 DNS に必要です。 -- **IAM 操作 (例:`iam:CreatePolicy`)**: コントローラーが追加のロールを作成するために必要です(詳細は次のセクションを参照)。 -- **EKS 操作**: `clickhouse-cloud` プレフィックスで始まる名前のリソースに制限されます。 - -### コントローラーによって作成される追加の IAM ロール {#additional-iam-roles-created-by-the-controller} - -CloudFormation を介して作成された `ClickHouseManagementRole` に加えて、コントローラーはさらにいくつかのロールを作成します。 - -これらのロールは、顧客の EKS クラスター内で実行されているアプリケーションによって想定されます: -- **State Exporter Role** - - ClickHouse コンポーネントが ClickHouse Cloud にサービスのヘルス情報を報告します。 - - ClickHouse Cloud 所有の SQS キューに書き込む権限が必要です。 -- **Load-Balancer Controller** - - 標準の AWS ロードバランサーコントローラーです。 - - ClickHouse サービス用ボリュームを管理するための EBS CSI コントローラーです。 -- **External-DNS** - - DNS 構成を Route 53 に配布します。 -- **Cert-Manager** - - BYOC サービスドメイン用の TLS 証明書をプロビジョニングします。 -- **Cluster Autoscaler** - - 必要に応じてノードグループのサイズを調整します。 - -**K8s-control-plane** および **k8s-worker** ロールは AWS EKS サービスによって想定されます。 - -最後に、**`data-plane-mgmt`** により ClickHouse Cloud コントロールプレーンコンポーネントは、`ClickHouseCluster` および Istio の仮想サービス/ゲートウェイのような必要なカスタムリソースを調整できるようになります。 - -## ネットワーク境界 {#network-boundaries} - -このセクションでは、顧客 BYOC VPC へのネットワークトラフィックと顧客 BYOC VPC からのトラフィックの異なる形式について説明します: - -- **インバウンド**: 顧客 BYOC VPC に入ってくるトラフィック。 -- **アウトバウンド**: 顧客 BYOC VPC から発生し、外部の宛先に送信されるトラフィック。 -- **パブリック**: 公共のインターネットからアクセス可能なネットワークエンドポイント。 -- **プライベート**: VPC ピアリングや VPC プライベートリンク、Tailscale のようなプライベート接続を介してのみアクセス可能なネットワークエンドポイント。 - -**Istio ingress は AWS NLB の背後にデプロイされ、ClickHouse クライアントトラフィックを受け入れます。** - -*インバウンド、パブリック (プライベートとなる場合もある)* - -Istio ingress ゲートウェイは TLS を終了します。Let's Encrypt によって CertManager でプロビジョニングされた証明書は、EKS クラスター内のシークレットとして保存されます。Istio と ClickHouse 間のトラフィックは[AWS](https://docs.aws.amazon.com/whitepapers/latest/logical-separation/encrypting-data-at-rest-and--in-transit.html#:~:text=All%20network%20traffic%20between%20AWS,supported%20Amazon%20EC2%20instance%20types) によって暗号化されており、同じ VPC 内に存在するためです。 - -デフォルトでは、インバウンドは IP アロウリストフィルタリングでパブリックにアクセス可能です。顧客は VPC ピアリングを構成してプライベートにし、公共の接続を無効にすることができます。[IP フィルター](/cloud/security/setting-ip-filters)を設定してアクセスを制限することを強くお勧めします。 - -### アクセスのトラブルシューティング {#troubleshooting-access} - -*インバウンド、パブリック (プライベートとなる場合もある)* - -ClickHouse Cloud エンジニアは Tailscale 経由でトラブルシューティングアクセスを必要とします。彼らは BYOC デプロイメントのために、Just-in-Time の証明書ベースの認証をプロビジョニングされています。 - -### 請求スクリーパー {#billing-scraper} - -*アウトバウンド、プライベート* - -請求スクリーパーは ClickHouse から請求データを収集し、それを ClickHouse 
Cloud 所有の S3 バケットに送信します。 - -これは ClickHouse サーバーコンテナと一緒にサイドカーとして実行され、定期的に CPU およびメモリメトリクスをスクレイピングします。同じリージョン内のリクエストは、VPC ゲートウェイサービスエンドポイントを介してルーティングされます。 - -### アラート {#alerts} - -*アウトバウンド、パブリック* - -AlertManager は、顧客の ClickHouse クラスターが正常でない場合に ClickHouse Cloud にアラートを送信するように構成されています。 - -メトリクスとログは、顧客の BYOC VPC に保存されます。ログは現在、EBS 内でローカルに保存されています。将来的な更新では、BYOC VPC 内の ClickHouse サービスである LogHouse に保存される予定です。メトリクスは、BYOC VPC 内でローカルに保存された Prometheus および Thanos スタックを利用します。 - -### サービス状態 {#service-state} - -*アウトバウンド* - -State Exporter は、ClickHouse サービス状態情報を ClickHouse Cloud 所有の SQS に送信します。 - -## 機能 {#features} - -### サポートされている機能 {#supported-features} - -- **SharedMergeTree**: ClickHouse Cloud と BYOC は同じバイナリと構成を使用しています。したがって、SharedMergeTree などの ClickHouse コアのすべての機能が BYOC でサポートされています。 -- **サービス状態を管理するためのコンソールアクセス**: - - 開始、停止、および終了などの操作をサポートします。 - - サービスと状態を表示できます。 -- **バックアップと復元。** -- **手動の垂直および水平方向のスケーリング。** -- **アイドル。** -- **倉庫**: コンピュートとコンピュートの分離 -- **Tailscale を介したゼロトラストネットワーク。** -- **モニタリング**: - - クラウドコンソールには、サービスヘルスのモニタリング用の組み込みヘルスダッシュボードが含まれています。 - - Prometheus、Grafana、Datadog との中央集計モニタリング用の Prometheus スクレイピング。設定手順については、[Prometheus ドキュメント](/integrations/prometheus)を参照してください。 -- **VPC ピアリング。** -- **統合**: [このページ](/integrations)に完全なリストがあります。 -- **安全な S3。** -- **[AWS PrivateLink](https://aws.amazon.com/privatelink/)。** - -### 計画中の機能 (現在サポートされていません) {#planned-features-currently-unsupported} - -- [AWS KMS](https://aws.amazon.com/kms/) 別名 CMEK (顧客管理暗号化キー) -- インジェスト用の ClickPipes -- オートスケーリング -- MySQL インターフェース - -## FAQ {#faq} - -### コンピュート {#compute} - -#### この単一の EKS クラスターに複数のサービスを作成できますか? {#can-i-create-multiple-services-in-this-single-eks-cluster} - -はい。インフラストラクチャは、すべての AWS アカウントとリージョンの組み合わせについて一度だけプロビジョニングされる必要があります。 - -### BYOC のサポートリージョンはどこですか? {#which-regions-do-you-support-for-byoc} - -BYOC は ClickHouse Cloud と同じセットの [リージョン](/cloud/reference/supported-regions#aws-regions ) をサポートしています。 - -#### リソースのオーバーヘッドはありますか? ClickHouse インスタンス以外のサービスを実行するために必要なリソースは何ですか? {#will-there-be-some-resource-overhead-what-are-the-resources-needed-to-run-services-other-than-clickhouse-instances} - -ClickHouse インスタンス (ClickHouse サーバーと ClickHouse Keeper) の他に、`clickhouse-operator`、`aws-cluster-autoscaler`、Istio などのサービスが実行され、モニタリングスタックも実行されます。 - -現在、これらのワークロードを実行するために、専用のノードグループに 3 つの m5.xlarge ノード (各 AZ に 1 つ) を持っています。 - -### ネットワークとセキュリティ {#network-and-security} - -#### 設定完了後にインストール中に設定した権限を取り消すことはできますか? {#can-we-revoke-permissions-set-up-during-installation-after-setup-is-complete} - -現時点ではこれは不可能です。 - -#### ClickHouse エンジニアがトラブルシューティングのために顧客インフラにアクセスするための将来のセキュリティコントロールを検討していますか? {#have-you-considered-some-future-security-controls-for-clickhouse-engineers-to-access-customer-infra-for-troubleshooting} - -はい。顧客がエンジニアのクラスターアクセスを承認できる顧客制御のメカニズムの実装は私たちのロードマップ上にあります。現時点では、エンジニアはクラスタへの十分なアクセスを得るために、内部のエスカレーションプロセスを経なければなりません。これは、私たちのセキュリティチームによって記録され、監査されています。 - -#### 作成された VPC IP 範囲のサイズはどのくらいですか? {#what-is-the-size-of-the-vpc-ip-range-created} - -デフォルトでは、BYOC VPC には `10.0.0.0/16` を使用します。将来的なスケーリング可能性のために最低でも /22 を予約することをお勧めしますが、サイズを制限したい場合は、30 サーバーポッドに制限される可能性が高い場合に限り /23 を使用することが可能です。 - -#### メンテナンスの頻度を決定できますか? {#can-i-decide-maintenance-frequency} - -サポートに連絡してメンテナンスウィンドウをスケジュールしてください。少なくとも週間での更新スケジュールを期待してください。 - -## 可視性 {#observability} - -### 組み込みのモニタリングツール {#built-in-monitoring-tools} - -#### 可視性ダッシュボード {#observability-dashboard} - -ClickHouse Cloud は、メモリ使用量、クエリレート、I/O などのメトリクスを表示する高度な可視性ダッシュボードを備えています。これは、ClickHouse Cloud ウェブコンソールインターフェースの **モニタリング** セクションからアクセスできます。 - -
- -可視性ダッシュボード - -
- -#### 高度なダッシュボード {#advanced-dashboard} - -`system.metrics`、`system.events`、`system.asynchronous_metrics` などのシステムテーブルからのメトリクスを使用してダッシュボードをカスタマイズし、サーバーのパフォーマンスやリソース利用率を詳細に監視できます。 - -
- -高度なダッシュボード - -
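高度なダッシュボードの各パネルはこれらのシステムテーブルへの通常のクエリで構成されているため、同じ値をアドホックに確認することもできます。以下はホスト名・認証情報・メトリクス名のフィルタを例示用に置いた最小限のスケッチです:

```bash
# ホスト名とパスワードは自身のサービスの値に置き換えてください
clickhouse client \
  --host h5ju65kv87.mhp0y4dmph.us-west-2.aws.byoc.clickhouse.cloud \
  --secure \
  --user default \
  --password 'your_password' \
  --query "
    SELECT metric, value, description
    FROM system.asynchronous_metrics
    WHERE metric LIKE 'OSMemory%' OR metric LIKE 'LoadAverage%'
    ORDER BY metric"
```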
- -#### Prometheus 統合 {#prometheus-integration} - -ClickHouse Cloud は、モニタリング用のメトリクスをスクレイピングするために使用できる Prometheus エンドポイントを提供します。これにより、Grafana や Datadog などのツールと統合し、可視化を行うことができます。 - -**https エンドポイント /metrics_all を介したサンプルリクエスト** - -```bash -curl --user : https://i6ro4qarho.mhp0y4dmph.us-west-2.aws.byoc.clickhouse.cloud:8443/metrics_all -``` - -**サンプルレスポンス** - -```bash - -# HELP ClickHouse_CustomMetric_StorageSystemTablesS3DiskBytes ディスク `s3disk` に保存されているバイト数 - -# TYPE ClickHouse_CustomMetric_StorageSystemTablesS3DiskBytes gauge -ClickHouse_CustomMetric_StorageSystemTablesS3DiskBytes{hostname="c-jet-ax-16-server-43d5baj-0"} 62660929 - -# HELP ClickHouse_CustomMetric_NumberOfBrokenDetachedParts 壊れたデタッチパーツの数 - -# TYPE ClickHouse_CustomMetric_NumberOfBrokenDetachedParts gauge -ClickHouse_CustomMetric_NumberOfBrokenDetachedParts{hostname="c-jet-ax-16-server-43d5baj-0"} 0 - -# HELP ClickHouse_CustomMetric_LostPartCount 最も古い変異の年齢 (秒) - -# TYPE ClickHouse_CustomMetric_LostPartCount gauge -ClickHouse_CustomMetric_LostPartCount{hostname="c-jet-ax-16-server-43d5baj-0"} 0 - -# HELP ClickHouse_CustomMetric_NumberOfWarnings サーバーによって発行された警告の数。これは通常、誤った設定について示しています - -# TYPE ClickHouse_CustomMetric_NumberOfWarnings gauge -ClickHouse_CustomMetric_NumberOfWarnings{hostname="c-jet-ax-16-server-43d5baj-0"} 2 - -# HELP ClickHouseErrorMetric_FILE_DOESNT_EXIST FILE_DOESNT_EXIST - -# TYPE ClickHouseErrorMetric_FILE_DOESNT_EXIST counter -ClickHouseErrorMetric_FILE_DOESNT_EXIST{hostname="c-jet-ax-16-server-43d5baj-0",table="system.errors"} 1 - -# HELP ClickHouseErrorMetric_UNKNOWN_ACCESS_TYPE UNKNOWN_ACCESS_TYPE - -# TYPE ClickHouseErrorMetric_UNKNOWN_ACCESS_TYPE counter -ClickHouseErrorMetric_UNKNOWN_ACCESS_TYPE{hostname="c-jet-ax-16-server-43d5baj-0",table="system.errors"} 8 - -# HELP ClickHouse_CustomMetric_TotalNumberOfErrors 最後の再起動以降のサーバー上のエラーの合計数 - -# TYPE ClickHouse_CustomMetric_TotalNumberOfErrors gauge -ClickHouse_CustomMetric_TotalNumberOfErrors{hostname="c-jet-ax-16-server-43d5baj-0"} 9 -``` - -**認証** - -ClickHouse のユーザー名とパスワードのペアを使用して認証できます。メトリクスをスクレイピングするために最小限の権限を持つ専用ユーザーの作成をお勧めします。最小限、`system.custom_metrics` テーブルに対して `READ` 権限が必要です。例えば: - -```sql -GRANT REMOTE ON *.* TO scraping_user -GRANT SELECT ON system.custom_metrics TO scraping_user -``` - -**Prometheus の設定** - -以下は設定の例です。`targets` エンドポイントは、ClickHouse サービスにアクセスするために使用されるのと同じものです。 - -```bash -global: - scrape_interval: 15s - -scrape_configs: - - job_name: "prometheus" - static_configs: - - targets: ["localhost:9090"] - - job_name: "clickhouse" - static_configs: - - targets: ["..aws.byoc.clickhouse.cloud:8443"] - scheme: https - metrics_path: "/metrics_all" - basic_auth: - username: - password: - honor_labels: true -``` - -また、[このブログ投稿](https://clickhouse.com/blog/clickhouse-cloud-now-supports-prometheus-monitoring)および[ClickHouse 用の Prometheus 設定ドキュメント](/integrations/prometheus)もご覧ください。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/reference/byoc.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/reference/byoc.md.hash deleted file mode 100644 index 3f7afb4f00d..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/reference/byoc.md.hash +++ /dev/null @@ -1 +0,0 @@ -c06f1bf0b44fe422 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/reference/changelog.md b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/reference/changelog.md deleted file mode 100644 index 2fb076582ae..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/reference/changelog.md +++ /dev/null @@ 
-1,1182 +0,0 @@ ---- -slug: '/whats-new/cloud' -sidebar_label: 'クラウド変更履歴' -title: 'クラウド変更履歴' -description: '各ClickHouse Cloudリリースの新機能に関する説明を提供するClickHouse Cloud変更履歴' ---- - -import Image from '@theme/IdealImage'; -import add_marketplace from '@site/static/images/cloud/reference/add_marketplace.png'; -import beta_dashboards from '@site/static/images/cloud/reference/beta_dashboards.png'; -import api_endpoints from '@site/static/images/cloud/reference/api_endpoints.png'; -import cross_vpc from '@site/static/images/cloud/reference/cross-vpc-clickpipes.png'; -import nov_22 from '@site/static/images/cloud/reference/nov-22-dashboard.png'; -import private_endpoint from '@site/static/images/cloud/reference/may-30-private-endpoints.png'; -import notifications from '@site/static/images/cloud/reference/nov-8-notifications.png'; -import kenesis from '@site/static/images/cloud/reference/may-17-kinesis.png'; -import s3_gcs from '@site/static/images/cloud/reference/clickpipes-s3-gcs.png'; -import tokyo from '@site/static/images/cloud/reference/create-tokyo-service.png'; -import cloud_console from '@site/static/images/cloud/reference/new-cloud-console.gif'; -import copilot from '@site/static/images/cloud/reference/nov-22-copilot.gif'; -import latency_insights from '@site/static/images/cloud/reference/oct-4-latency-insights.png'; -import cloud_console_2 from '@site/static/images/cloud/reference/aug-15-compute-compute.png'; -import compute_compute from '@site/static/images/cloud/reference/july-18-table-inspector.png'; -import query_insights from '@site/static/images/cloud/reference/june-28-query-insights.png'; -import prometheus from '@site/static/images/cloud/reference/june-28-prometheus.png'; -import kafka_config from '@site/static/images/cloud/reference/june-13-kafka-config.png'; -import fast_releases from '@site/static/images/cloud/reference/june-13-fast-releases.png'; -import share_queries from '@site/static/images/cloud/reference/may-30-share-queries.png'; -import query_endpoints from '@site/static/images/cloud/reference/may-17-query-endpoints.png'; - -In addition to this ClickHouse Cloud changelog, please see the [Cloud Compatibility](/cloud/reference/cloud-compatibility.md) page. 
-## May 16, 2025 {#may-16-2025} - -- Resource Utilization Dashboardが導入され、ClickHouse Cloud内のサービスによって使用されるリソースのビューを提供します。以下のメトリクスはシステムテーブルから収集され、このダッシュボードに表示されます: - * メモリとCPU: `CGroupMemoryTotal`(割り当てられたメモリ)、`CGroupMaxCPU`(割り当てられたCPU)、 `MemoryResident`(使用されるメモリ)、および `ProfileEvent_OSCPUVirtualTimeMicroseconds`(使用されるCPU)のグラフ - * データ転送:ClickHouse Cloudからのデータの入出力を示すグラフ。詳細は[こちら](/cloud/manage/network-data-transfer)。 -- 新しいClickHouse Cloud Prometheus/Grafanaミックスインのローンチを発表できることを嬉しく思います。このミックスインは、ClickHouse Cloudサービスの監視を簡素化するために作られています。 - このミックスインは、Prometheusに互換性のあるAPIエンドポイントを使用して、ClickHouseメトリクスを既存のPrometheusおよびGrafanaセットアップにシームレスに統合します。リアルタイムでサービスの健康状態とパフォーマンスを可視化するためのプリ構成されたダッシュボードが含まれています。詳細はローンチの[ブログ](https://clickhouse.com/blog/monitor-with-new-prometheus-grafana-mix-in)を参照してください。 - -## April 18, 2025 {#april-18-2025} - -- 新しい**メンバー**組織レベルのロールと2つの新しいサービスレベルロール:**サービス管理者**および**サービス読み取り専用**を導入しました。 - **メンバー**はSAML SSOユーザーにデフォルトで割り当てられる組織レベルのロールで、サインインとプロファイル更新機能のみを提供します。**サービス管理者**と**サービス読み取り専用**ロールは、**メンバー**、**開発者**、または**請求管理者**ロールを持つユーザーに割り当てることができます。詳細は["ClickHouse Cloudのアクセス制御"](https://clickhouse.com/docs/cloud/security/cloud-access-management/overview)を参照してください。 -- ClickHouse Cloudでは、以下のリージョンで**エンタープライズ**顧客向けに**HIPAA**および**PCI**サービスが提供されています:AWS eu-central-1、AWS eu-west-2、AWS us-east-2。 -- **ClickPipesに対するユーザー向け通知**を導入しました。この機能は、ClickPipesの失敗について自動的に通知をメール、ClickHouse Cloud UI、およびSlack経由で送信します。メールおよびUIによる通知はデフォルトで有効になっており、各パイプごとに構成可能です。**Postgres CDC ClickPipes**の場合、通知はレプリケーションスロットの閾値(**設定**タブで構成可能)、特定のエラータイプ、失敗を解決するためのセルフサーブ手順もカバーします。 -- **MySQL CDCプライベートプレビュー**がオープンになりました。これにより、顧客は数回のクリックでMySQLデータベースをClickHouse Cloudにレプリケートでき、高速分析が可能になり、外部ETLツールの必要がなくなります。このコネクタは、MySQLがクラウド(RDS、Aurora、Cloud SQL、Azureなど)にある場合でもオンプレミスにある場合でも、継続的なレプリケーションと1回限りのマイグレーションの両方をサポートします。プライベートプレビューには[こちらのリンク](https://clickhouse.com/cloud/clickpipes/mysql-cdc-connector)からサインアップできます。 -- **ClickPipesに対するAWS PrivateLink**を導入しました。AWS PrivateLinkを使用して、VPC間、AWSサービス、オンプレミスシステム、ClickHouse Cloudとの間にセキュアな接続を確立できます。これにより、Postgres、MySQL、AWS上のMSKなどのソースからデータを移動する際に、公共インターネットにトラフィックを露出せずに行えます。また、VPCサービスエンドポイントを介してのクロスリージョンアクセスもサポートされています。PrivateLinkの接続設定は現在[完全セルフサービス](https://clickhouse.com/docs/integrations/clickpipes/aws-privatelink)でClickPipesを通じて行えます。 - -## April 4, 2025 {#april-4-2025} - -- ClickHouse CloudのSlack通知:ClickHouse Cloudは、請求、スケーリング、ClickPipesイベントに関するSlack通知を、コンソール内およびメール通知に加えてサポートしました。これらの通知はClickHouse Cloud Slackアプリケーションを介して送信されます。組織の管理者は、通知センターを介して通知を構成し、通知を送信すべきSlackチャネルを指定できます。 -- プロダクションおよび開発サービスを運用しているユーザーは、ClickPipesとデータ転送の使用料金を請求書に表示されるようになります。詳細については、2025年1月の[発表](/cloud/manage/jan-2025-faq/pricing-dimensions)を参照してください。 - -## March 21, 2025 {#march-21-2025} - -- AWS上のクロスリージョンPrivate Link接続が現在ベータ版です。設定方法やサポートされているリージョンのリストについては、ClickHouse Cloudプライベートリンクの[ドキュメント](/manage/security/aws-privatelink)を参照してください。 -- AWS上のサービスに対して利用可能な最大レプリカサイズは236 GiB RAMに設定されました。これにより、効率的な活用が可能になり、バックグラウンドプロセスにリソースが割り当てられることが保証されます。 - -## March 7, 2025 {#march-7-2025} - -- 新しい`UsageCost` APIエンドポイント:API仕様は、新しいエンドポイントによる使用情報の取得をサポートしています。これは組織エンドポイントで、最大31日分の使用コストをクエリできます。取得可能なメトリクスはストレージ、コンピュート、データ転送、ClickPipesが含まれます。詳細については[ドキュメント](https://clickhouse.com/docs/cloud/manage/api/usageCost-api-reference)を参照してください。 -- Terraformプロバイダー[v2.1.0](https://registry.terraform.io/providers/ClickHouse/clickhouse/2.1.0/docs/resources/service#nestedatt--endpoints_configuration)リリースによりMySQLエンドポイントの有効化がサポートされました。 - -## February 21, 2025 {#february-21-2025} -### ClickHouse Bring Your Own Cloud (BYOC) for AWS is now generally available! 
{#clickhouse-byoc-for-aws-ga} - -このデプロイメントモデルでは、データプレーンコンポーネント(コンピュート、ストレージ、バックアップ、ログ、メトリクス)が顧客のVPC内で実行され、コントロールプレーン(Webアクセス、API、および請求)はClickHouse VPC内に残ります。この設定は、大量のワークロードが厳格なデータ居住要件を遵守するために理想的で、すべてのデータが安全な顧客環境内に留まることを保証します。 - -- 詳細については、BYOCの[ドキュメント](/cloud/reference/byoc)を参照するか、[発表ブログ記事](https://clickhouse.com/blog/announcing-general-availability-of-clickhouse-bring-your-own-cloud-on-aws)をお読みください。 -- [お問い合わせ](https://clickhouse.com/cloud/bring-your-own-cloud)いただければ、アクセスをリクエストできます。 - -### Postgres CDC connector for ClickPipes {#postgres-cdc-connector-for-clickpipes} - -ClickPipesのPostgres CDCコネクタが現在パブリックベータ版です。この機能により、ユーザーはPostgresデータベースをClickHouse Cloudにシームレスにレプリケートできます。 - -- 始めるには、ClickPipes Postgres CDCコネクタの[ドキュメント](https://clickhouse.com/docs/integrations/clickpipes/postgres)を参照してください。 -- 顧客のユースケースと機能に関する詳細は、[ランディングページ](https://clickhouse.com/cloud/clickpipes/postgres-cdc-connector)および[ローンチブログ](https://clickhouse.com/blog/postgres-cdc-connector-clickpipes-public-beta)をご参照ください。 - -### PCI compliance for ClickHouse Cloud on AWS {#pci-compliance-for-clickhouse-cloud-on-aws} - -ClickHouse Cloudは現在、**エンタープライズ層**顧客向けに**PCI-準拠サービス**を**us-east-1**および**us-west-2**リージョンでサポートしています。PCI準拠の環境でサービスを起動したいユーザーは、[サポート](https://clickhouse.com/support/program)に連絡して支援を受けてください。 - -### Transparent Data Encryption and Customer Managed Encryption Keys on Google Cloud Platform {#tde-and-cmek-on-gcp} - -**透過的データ暗号化(TDE)**と**顧客管理の暗号化キー(CMEK)**のサポートが、**Google Cloud Platform(GCP)**におけるClickHouse Cloudで利用可能になりました。 - -- これらの機能に関する詳細情報は[ドキュメント](https://clickhouse.com/docs/cloud/security/cmek#transparent-data-encryption-tde)を参照してください。 - -### AWS Middle East (UAE) availability {#aws-middle-east-uae-availability} - -ClickHouse Cloudに新たなリージョンサポートが追加され、**AWS Middle East (UAE) me-central-1**リージョンで利用可能になりました。 - -### ClickHouse Cloud guardrails {#clickhouse-cloud-guardrails} - -ClickHouse Cloudの安定した使用を確保し、ベストプラクティスを促進するために、使用するテーブル、データベース、パーティション、およびパーツの数に関するガードレールを導入します。 - -- 詳細については、[使用制限](https://clickhouse.com/docs/cloud/bestpractices/usage-limits)セクションを参照してください。 -- サービスが既にこれらの制限を超えている場合は、10%の増加を許可します。質問がある場合は、[サポート](https://clickhouse.com/support/program)にご連絡ください。 - -## January 27, 2025 {#january-27-2025} -### Changes to ClickHouse Cloud tiers {#changes-to-clickhouse-cloud-tiers} - -私たちは、顧客の変化するニーズに応じて製品を適応させることに専念しています。GAでの導入以来、ClickHouse Cloudは大幅に進化し、顧客がどのように私たちのクラウド提供を利用しているかについて貴重な洞察を得ました。 - -私たちは、ClickHouse Cloudサービスのサイズとコスト効率を最適化するための新機能を導入しています。これには**コンピュート-コンピュート分離**、高性能なマシンタイプ、および**シングルレプリカサービス**が含まれます。また、よりシームレスで反応的な方法で自動スケーリングと管理されたアップグレードを実行するよう進化させています。 - -最も要求の厳しい顧客とワークロードのニーズに応えるために、業界特有のセキュリティおよびコンプライアンス機能に焦点を当て、基盤となるハードウェアやアップグレードに対するさらに多くのコントロール、そして高度な災害復旧機能を備えた**新しいエンタープライズ層**を導入します。 - -これらの変更をサポートするために、現在の**開発**および**プロダクション**層を、お客様の進化するニーズにより密接に一致させるよう再構築しています。新しいユーザー向けの**基本**層と、プロダクションワークロードおよび大規模なデータに取り組むユーザーに合わせた**スケール**層を導入します。 - -これらの機能変更については、この[ブログ](https://clickhouse.com/blog/evolution-of-clickhouse-cloud-new-features-superior-performance-tailored-offerings)でお読みいただけます。既存の顧客は、新しい[プラン](https://clickhouse.com/pricing)を選択するためのアクションを取る必要があります。顧客向けのコミュニケーションは組織の管理者にメールで送信され、以下の[FAQ](/cloud/manage/jan-2025-faq/summary)が主な変更点とタイムラインをカバーしています。 - -### Warehouses: Compute-compute separation (GA) {#warehouses-compute-compute-separation-ga} - -コンピュート-コンピュートの分離(「倉庫」とも呼ばれる)は一般的に利用可能です。詳細については[ブログ](https://clickhouse.com/blog/introducing-warehouses-compute-compute-separation-in-clickhouse-cloud)と[ドキュメント](/cloud/reference/warehouses)を参照してください。 - -### Single-replica services {#single-replica-services} - 
-「シングルレプリカサービス」の概念を導入します。これは独立した提供としても、倉庫内でも使用されます。独立した提供としては、シングルレプリカサービスはサイズ制限があり、小規模なテストワークロードに利用されることを意図しています。倉庫内ではシングルレプリカサービスをより大きなサイズで展開し、高可用性がスケールで要求されないワークロード(再起動可能なETLジョブなど)のために利用することができます。 - -### Vertical auto-scaling improvements {#vertical-auto-scaling-improvements} - -コンピュートレプリカのための新しい垂直スケーリングメカニズム、「事前確保後削除(Make Before Break、MBB)」を導入します。このアプローチにより、古いレプリカを削除する前に、新しいサイズの1つ以上のレプリカを追加し、スケーリング操作中のキャパシティ損失を防ぎます。既存のレプリカを削除し新しいレプリカを追加する際のギャップを排除することで、よりシームレスで中断の少ないスケーリングプロセスを実現します。特に、リソースの高い利用度が追加のキャパシティの必要性を引き起こすスケールアップシナリオで有益です。既存のレプリカを早期に削除すると、リソース制約を悪化させるだけになります。 - -### Horizontal scaling (GA) {#horizontal-scaling-ga} - -水平スケーリングが現在一般的に利用可能です。ユーザーはAPIやクラウドコンソールを介してサービスをスケールアウトするために追加のレプリカを追加できます。詳細については[ドキュメント](/manage/scaling#manual-horizontal-scaling)を参照してください。 - -### Configurable backups {#configurable-backups} - -顧客は、独自のクラウドアカウントにバックアップをエクスポートする機能が今後サポートされます。詳細については[ドキュメント](/cloud/manage/backups/configurable-backups)を参照ください。 - -### Managed upgrade improvements {#managed-upgrade-improvements} - -安全な管理されたアップグレードは、ユーザーが新機能を追加しながらデータベースを最新の状態に保つために大きな価値を提供します。この展開では、アップグレードに「事前確保後削除(MBB)」アプローチを適用し、実行中のワークロードに対する影響をさらに低減しました。 - -### HIPAA support {#hipaa-support} - -私たちは、AWS `us-east-1`、`us-west-2`、およびGCP `us-central1`、`us-east1`を含むコンプライアントリージョンでHIPAAをサポートしています。オンボードを希望する顧客は、ビジネスアソシエイト契約(BAA)に署名し、リージョンのコンプライアント版にデプロイする必要があります。HIPAAに関する詳細情報は[ドキュメント](/cloud/security/security-and-compliance)を参照してください。 - -### Scheduled upgrades {#scheduled-upgrades} - -ユーザーはサービスのアップグレードをスケジュールできます。この機能はエンタープライズ層のサービスのみでサポートされています。スケジュールされたアップグレードに関する詳細は[ドキュメント](/manage/updates)を参照してください。 - -### Language client support for complex types {#language-client-support-for-complex-types} - -[Golang](https://github.com/ClickHouse/clickhouse-go/releases/tag/v2.30.1)、[Python](https://github.com/ClickHouse/clickhouse-connect/releases/tag/v0.8.11)、および[NodeJS](https://github.com/ClickHouse/clickhouse-js/releases/tag/1.10.1)クライアントが、Dynamic、Variant、およびJSONタイプリクエストをサポートしました。 - -### DBT support for refreshable materialized views {#dbt-support-for-refreshable-materialized-views} - -DBTは、`1.8.7`リリースで[リフレッシュ可能なマテリアライズドビュー](https://github.com/ClickHouse/dbt-clickhouse/releases/tag/v1.8.7)をサポートしています。 - -### JWT token support {#jwt-token-support} - -JDBCドライバv2、clickhouse-java、[Python](https://github.com/ClickHouse/clickhouse-connect/releases/tag/v0.8.12)、および[NodeJS](https://github.com/ClickHouse/clickhouse-js/releases/tag/1.10.0)クライアントでJWTベースの認証がサポートされました。 - -JDBC / Javaは、リリース時に[0.8.0](https://github.com/ClickHouse/clickhouse-java/releases/tag/v0.8.0)で使用可能になります - リリース日時は未定です。 - -### Prometheus integration improvements {#prometheus-integration-improvements} - -Prometheus統合のためにいくつかの改善を加えました: - -- **組織レベルのエンドポイント**。ClickHouse Cloud用のPrometheus統合に改良が導入されました。サービスレベルのメトリクスに加えて、APIには**組織レベルメトリクス**のためのエンドポイントが含まれています。この新しいエンドポイントは、組織内のすべてのサービスのメトリクスを自動的に収集し、メトリクスをPrometheusコレクターにエクスポートするプロセスを簡素化します。これらのメトリクスは、GrafanaやDatadogなどの可視化ツールと統合し、組織のパフォーマンスをより包括的に把握するために使用できます。 - - この機能はすでにすべてのユーザーが利用可能です。詳細は[こちら](/integrations/prometheus)をご覧ください。 - -- **フィルターされたメトリクス**。私たちのClickHouse CloudのPrometheus統合で、フィルタリストを返すためのサポートが追加されました。この機能は、サービスの健康状態を監視するために重要なメトリクスに焦点を合わせることを可能にし、応答ペイロードサイズを削減します。 - - この機能はAPIのオプションのクエリパラメータとして利用可能で、データ収集を最適化し、GrafanaやDatadogとの統合を簡素化します。 - - フィルタードメトリクス機能はすでにすべてのユーザーのために利用可能です。詳細は[こちら](/integrations/prometheus)をご覧ください。 - -## December 20, 2024 {#december-20-2024} -### Marketplace subscription organization attachment {#marketplace-subscription-organization-attachment} - -新しいマーケットプレイスサブスクリプションを既存のClickHouse 
Cloud組織に添付できるようになりました。マーケットプレイスにサブスクライブしたら、ClickHouse Cloudにリダイレクトされ、過去に作成された既存の組織を新しいマーケットプレイスサブスクリプションに接続できるようになります。この時点から、組織内のリソースはマーケットプレイスを通じて請求されることになります。 - -ClickHouse Cloud interface showing how to add a marketplace subscription to an existing organization -### Force OpenAPI key expiration {#force-openapi-key-expiration} - -APIキーの有効期限オプションを制限し、有効期限のないOpenAPIキーを作成しないようにできるようになりました。これらの制限を組織に対して有効にするには、ClickHouse Cloudサポートチームにお問い合わせください。 - -### Custom emails for notifications {#custom-emails-for-notifications} - -組織管理者は、特定の通知に追加の受信者としてメールアドレスを追加できるようになりました。これは、通知をエイリアスやClickHouse Cloudのユーザーでない他の組織内のユーザーに送信したい場合に便利です。これを構成するには、クラウドコンソールの通知設定に移動し、メール通知を受信したいメールアドレスを編集します。 - -## December 6, 2024 {#december-6-2024} -### BYOC (Beta) {#byoc-beta} - -AWS向けのBring Your Own Cloudが現在ベータ版で利用可能です。このデプロイメントモデルにより、ClickHouse Cloudを独自のAWSアカウントで展開および実行できます。11以上のAWSリージョンでのデプロイメントをサポートし、今後さらに追加される予定です。アクセスについては、[サポートにお問い合わせください](https://clickhouse.com/support/program)。このデプロイは、大規模なデプロイメントにのみ予約されています。 - -### Postgres Change-Data-Capture (CDC) Connector in ClickPipes {#postgres-change-data-capture-cdc-connector-in-clickpipes} - -このターンキー統合により、顧客は数回のクリックでPostgresデータベースをClickHouse Cloudにレプリケートし、ClickHouseを利用して瞬時に分析できます。このコネクタを使用して、Postgresからの継続的なレプリケーションと1回限りのマイグレーションの両方を行うことができます。 - -### Dashboards (Beta) {#dashboards-beta} - -今週、ClickHouse CloudでDashboardsをベータ版で発表できることを嬉しく思います。Dashboardsを使用すると、ユーザーは保存したクエリをビジュアライゼーションに変え、ビジュアライゼーションをダッシュボードに整理し、クエリパラメータを使用してダッシュボードと対話できます。始めるには、[ダッシュボードのドキュメント](/cloud/manage/dashboards)を参照してください。 - -ClickHouse Cloud interface showing the new Dashboards Beta feature with visualizations - -### Query API endpoints (GA) {#query-api-endpoints-ga} - -ClickHouse CloudでクエリAPIエンドポイントのGAリリースを発表できることを嬉しく思います。クエリAPIエンドポイントを使用すると、保存されたクエリのRESTful APIエンドポイントを数回のクリックで立ち上げ、言語クライアントや認証の複雑さを気にせずにアプリケーション内でデータを消費し始めることができます。初期のローンチ以来、次のような改善が加えられました: - -* エンドポイントのレイテンシを削減、特にコールドスタート時 -* エンドポイントRBACコントロールの強化 -* CORS許可ドメインの設定可能性 -* 結果ストリーミング -* ClickHouse互換出力形式のサポート - -これらの改善に加えて、既存のフレームワークを活用し、ClickHouse Cloudサービスに対して任意のSQLクエリを実行することを可能にする一般的なクエリAPIエンドポイントを発表します。一般的なエンドポイントは、サービス設定ページから有効化および設定が可能です。 - -始めるには、[クエリAPIエンドポイントのドキュメント](/cloud/get-started/query-endpoints)を参照してください。 - -ClickHouse Cloud interface showing the API Endpoints configuration with various settings - -### Native JSON support (Beta) {#native-json-support-beta} - -ClickHouse CloudでネイティブJSONサポートのベータ版を発表します。開始するには、サポートに連絡して、[クラウドサービスを有効化してください](/cloud/support)。 - -### Vector search using vector similarity indexes (Early Access) {#vector-search-using-vector-similarity-indexes-early-access} - -近似ベクター検索のためのベクター類似性インデックスを早期アクセスで発表します! 
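参考までに、実験的な近似ベクター検索を試す場合のテーブル定義とクエリの最小スケッチを以下に示します。テーブル名・カラム名(`embeddings`、`vec` など)は説明用の想定であり、設定名やインデックス引数はバージョンによって異なる可能性があります。正確な構文は、この後の段落で触れる近似ベクター検索のドキュメントを参照してください:

```sql
-- 説明用の想定スケッチ: 実験的なベクター類似性インデックス(HNSW)を持つテーブル
-- テーブル名・カラム名は仮のもので、設定名やインデックス引数はバージョンにより変わる場合があります
SET allow_experimental_vector_similarity_index = 1;

CREATE TABLE embeddings
(
    id  UInt64,
    vec Array(Float32),
    INDEX vec_idx vec TYPE vector_similarity('hnsw', 'cosineDistance')
)
ENGINE = MergeTree
ORDER BY id;

-- 参照ベクターとの距離が小さい上位 10 件を取得する例
SELECT id
FROM embeddings
ORDER BY cosineDistance(vec, [0.1, 0.2, 0.3])
LIMIT 10;
```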
- -ClickHouseは、幅広い[距離関数](https://clickhouse.com/blog/reinvent-2024-product-announcements#vector-search-using-vector-similarity-indexes-early-access)とリニアスキャンを実行する能力を備えて、ベクター型ユースケースを強力にサポートしています。最近、[usearch](https://github.com/unum-cloud/usearch)ライブラリと階層型ナビゲーション可能な小世界(HNSW)近似最近傍検索アルゴリズムを活用した実験的[近似ベクター検索](/engines/table-engines/mergetree-family/annindexes)アプローチを追加しました。 - -始めるには、[早期アクセスの待機リストにサインアップしてください](https://clickhouse.com/cloud/vector-search-index-waitlist)。 - -### ClickHouse-Connect (Python) and ClickHouse-Kafka-Connect Users {#clickhouse-connect-python-and-clickhouse-kafka-connect-users} - -[`MEMORY_LIMIT_EXCEEDED`](https://docs.clickhouse.com/en/operations/events/#memory_limit_exceeded)例外が発生する可能性がある問題に苦しんでいた顧客に通知のメールが送信されました。 - -以下のバージョンにアップグレードしてください: -- Kafka-Connect: > 1.2.5 -- ClickHouse-Connect (Java): > 0.8.6 - -### ClickPipes now supports cross-VPC resource access on AWS {#clickpipes-now-supports-cross-vpc-resource-access-on-aws} - -特定のデータソース(たとえばAWS MSK)に対して一方向のアクセスを付与できるようになりました。AWS PrivateLinkとVPC Latticeを使用したクロスVPCリソースアクセスにより、VPCおよびアカウントの境界を越えて、または公共ネットワークを介らずにオンプレミスネットワークからリソースを共有できます。リソース共有の設定方法については、[発表記事](https://clickhouse.com/blog/clickpipes-crossvpc-resource-endpoints?utm_medium=web&utm_source=changelog)をお読みください。 - -Diagram showing the Cross-VPC resource access architecture for ClickPipes connecting to AWS MSK - -### ClickPipes now supports IAM for AWS MSK {#clickpipes-now-supports-iam-for-aws-msk} - -AWS MSK ClickPipesを使用して、IAM認証を使用してMSKブローカーに接続できるようになりました。開始するには、[ドキュメント](/integrations/clickpipes/kafka#iam)を確認してください。 - -### Maximum replica size for new services on AWS {#maximum-replica-size-for-new-services-on-aws} - -これから、新しく作成されたAWSのサービスは、最大236 GiBのレプリカサイズを許可します。 - -## November 22, 2024 {#november-22-2024} -### Built-in advanced observability dashboard for ClickHouse Cloud {#built-in-advanced-observability-dashboard-for-clickhouse-cloud} - -以前は、ClickHouseサーバーメトリクスとハードウェアリソース利用状況を監視するための高度な可視化ダッシュボードは、オープンソースのClickHouseでのみ利用可能でした。この機能が現在、ClickHouse Cloudコンソールで利用可能になったことを嬉しく思います! - -このダッシュボードでは、[system.dashboards](/operations/system-tables/dashboards)テーブルに基づいてクエリをすべて1つのUIで表示できます。今日から**モニタリング > サービスヘルス**ページを訪れて、高度な可視化ダッシュボードを使用してください。 - -ClickHouse Cloud advanced observability dashboard showing server metrics and resource utilization - -### AI-powered SQL autocomplete {#ai-powered-sql-autocomplete} - -新しいAI Copilotとともに、クエリを記述するときにインラインSQL補完を受けることができるよう、オートコンプリートを大幅に改善しました! この機能は、どのClickHouse Cloudサービスに対しても**「インラインコード補完を有効にする」**設定を切り替えて有効にすることができます。 - -Animation showing the AI Copilot providing SQL autocompletion suggestions as a user types - -### New "Billing" role {#new-billing-role} - -組織のユーザーに新しい**料金**ロールを割り当てて、サービスを構成または管理する能力を与えることなく請求情報を表示および管理させることができるようになりました。新しいユーザーを招待するか、既存のユーザーの役割を編集して**料金**ロールを割り当ててください。 - -## November 8, 2024 {#november-8-2024} -### Customer Notifications in ClickHouse Cloud {#customer-notifications-in-clickhouse-cloud} - -ClickHouse Cloudは、いくつかの請求およびスケーリングイベントについてコンソール内およびメール通知を提供します。顧客はこれらの通知をクラウドコンソールの通知センターを介して構成し、UIでのみ表示したり、メールを受信したり、両方を実施したりできます。受け取る通知のカテゴリーおよび重要度をサービスレベルで構成できます。 - -今後、他のイベントの通知や、通知を受信するための追加の方法も追加する予定です。 - -サービスの通知を有効にする方法については、[ClickHouseドキュメント](/cloud/notifications)を参照してください。 - -ClickHouse Cloud notification center interface showing configuration options for different notification types - -
-## October 4, 2024 {#october-4-2024} -### ClickHouse Cloud now offers HIPAA-ready services in Beta for GCP {#clickhouse-cloud-now-offers-hipaa-ready-services-in-beta-for-gcp} - -保護された健康情報(PHI)へのセキュリティを強化したい顧客は、現在、[Google Cloud Platform (GCP)](https://cloud.google.com/)でClickHouse Cloudに登録できます。ClickHouseは、[HIPAAセキュリティルール](https://www.hhs.gov/hipaa/for-professionals/security/index.html)で規定された管理的、物理的および技術的な保護策を実装し、特定のユースケースやワークロードに応じて実装できる設定可能なセキュリティ設定を持っています。利用可能なセキュリティ設定についての詳細は、[セキュリティ共有責任モデル](/cloud/security/shared-responsibility-model)をご覧ください。 - -サービスは、**専用**サービスタイプを持つ顧客に対して、GCP `us-central-1`で利用可能で、ビジネスアソシエイト契約(BAA)が必要です。この機能へのアクセスをリクエストするには、[営業](mailto:sales@clickhouse.com)または[サポート](https://clickhouse.com/support/program)にお問い合わせください。 - -### Compute-Compute separation is now in Private Preview for GCP and Azure {#compute-compute-separation-is-now-in-private-preview-for-gcp-and-azure} - -私たちは最近、AWSのコンピュート-コンピュート分離のプライベートプレビューを発表しました。今、GCPとAzureでも利用可能になったことを嬉しく思います。 - -コンピュート-コンピュート分離により、特定のサービスを読み書きまたは読み取り専用サービスとして指定できるため、アプリケーションに最適なコンピュート設定を設計してコストとパフォーマンスを最適化できます。詳細については、[ドキュメント](/cloud/reference/warehouses)をお読みください。 - -### Self-service MFA recovery codes {#self-service-mfa-recovery-codes} - -多要素認証を使用している顧客は、電話を失ったりトークンを誤って削除した場合に使用できる回復コードを取得できるようになりました。初めてMFAに登録する顧客には、設定時にコードが提供されます。既存のMFAを持っている顧客は、既存のMFAトークンを削除し新しいトークンを追加することで回復コードを取得できます。 - -### ClickPipes update: custom certificates, latency insights, and more! {#clickpipes-update-custom-certificates-latency-insights-and-more} - -ClickPipes、データをClickHouseサービスに取り込むための最も簡単な方法に関する最新の更新情報をお知らせできることを嬉しく思います!これらの新機能は、データ取り込みの制御を強化し、パフォーマンスメトリクスへの可視化を提供することを目的としています。 - -*Kafka用のカスタム認証証明書* - -ClickPipes for Kafkaでは、SASLと公開SSL/TLSを使用してKafkaブローカー用のカスタム認証証明書をサポートしています。ClickPipe設定中にSSL証明書セクションで独自の証明書を簡単にアップロードでき、Kafkaへのより安全な接続を実現します。 - -*KafkaおよびKinesisのレイテンシメトリクスを導入* - -パフォーマンスの可視化は重要です。ClickPipesにはレイテンシグラフが新たに追加され、メッセージ生産(KafkaトピックまたはKinesisストリームからの)からClickHouse Cloudへの取り込みまでの時間を把握できます。この新しいメトリクスにより、データパイプラインのパフォーマンスをより細かく監視し、最適化が可能です。 - -ClickPipes interface showing latency metrics graph for data ingestion performance - -
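なお、UI のレイテンシグラフとは別に、取り込み先テーブル側でおおよそのエンドツーエンドレイテンシを確認したい場合は、次のようなアドホックなクエリも目安として利用できます。`kafka_events`、`event_time`、`ingested_at` は説明用の想定の名前であり、実際のパイプラインのスキーマに合わせて読み替えてください:

```sql
-- 説明用の想定例: メッセージ生成時刻 (event_time) と取り込み時刻 (ingested_at) の差を集計し、
-- 直近 1 時間の取り込みレイテンシの目安を確認する
SELECT
    quantile(0.50)(dateDiff('second', event_time, ingested_at)) AS p50_seconds,
    quantile(0.99)(dateDiff('second', event_time, ingested_at)) AS p99_seconds
FROM kafka_events
WHERE ingested_at >= now() - INTERVAL 1 HOUR;
```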
- -*KafkaおよびKinesisのスケーリング制御(プライベートベータ)* - -高スループットにより、データボリュームとレイテンシ要件を満たすために追加のリソースが必要になる場合があります。私たちはClickPipesの水平方向のスケーリングを導入しており、これによりクラウドコンソールを介して直接操作できます。この機能は現在プライベートベータ版で利用可能で、要件に応じてリソースをより効果的にスケールできます。プライベートベータ版に参加するには、[サポート](https://clickhouse.com/support/program)へお問い合わせください。 - -*KafkaおよびKinesisの生メッセージ取り込み* - -完全なKafkaまたはKinesisメッセージを解析せずにそのまま取り込めるようになりました。ClickPipesでは、ユーザーが完全なメッセージを単一の文字列カラムにマッピングできる[_raw_message](/integrations/clickpipes/kafka#kafka-virtual-columns)仮想カラムのサポートが提供されています。これにより、必要に応じて生データと対話する柔軟性が得られます。 - -## August 29, 2024 {#august-29-2024} -### New Terraform provider version - v1.0.0 {#new-terraform-provider-version---v100} - -Terraformを使用すると、ClickHouse Cloudサービスをプログラムで制御し、構成をコードとして保存できます。私たちのTerraformプロバイダーは20万ダウンロード以上を達成し、正式にv1.0.0になりました!この新しいバージョンには、再試行ロジックの改善や、ClickHouse Cloudサービスにプライベートエンドポイントを接続するための新しいリソースの追加が含まれています。[Terraformプロバイダーをこちらからダウンロード](https://registry.terraform.io/providers/ClickHouse/clickhouse/latest)でき、[完全な変更履歴をこちらで確認](https://github.com/ClickHouse/terraform-provider-clickhouse/releases/tag/v1.0.0)できます。 -### 2024 SOC 2 Type II レポートおよび更新された ISO 27001 証明書 {#2024-soc-2-type-ii-report-and-updated-iso-27001-certificate} - -私たちは、2024 SOC 2 Type II レポートおよび更新された ISO 27001 証明書を提供できることを誇りに思います。どちらも、最近開始した Azure のサービスと、AWS および GCP でのサービスの継続的なカバレッジを含んでいます。 - -私たちの SOC 2 Type II は、ClickHouse ユーザーに提供するサービスのセキュリティ、可用性、処理の完全性、および機密性を達成するための継続的なコミットメントを示しています。詳細については、アメリカ公認会計士協会 (AICPA) が発行した [SOC 2 - サービス組織のための SOC: 信頼サービス基準](https://www.aicpa-cima.com/resources/landing/system-and-organization-controls-soc-suite-of-services) および国際標準化機構 (ISO) の [ISO/IEC 27001 とは](https://www.iso.org/standard/27001) をご覧ください。 - -また、セキュリティおよびコンプライアンス文書やレポートについては、私たちの [Trust Center](https://trust.clickhouse.com/) をご覧ください。 - -## 2024年8月15日 {#august-15-2024} -### AWS のプライベートプレビューでのコンピュート間分離 {#compute-compute-separation-is-now-in-private-preview-for-aws} - -既存の ClickHouse Cloud サービスでは、レプリカが読み取りと書き込みの両方を処理しており、特定のレプリカを特定の操作のみ処理するように構成する方法はありません。新機能であるコンピュート間分離を使用すると、特定のサービスを読み取り/書き込みまたは読み取り専用サービスとして指定できるため、コストとパフォーマンスを最適化するための最適なコンピュート構成を設計できます。 - -新しいコンピュート間分離機能を使用すると、同じオブジェクトストレージフォルダを使用している各エンドポイントを持つ複数のコンピュートノードグループを作成できます。これにより、同じテーブル、ビューなどを使用することができます。[コンピュート間分離の詳細はこちらをご覧ください](/cloud/reference/warehouses)。プライベートプレビューでこの機能へのアクセスを希望する場合は、[サポートに連絡](https://clickhouse.com/support/program)してください。 - -読み取り/書き込みおよび読み取り専用サービスグループを使用したコンピュート間分離の例を示す図 - -### S3 および GCS 用 ClickPipes が GA、Continuous mode 対応 {#clickpipes-for-s3-and-gcs-now-in-ga-continuous-mode-support} - -ClickPipes は、ClickHouse Cloud にデータを取り込む最も簡単な方法です。[ClickPipes](https://clickhouse.com/cloud/clickpipes) が S3 および GCS 用に **一般提供** されることを嬉しく思います。ClickPipes は、一度きりのバッチ取り込みと「連続モード」の両方をサポートしています。取り込みタスクは、特定のリモートバケット内のパターンに一致するすべてのファイルを ClickHouse の宛先テーブルに読み込みます。「連続モード」では、ClickPipesジョブが常に実行され、リモートオブジェクトストレージバケットに追加される一致するファイルを取り込みます。これにより、ユーザーは任意のオブジェクトストレージバケットを ClickHouse Cloud にデータを取り込むための完全に機能するステージングエリアに変えることができます。ClickPipes についての詳細は、[こちらのドキュメント](/integrations/clickpipes)をご覧ください。 - -## 2024年7月18日 {#july-18-2024} -### メトリクス用 Prometheus エンドポイントが一般提供中 {#prometheus-endpoint-for-metrics-is-now-generally-available} - -前回のクラウドチェンジログで、ClickHouse Cloud からの [Prometheus](https://prometheus.io/) メトリクスのエクスポートに関するプライベートプレビューを発表しました。この機能では、[ClickHouse Cloud API](/cloud/manage/api/api-overview) を使用してメトリクスを [Grafana](https://grafana.com/) や [Datadog](https://www.datadoghq.com/) などのツールに取り込んで視覚化できます。この機能が現在 **一般提供** されていることを嬉しく思います。詳細については、[こちらのドキュメント](/integrations/prometheus) をご覧ください。 - -### クラウドコンソール内のテーブルインスペクタ {#table-inspector-in-cloud-console} - -ClickHouse 
には、テーブルのスキーマを調べるための [`DESCRIBE`](/sql-reference/statements/describe-table) のようなコマンドがあります。これらのコマンドはコンソールに出力されますが、関連データ全体を取得するには複数のクエリを組み合わせる必要があるため、便利ではありません。 - -最近、SQL を記述せずに UI で重要なテーブルおよびカラム情報を取得できる **テーブルインスペクタ** をクラウドコンソールに導入しました。クラウドコンソールでサービスのテーブルインスペクタを試すことができます。このインターフェースは、スキーマ、ストレージ、圧縮などに関する情報を一元化して提供します。 - -ClickHouse Cloud テーブルインスペクタインターフェースで、詳細なスキーマおよびストレージ情報を表示 - -### 新しい Java クライアント API {#new-java-client-api} - -私たちの [Java Client](https://github.com/ClickHouse/clickhouse-java) は、ClickHouse に接続するためにユーザーが使用する最も人気のあるクライアントの1つです。私たちは、再設計された API やさまざまなパフォーマンス最適化を含めて、より使いやすく直感的にすることを望んでいました。これにより、Java アプリケーションから ClickHouse に接続するのがはるかに簡単になります。更新された Java Client の使い方については、[このブログ投稿](https://clickhouse.com/blog/java-client-sequel)を参照してください。 - -### 新しいアナライザーがデフォルトで有効化されました {#new-analyzer-is-enabled-by-default} - -ここ数年、クエリ分析と最適化のための新しいアナライザーの開発に取り組んできました。このアナライザーはクエリのパフォーマンスを向上させ、より迅速かつ効果的な `JOIN` を可能にします。以前は、新しいユーザーは `allow_experimental_analyzer` 設定を使用してこの機能を有効にする必要がありました。この改善されたアナライザーは、現在新しい ClickHouse Cloud サービスにデフォルトで備わっています。 - -さらなる最適化を行う予定があるので、アナライザーに関するさらなる改善にご期待ください! - -## 2024年6月28日 {#june-28-2024} -### Microsoft Azure 向け ClickHouse Cloud が一般提供中! {#clickhouse-cloud-for-microsoft-azure-is-now-generally-available} - -先月、私たちは Microsoft Azure サポートをベータ版で発表しました[(先月)](https://clickhouse.com/blog/clickhouse-cloud-is-now-on-azure-in-public-beta)。最新のクラウドリリースにおいて、Azure のサポートがベータ版から一般提供へと移行したことを嬉しく思います。ClickHouse Cloud は、AWS、Google Cloud Platform、そして今や Microsoft Azure のすべての主要クラウドプラットフォームで利用可能です。 - -このリリースには、[Microsoft Azure Marketplace](https://azuremarketplace.microsoft.com/en-us/marketplace/apps/clickhouse.clickhouse_cloud)を通じてのサブスクリプションのサポートも含まれています。サービスは以下の地域で初めてサポートされます: -- 米国:West US 3 (アリゾナ) -- 米国:East US 2 (バージニア) -- ヨーロッパ:Germany West Central(フランクフルト) - -特定の地域のサポートを希望する場合は、[お問い合わせ](https://clickhouse.com/support/program)ください。 - -### クエリログインサイト {#query-log-insights} - -クラウドコンソールに新しく追加されたクエリインサイト UI は、ClickHouse に内蔵されたクエリログを使いやすくします。ClickHouse の `system.query_log` テーブルは、クエリの最適化、デバッグ、および全体的なクラスタの健康とパフォーマンスの監視に関する情報の重要なソースです。ただし、70以上のフィールドと複数のレコードにわたるクエリから、クエリログの解釈が難しい場合があります。この初期版のクエリインサイトは、クエリデバッグと最適化パターンを簡素化するための青写真を提供します。この機能の改善を続けたいと思っており、お客様からのフィードバックをお待ちしておりますので、お気軽にご連絡ください。 - -ClickHouse Cloud クエリインサイト UI でクエリパフォーマンスメトリクスと分析を表示 - -### メトリクス用 Prometheus エンドポイント (プライベートプレビュー) {#prometheus-endpoint-for-metrics-private-preview} - -私たちの最もリクエストの多い機能の1つかもしれません:ClickHouse Cloud から [Prometheus](https://prometheus.io/) メトリクスをエクスポートし、[Grafana](https://grafana.com/) と [Datadog](https://www.datadoghq.com/) で視覚化することができます。Prometheus は ClickHouse を監視し、カスタムアラートを設定するためのオープンソースソリューションを提供します。ClickHouse Cloud サービスの Prometheus メトリクスへのアクセスは、[ClickHouse Cloud API](/integrations/prometheus) 経由で利用できます。この機能は現在プライベートプレビュー中ですので、[サポートチーム](https://clickhouse.com/support/program)にご連絡いただき、この機能を有効にしてください。 - -ClickHouse Cloud からの Prometheus メトリクスを表示する Grafana ダッシュボード - -### その他の機能: {#other-features} -- [構成可能なバックアップ](/cloud/manage/backups/configurable-backups)は、頻度、保持、およびスケジュールのカスタムバックアップポリシーを構成するために、現在一般提供されております。 - -## 2024年6月13日 {#june-13-2024} -### Kafka ClickPipes コネクタの構成可能なオフセット (ベータ) {#configurable-offsets-for-kafka-clickpipes-connector-beta} - -最近まで、新しい [Kafka Connector for ClickPipes](/integrations/clickpipes/kafka) を設定すると、常に Kafka トピックの最初からデータを消費していました。この状況では、履歴データを再処理したり、新しい incoming データを監視したり、正確なポイントから再開する必要がある場合に、特定のユースケースに適合しないことがありました。 - -Kafka 用の ClickPipes では、Kafka トピックからのデータ消費に対する柔軟性とコントロールを向上させる新機能を追加しました。これにより、データが消費されるオフセットを構成できるようになります。 - -以下のオプションが利用可能です: -- 開始から:Kafka 
トピックの最初からデータの消費を開始します。このオプションは、すべての履歴データを再処理する必要があるユーザーに最適です。 -- 最新から:最新のオフセットからデータの消費を開始します。これは、新しいメッセージのみに関心があるユーザーに便利です。 -- タイムスタンプから:特定のタイムスタンプ以降に生成されたメッセージからデータの消費を開始します。この機能により、より正確なコントロールが可能になり、ユーザーが正確な時点から処理を再開できるようになります。 - -オフセット選択オプションを示す ClickPipes Kafka コネクタ設定インターフェース - -### サービスをファストリリースチャンネルに登録 {#enroll-services-to-the-fast-release-channel} - -ファストリリースチャンネルを使用すると、サービスはリリーススケジュールに先立って更新を受け取ることができます。以前は、この機能を有効にするにはサポートチームによる支援が必要でしたが、今では ClickHouse Cloud コンソールを使用して直接サービスのためにこの機能を有効にすることができます。「設定」に移動し、「ファストリリースに登録」をクリックするだけです。これにより、サービスは利用可能になるとすぐに更新を受け取ります! - -ファストリリースへの登録オプションを表示する ClickHouse Cloud 設定ページ - -### 水平方向のスケーリングのための Terraform サポート {#terraform-support-for-horizontal-scaling} - -ClickHouse Cloud は [水平スケーリング](/manage/scaling#how-scaling-works-in-clickhouse-cloud) をサポートしており、サービスに同サイズの追加レプリカを追加する機能を提供します。水平スケーリングは、パフォーマンスを向上させ、並列処理をサポートします。以前は、レプリカを追加するために ClickHouse Cloud コンソールやAPIを使用する必要がありましたが、今では Terraform を使ってプログラム的に ClickHouse サービスのレプリカを追加または削除できるようになりました。 - -詳細については、[ClickHouse Terraform プロバイダー](https://registry.terraform.io/providers/ClickHouse/clickhouse/latest/docs)をご覧ください。 - -## 2024年5月30日 {#may-30-2024} -### チームメイトとクエリを共有する {#share-queries-with-your-teammates} - -SQL クエリを記述するとき、チームの他の人にとってもそのクエリが役立つ可能性が高いです。以前は、クエリを Slack やメールで送信する必要があり、クエリを編集したときにチームメイトが自動的にその更新を受け取る方法はありませんでした。 - -ClickHouse Cloud コンソールを通じて、クエリを簡単に共有できるようになりました。クエリエディタから、クエリをチーム全体または特定のチームメンバーと直接共有できます。また、読み取りまたは書き込みのみにアクセスできるかを指定することもできます。クエリエディタの **共有** ボタンをクリックして、新しい共有クエリ機能を試してみてください。 - -権限オプションを含む共有機能を表示する ClickHouse Cloud クエリエディタ - -### Microsoft Azure 向け ClickHouse Cloud がベータ版であります {#clickhouse-cloud-for-microsoft-azure-is-now-in-beta} - -ついに、Microsoft Azure 上で ClickHouse Cloud サービスを作成できるようになりました!私たちのプライベートプレビュープログラムの一環として、すでに多くのお客様が Azure で ClickHouse Cloud を使用しています。今では、誰でも Azure 上で自分自身のサービスを作成できます。AWS および GCP でサポートされているお好みの ClickHouse 機能は、すべて Azure でも動作します。 - -今後数週間以内に、Azure 向け ClickHouse Cloud を一般提供する予定です。詳細を学ぶには、[こちらのブログ投稿](https://clickhouse.com/blog/clickhouse-cloud-is-now-on-azure-in-public-beta)をご覧いただくか、ClickHouse Cloud コンソールを使用して Azure 経由で新しいサービスを作成してください。 - -注意:現在、Azure 向けの **開発** サービスはサポートされていません。 - -### クラウドコンソールを介してプライベートリンクを設定する {#set-up-private-link-via-the-cloud-console} - -プライベートリンク機能を使用すると、ClickHouse Cloud サービスをクラウドプロバイダーアカウント内の内部サービスと接続でき、公共インターネットへのトラフィックを指向することなくコストを節約し、安全性を高めることができます。以前は、これを設定するのが困難で、ClickHouse Cloud API を使用する必要がありました。 - -今、ClickHouse Cloud コンソールから数回のクリックでプライベートエンドポイントを構成できるようになりました。これには、サービスの **設定** に移動し、**セキュリティ** セクションに進み、**プライベートエンドポイントの設定** をクリックします。 - -セキュリティ設定内でのプライベートエンドポイント設定インターフェースを表示する ClickHouse Cloud コンソール - -## 2024年5月17日 {#may-17-2024} -### ClickPipes を使用して Amazon Kinesis からデータを取り込む (ベータ) {#ingest-data-from-amazon-kinesis-using-clickpipes-beta} - -ClickPipes は、コードなしでデータを取り込むために ClickHouse Cloud が提供する独自のサービスです。Amazon Kinesis は、AWS のフルマネージドストリーミングサービスであり、処理のためにデータストリームを取り込み、保存します。ClickPipes の Amazon Kinesis ベータ版を発表できることを嬉しく思います。これは、私たちがよくリクエストされる統合の1つです。ClickPipes への新しい統合を追加する予定なので、サポートしてほしいデータソースがあれば教えてください! 
[こちらで](https://clickhouse.com/blog/clickpipes-amazon-kinesis) この機能についてもっと読むことができます。 - -クラウドコンソールで新しい Amazon Kinesis 統合を試すことができます: - -Amazon Kinesis 統合設定オプションを示す ClickPipes インターフェース - -### 構成可能なバックアップ (プライベートプレビュー) {#configurable-backups-private-preview} - -バックアップはすべてのデータベースにとって重要です(どんなに信頼性が高くても)、ClickHouse Cloud の初日からバックアップの重要性を真剣に受け止めてきました。今週、私たちは構成可能なバックアップを開始しました。これにより、サービスのバックアップに対する柔軟性が大幅に向上します。これで、開始時間、保持、および頻度を制御できるようになりました。この機能は **Production**および **Dedicated** サービス用に利用可能で、**Development** サービス用には利用できません。この機能は現在プライベートプレビュー中ですので、サービスの有効化については support@clickhouse.com までご連絡ください。構成可能なバックアップについての詳細は、[こちら](https://clickhouse.com/blog/configurable-backups-in-clickhouse-cloud)でご覧いただけます。 - -### SQL クエリから API を作成する (ベータ) {#create-apis-from-your-sql-queries-beta} - -ClickHouse 用の SQL クエリを書くと、アプリケーションにクエリを公開するにはドライバ経由で ClickHouse に接続する必要があります。しかし、現在の **クエリエンドポイント** 機能を使用すると、設定なしで API から直接 SQL クエリを実行できます。クエリエンドポイントを指定して、JSON、CSV、または TSV を返すように設定できます。クラウドコンソールで「共有」ボタンをクリックして、クエリでこの新機能を試してみてください。 [クエリエンドポイントについての詳細はこちら](https://clickhouse.com/blog/automatic-query-endpoints)をご覧ください。 - -出力形式オプションを持つクエリエンドポイント設定を示す ClickHouse Cloud インターフェース - -### 公式の ClickHouse 認証が提供されています {#official-clickhouse-certification-is-now-available} - -ClickHouse 開発トレーニングコースには 12 の無料トレーニングモジュールがあります。この週の前には、ClickHouse での習熟度を証明する公式な方法はありませんでした。最近、**ClickHouse 認定開発者**になるための公式な試験を開始しました。この試験を完了すると、データの取り込み、モデリング、分析、パフォーマンスの最適化などのトピックに関する ClickHouse の習熟度を、現在および将来の雇用主に示すことができます。 [こちらで試験を受ける](https://clickhouse.com/learn/certification) か、ClickHouse 認証についての詳細は [このブログ投稿](https://clickhouse.com/blog/first-official-clickhouse-certification)をご覧ください。 - -## 2024年4月25日 {#april-25-2024} -### S3 および GCS からデータを ClickPipes を使用してロードする {#load-data-from-s3-and-gcs-using-clickpipes} - -最近リリースされたクラウドコンソールには、「データソース」という新しいセクションがあることに気づいたかもしれません。「データソース」ページは、様々なソースから ClickHouse Cloud にデータを簡単に挿入できる ClickPipes というネイティブな ClickHouse Cloud 機能によってパワーされています。 - -最近の ClickPipes アップデートには、Amazon S3 および Google Cloud Storage からデータを直接アップロードする機能が追加されました。組み込みのテーブル関数を使用することもできますが、ClickPipes は、UI を介しての完全に管理されたサービスであり、数回のクリックで S3 および GCS からデータを取り込むことができます。この機能はまだプライベートプレビュー中ですが、クラウドコンソールで今すぐ試すことができます。 - -S3 および GCS バケットからデータをロードするための設定オプションを示す ClickPipes インターフェース - -### 500 以上のソースから ClickHouse Cloud へのデータを Fivetran を使用してロードする {#use-fivetran-to-load-data-from-500-sources-into-clickhouse-cloud} - -ClickHouse は、すべての大規模データセットを迅速にクエリできますが、もちろん、データは最初に ClickHouse に挿入する必要があります。Fivetran の多様なコネクタのおかげで、ユーザーは 500 以上のソースからデータを迅速にロードできるようになりました。Zendesk、Slack、またはお気に入りのアプリケーションからデータをロードする必要がある場合、Fivetran の新しい ClickHouse 宛先を使用することで、ClickHouse をアプリケーションデータのターゲットデータベースとして使用できるようになります。 - -これは多くの月の努力の末、私たちの統合チームによって構築されたオープンソースの統合です。 [こちらのリリースブログ投稿](https://clickhouse.com/blog/fivetran-destination-clickhouse-cloud) と、[GitHub リポジトリ](https://github.com/ClickHouse/clickhouse-fivetran-destination)をここで確認できます。 - -### その他の変更 {#other-changes} - -**コンソールの変更** -- SQL コンソールにおける出力形式のサポート - -**統合の変更** -- ClickPipes Kafka コネクタがマルチブローカー設定をサポート -- PowerBI コネクタが ODBC ドライバ設定オプションを提供するサポートが追加 - -## 2024年4月18日 {#april-18-2024} -### AWS 東京リージョンが ClickHouse Cloud 用に利用可能になりました {#aws-tokyo-region-is-now-available-for-clickhouse-cloud} - -このリリースでは、ClickHouse Cloud 用に新しい AWS 東京リージョン (`ap-northeast-1`) が導入されました。ClickHouse を最速のデータベースにしたいと考えているため、可能な限りレイテンシを削減するために、すべてのクラウドのリージョンを追加し続けています。更新されたクラウドコンソールで東京に新しいサービスを作成できます。 - -東京リージョン選択を表示する ClickHouse Cloud サービス作成インターフェース - -その他の変更: -### コンソールの変更 {#console-changes} -- ClickPipes for Kafka に対する Avro 形式のサポートが現在一般提供中 -- Terraform 
プロバイダーに対してリソースのインポート(サービスとプライベートエンドポイント)の完全なサポートを実装 - -### 統合の変更 {#integrations-changes} -- NodeJS クライアントの主要な安定リリース: クエリ + ResultSet、URL 構成に対する高度な TypeScript サポート -- Kafka コネクタ: DLQ への書き込み時に例外を無視するバグを修正、Avro 列挙型をサポートする機能を追加、[MSK](https://www.youtube.com/watch?v=6lKI_WlQ3-s) および [Confluent Cloud](https://www.youtube.com/watch?v=SQAiPVbd3gg) でのコネクタ使用法ガイドを公開 -- Grafana: UI で Nullable 型のサポートを修正、動的 OTEL トレーシングテーブル名のサポートを修正 -- DBT: カスタムマテリアライゼーションのモデル設定を修正 -- Java クライアント: 不正なエラーコード解析のバグを修正 -- Python クライアント: 数値型のパラメータバインディングを修正、クエリバインディングの数値リストに関するバグを修正、SQLAlchemy Point サポートを追加 - -## 2024年4月4日 {#april-4-2024} -### 新しい ClickHouse Cloud コンソールの紹介 {#introducing-the-new-clickhouse-cloud-console} - -このリリースでは、新しいクラウドコンソールのプライベートプレビューを導入します。 - -ClickHouse では、開発者エクスペリエンスの向上について常に考えています。最速のリアルタイムデータウェアハウスを提供するだけでは不十分で、それを使いやすく管理しやすくする必要があります。 - -数千人の ClickHouse Cloud ユーザーが毎月私たちの SQL コンソールで数十億のクエリを実行しているため、ClickHouse Cloud サービスとのインタラクションを以前よりも簡単にするために、世界クラスのコンソールに投資することに決めました。新しいクラウドコンソール体験は、スタンドアロンの SQL エディタと管理コンソールを直感的な UI 内で組み合わせています。 - -選ばれたお客様には、新しいクラウドコンソール体験をプレビューする機会が提供されます – ClickHouse 内のデータを探索し管理するための統合された没入型の方法です。優先アクセスを希望される場合は、support@clickhouse.com までご連絡ください。 - -統合された SQL エディタと管理機能を持つ新しい ClickHouse Cloud コンソールインターフェースを示すアニメーション - -## 2024年3月28日 {#march-28-2024} - -このリリースでは、Microsoft Azure のサポート、API からの水平スケーリング、プライベートプレビューでのリリースチャンネルを導入します。 -### 一般的な更新 {#general-updates} -- Microsoft Azure へのサポートをプライベートプレビューで導入しました。アクセスを取得するには、アカウント管理またはサポートに連絡するか、[待機リスト](https://clickhouse.com/cloud/azure-waitlist)に参加してください。 -- リリースチャンネルを導入しました – 環境タイプに基づいてアップグレードのタイミングを指定する機能。このリリースでは、「ファスト」リリースチャンネルを追加し、非本番環境を本番より先にアップグレードできるようにしました(有効にするにはサポートに連絡してください)。 - -### 管理の変更 {#administration-changes} -- API 経由での水平スケーリング構成のサポートを追加(プライベートプレビュー、サポートに連絡して有効にしてください) -- 起動時にメモリエラーが発生しているサービスのスケーリング上昇を改善 -- Terraform プロバイダー経由で AWS に対する CMEK のサポートを追加 - -### コンソールの変更 {#console-changes-1} -- Microsoft ソーシャルログインをサポート -- SQL コンソールでのパラメータ化されたクエリ共有機能を追加 -- クエリエディタのパフォーマンスを大幅に改善(一部の EU リージョンでのレイテンシが 5 秒から 1.5 秒に短縮) - -### 統合の変更 {#integrations-changes-1} -- ClickHouse OpenTelemetry エクスポータ: ClickHouse のレプリケーショントランケーブルエンジンをサポートする [追加](https://github.com/open-telemetry/opentelemetry-collector-contrib/pull/31920) および [統合テスト追加](https://github.com/open-telemetry/opentelemetry-collector-contrib/pull/31896) -- ClickHouse DBT アダプタ: [辞書のマテリアライゼーションマクロのサポートを追加](https://github.com/ClickHouse/dbt-clickhouse/pull/255)し、[TTL 表現サポートのテストを追加](https://github.com/ClickHouse/dbt-clickhouse/pull/254) -- ClickHouse Kafka Connect Sink: [Kafka プラグイン発見との互換性を追加](https://github.com/ClickHouse/clickhouse-kafka-connect/issues/350)(コミュニティの寄与) -- ClickHouse Java Client: 新しいクライアント API 用の [新しいパッケージを導入](https://github.com/ClickHouse/clickhouse-java/pull/1574)し、[Cloud テストのためのテストカバレッジを追加](https://github.com/ClickHouse/clickhouse-java/pull/1575) -- ClickHouse NodeJS Client: 新しい HTTP keep-alive の動作に対するテストとドキュメントを拡張。v0.3.0 リリース以降のもの -- ClickHouse Golang Client: [Map 内のキーとして Enum のバグを修正](https://github.com/ClickHouse/clickhouse-go/pull/1236)、接続プール内にエラーのある接続が残らない場合のバグを修正 [(コミュニティの寄与)](https://github.com/ClickHouse/clickhouse-go/pull/1237) -- ClickHouse Python Client: [PyArrow を介してのクエリストリーミングを支援する](https://github.com/ClickHouse/clickhouse-connect/issues/155)(コミュニティ寄与) - -### セキュリティ更新 {#security-updates} -- ClickHouse Cloud を更新して、["ロールベースのアクセス制御が有効な場合にクエリキャッシュがバイパスされる"](https://github.com/ClickHouse/ClickHouse/security/advisories/GHSA-45h5-f7g3-gr8r)(CVE-2024-22412)を防止 - -## 2024年3月14日 {#march-14-2024} - -このリリースでは、新しいクラウドコンソールの体験、S3 および GCS からのバルクローディング向けの ClickPipes、および 
Kafka 用の ClickPipes における Avro 形式のサポートが早期アクセスで提供されます。また、ClickHouse データベースバージョンが 24.1 にアップグレードされ、新機能のサポートやパフォーマンスおよびリソース使用の最適化を実現しています。 -### コンソールの変更 {#console-changes-2} -- 新しいクラウドコンソール体験が早期アクセスで提供中(参加に興味がある場合はサポートに連絡してください)。 -- S3 および GCS からのバルクローディング用 ClickPipes が早期アクセスで提供中(参加に興味がある場合はサポートに連絡してください)。 -- Kafka 用 ClickPipes の Avro 形式のサポートが早期アクセスで提供中(参加に興味がある場合はサポートに連絡してください)。 - -### ClickHouse バージョンアップグレード {#clickhouse-version-upgrade} -- FINAL に対する最適化、ベクトル化の改善、より高速な集計 - 詳細は [23.12 リリースブログ](https://clickhouse.com/blog/clickhouse-release-23-12#optimizations-for-final)を参照してください。 -- punycode の処理、文字列の類似性、外れ値の検出、およびマージおよび Keeper のメモリ最適化に関する新機能 - 詳細は [24.1 リリースブログ](https://clickhouse.com/blog/clickhouse-release-24-01)および [プレゼンテーション](https://presentations.clickhouse.com/release_24.1/)を参照ください。 -- この ClickHouse Cloud バージョンは 24.1 に基づいており、新機能、パフォーマンス改善、バグ修正が数十件あります。詳細はコアデータベースの [変更ログ](/whats-new/changelog/2023#2312)を参照ください。 - -### 統合の変更 {#integrations-changes-2} -- Grafana: v4 のダッシュボード移行とアドホックフィルタリングロジックを修正 -- Tableau コネクタ: DATENAME 関数および「実際の」引数の丸めを修正 -- Kafka コネクタ: 接続初期化時の NPE を修正、JDBC ドライバオプションを指定する機能を追加 -- Golang クライアント: レスポンスのメモリフットプリントを減少、Date32 の極端な値を修正、圧縮が有効な場合のエラー報告を改善 -- Python クライアント: 日時パラメータでのタイムゾーンのサポートを改善、Pandas DataFrame のパフォーマンスを改善 - -## 2024年2月29日 {#february-29-2024} - -このリリースでは、SQL コンソールアプリケーションの読み込み時間を改善し、ClickPipes における SCRAM-SHA-256 認証をサポートし、Kafka Connect へのネスト構造サポートを拡張します。 -### コンソールの変更 {#console-changes-3} -- SQL コンソールアプリケーションの初期読み込み時間を最適化 -- SQL コンソール中のレースコンディションを修正し、「認証失敗」エラーを防止 -- 最近のメモリ割り当て値が時折間違っている監視ページの動作を修正 -- SQL コンソールが時折重複した KILL QUERY コマンドを発行する動作を修正 -- ClickPipes における Kafka ベースのデータソース用に SCRAM-SHA-256 認証メソッドのサポートを追加 - -### 統合の変更 {#integrations-changes-3} -- Kafka コネクタ: 複雑なネスト構造(配列、マップ)へのサポートを拡張、FixedString 型のサポートを追加、複数のデータベースへの取り込みをサポート -- Metabase: ClickHouse バージョン 23.8 未満との互換性の修正 -- DBT: モデル作成にパラメータを渡す機能を追加 -- Node.js クライアント: 長時間実行されるクエリ (>1 時間) をサポートし、空の値を優雅に処理する機能を追加 - -## 2024年2月15日 {#february-15-2024} - -このリリースはコアデータベースバージョンをアップグレードし、Terraform を介してプライベートリンクを設定する機能を追加し、Kafka Connect を介して非同期挿入の正確な一度のセマンティクスのサポートを追加します。 - -### ClickHouse バージョンアップグレード {#clickhouse-version-upgrade-1} -- S3Queue テーブルエンジンによる S3 からのデータの連続的でスケジュールされたロードが生産レベルで準備完了 - 詳細は [23.11 リリースブログ](https://clickhouse.com/blog/clickhouse-release-23-11)を参照してください。 -- FINAL に対する重要なパフォーマンスの改善と SIMD 命令によるベクトル化の改善があり、より高速なクエリの実現 - 詳細は [23.12 リリースブログ](https://clickhouse.com/blog/clickhouse-release-23-12#optimizations-for-final)を参照してください。 -- この ClickHouse Cloud バージョンは 23.12 に基づいており、多数の新機能、パフォーマンス向上、バグ修正が含まれています。 [コアデータベースの変更ログ](/whats-new/changelog/2023#2312)を確認してください。 - -### コンソールの変更 {#console-changes-4} -- Terraform プロバイダーを介して AWS Private Link および GCP Private Service Connect を設定する機能を追加 -- リモートファイルデータ インポートの回復力を改善 -- すべてのデータインポートにインポートステータスの詳細フライアウトを追加 -- S3 データインポートにキー/シークレットキー認証情報のサポートを追加 - -### 統合の変更 {#integrations-changes-4} -* Kafka Connect - * 正確な一度のための async_insert をサポート(デフォルトで無効) -* Golang クライアント - * DateTime バインディングを修正 - * バッチ挿入性能を改善 -* Java クライアント - * リクエスト圧縮の問題を修正 -### 設定の変更 {#settings-changes} -* `use_mysql_types_in_show_columns` はもはや必要ありません。MySQL インターフェースを通じて接続すると、自動的に有効になります。 -* `async_insert_max_data_size` のデフォルト値が `10 MiB` になりました。 -## 2024年2月2日 {#february-2-2024} - -このリリースは、Azure Event Hub への ClickPipes の利用可能性をもたらし、v4 ClickHouse Grafana コネクタを使用したログおよびトレースナビゲーションのワークフローを劇的に改善し、Flyway と Atlas データベーススキーマ管理ツールのサポートを初めて導入します。 -### コンソールの変更 {#console-changes-5} -* Azure Event Hub への ClickPipes サポートが追加されました。 -* 新しいサービスは、デフォルトのアイドル時間が 15 分で開始されます。 -### 統合の変更 {#integrations-changes-5} -* [ClickHouse 
データソース for Grafana](https://grafana.com/grafana/plugins/grafana-clickhouse-datasource/) v4 リリース - * テーブル、ログ、タイムシリーズ、トレースのための専門のエディターを持つ完全に再構築されたクエリビルダー - * より複雑で動的なクエリをサポートするために完全に再構築された SQL ジェネレーター - * ログおよびトレースビューに対する OpenTelemetry のファーストクラスサポートの追加 - * ログおよびトレース用のデフォルトのテーブルやカラムを指定するための設定の拡張 - * カスタム HTTP ヘッダーを指定する能力の追加 - * さらに多くの改善点 - 完全な [変更ログ](https://github.com/grafana/clickhouse-datasource/blob/main/CHANGELOG.md#400)を確認してください。 -* データベーススキーマ管理ツール - * [Flyway に ClickHouse サポートが追加されました](https://github.com/flyway/flyway-community-db-support/packages/2037428) - * [Ariga Atlas に ClickHouse サポートが追加されました](https://atlasgo.io/blog/2023/12/19/atlas-v-0-16#clickhouse-beta-program) -* Kafka Connector Sink - * デフォルト値を持つテーブルへの取り込みを最適化しました。 - * DateTime64 における文字列ベースの日付のサポートが追加されました。 -* Metabase - * 複数のデータベースへの接続のサポートが追加されました。 -## 2024年1月18日 {#january-18-2024} - -このリリースは、AWS の新しいリージョン(ロンドン / eu-west-2)を追加し、Redpanda、Upstash、Warpstream に対する ClickPipes のサポートを追加し、[is_deleted](/engines/table-engines/mergetree-family/replacingmergetree#is_deleted) コアデータベース機能の信頼性を改善します。 -### 一般的な変更 {#general-changes} -- 新しい AWS リージョン: ロンドン (eu-west-2) -### コンソールの変更 {#console-changes-6} -- Redpanda、Upstash、Warpstream に対する ClickPipes サポートが追加されました。 -- ClickPipes 認証メカニズムが UI で構成可能になりました。 -### 統合の変更 {#integrations-changes-6} -- Java クライアント: - - 破壊的変更: 呼び出し時にランダムな URL ハンドルを指定する機能が削除されました。この機能は ClickHouse から削除されました。 - - 非推奨: Java CLI クライアントおよび GRPC パッケージ - - ClickHouse インスタンスへのバッチサイズおよび負荷を減らすために RowBinaryWithDefaults 形式をサポート - - Date32 および DateTime64 の範囲境界を ClickHouse と互換性のあるものにし、Spark Array 文字列型との互換性を持たせました。 -- Kafka Connector: Grafana 向けの JMX 監視ダッシュボードが追加されました。 -- PowerBI: ODBC ドライバー設定が UI で構成可能になりました。 -- JavaScript クライアント: クエリの要約情報を公開し、挿入のために特定のカラムのサブセットを提供できるようにし、Web クライアントの keep_alive を構成可能にしました。 -- Python クライアント: SQLAlchemy に対する Nothing 型のサポートが追加されました。 -### 信頼性の変更 {#reliability-changes} -- ユーザー側の逆互換性のある変更: 以前は、2 つの機能 ([is_deleted](/engines/table-engines/mergetree-family/replacingmergetree#is_deleted) および ``OPTIMIZE CLEANUP``) が特定の条件下で ClickHouse のデータの破損を引き起こす可能性がありました。ユーザーのデータの整合性を守るために、機能のコアを維持しつつ、この機能の動作を調整しました。具体的には、MergeTree 設定の ``clean_deleted_rows`` は現在非推奨となり、もはや効果がないことになりました。``CLEANUP`` キーワードはデフォルトでは許可されていません(使用するには ``allow_experimental_replacing_merge_with_cleanup`` を有効にする必要があります)。``CLEANUP`` を使用することを決定した場合は、常に ``FINAL`` と一緒に使用されることを確認する必要があり、``OPTIMIZE FINAL CLEANUP`` を実行した後に古いバージョンを持つ行が挿入されないことを保証しなければなりません。 -## 2023年12月18日 {#december-18-2023} - -このリリースは、GCP の新しいリージョン(us-east1)、セキュアなエンドポイント接続の自己サービス機能、DBT 1.7 を含む追加の統合サポート、数多くのバグ修正およびセキュリティ強化を提供します。 -### 一般的な変更 {#general-changes-1} -- ClickHouse Cloud は、GCP us-east1 (サウスカロライナ) リージョンで利用可能になりました。 -- OpenAPI を介して AWS Private Link および GCP Private Service Connect を設定する機能が有効になりました。 -### コンソールの変更 {#console-changes-7} -- 開発者ロールを持つユーザー向けの SQL コンソールへのシームレスなログインが可能になりました。 -- オンボーディング中のアイドル制御の設定のワークフローが簡素化されました。 -### 統合の変更 {#integrations-changes-7} -- DBT コネクタ: DBT の v1.7 までのサポートが追加されました。 -- Metabase: Metabase v0.48 へのサポートが追加されました。 -- PowerBI Connector: PowerBI Cloud での実行機能が追加されました。 -- ClickPipes 内部ユーザーの権限を構成可能にしました。 -- Kafka Connect - - Nullable 型の重複排除ロジックと取り込みを改善しました。 - - テキストベースのフォーマット (CSV、TSV) のサポートが追加されました。 -- Apache Beam: Boolean および LowCardinality 型のサポートが追加されました。 -- Nodejs クライアント: Parquet 形式のサポートが追加されました。 -### セキュリティのお知らせ {#security-announcements} -- 3 つのセキュリティ脆弱性が修正されました - 詳細は [セキュリティ変更ログ](/whats-new/security-changelog) を参照してください: - - CVE 2023-47118 (CVSS 7.0) - デフォルトでポート 9000/tcp で実行されているネイティブインターフェースに影響を与えるヒープバッファオーバーフローの脆弱性 - - CVE-2023-48704 (CVSS 7.0) - デフォルトでポート 
9000/tcp で実行されているネイティブインターフェースに影響を与えるヒープバッファオーバーフローの脆弱性 - - CVE 2023-48298 (CVSS 5.9) - FPC 圧縮コーデックの整数アンダーフローの脆弱性 -## 2023年11月22日 {#november-22-2023} - -このリリースは、コアデータベースバージョンをアップグレードし、ログインおよび認証フローを改善し、Kafka Connect Sink にプロキシサポートを追加します。 -### ClickHouse バージョンアップグレード {#clickhouse-version-upgrade-2} - -- Parquet ファイルの読み取りパフォーマンスが劇的に改善されました。詳細は [23.8 リリースブログ](https://clickhouse.com/blog/clickhouse-release-23-08) を参照してください。 -- JSON の型推論サポートが追加されました。詳細は [23.9 リリースブログ](https://clickhouse.com/blog/clickhouse-release-23-09) を参照してください。 -- `ArrayFold` のような強力なアナリスト向け関数が導入されました。詳細は [23.10 リリースブログ](https://clickhouse.com/blog/clickhouse-release-23-10) を参照してください。 -- **ユーザー側の逆互換性のある変更**: JSON 形式で文字列から数値を推論するのを避けるために、デフォルトで `input_format_json_try_infer_numbers_from_strings` 設定が無効化されました。これを行うと、サンプルデータに数値に似た文字列が含まれている場合にパースエラーが発生する可能性があります。 -- 数十の新機能、パフォーマンス改善、バグ修正が行われました。詳細は [コアデータベースの変更ログ](/whats-new/changelog) を参照してください。 -### コンソールの変更 {#console-changes-8} - -- ログインおよび認証フローが改善されました。 -- 大規模なスキーマをよりよくサポートするために AI ベースのクエリ提案が改善されました。 -### 統合の変更 {#integrations-changes-8} - -- Kafka Connect Sink: プロキシサポート、`topic-tablename` マッピング、Keeper の _exactly-once_ 配信プロパティの構成可能性が追加されました。 -- Node.js クライアント: Parquet 形式のサポートが追加されました。 -- Metabase: `datetimeDiff` 関数のサポートが追加されました。 -- Python クライアント: カラム名での特殊文字のサポートが追加されました。タイムゾーンパラメータのバインディングが修正されました。 -## 2023年11月2日 {#november-2-2023} - -このリリースは、アジアにおける開発サービスのリージョナルサポートを拡大し、顧客管理の暗号化キーに対するキー回転機能を導入し、請求コンソールにおける税金設定の粒度を改善し、サポートされている言語クライアント全体にわたるいくつかのバグ修正を提供します。 -### 一般的な更新 {#general-updates-1} -- 開発サービスが AWS の `ap-south-1` (ムンバイ) および `ap-southeast-1` (シンガポール) で利用可能になりました。 -- 顧客管理の暗号化キー (CMEK) に対するキー回転のサポートが追加されました。 -### コンソールの変更 {#console-changes-9} -- クレジットカードを追加する際に粒度の高い税金設定を構成する機能が追加されました。 -### 統合の変更 {#integrations-changes-9} -- MySQL - - MySQL 経由の Tableau Online および QuickSight のサポートが改善されました。 -- Kafka Connector - - テキストベースのフォーマット (CSV、TSV) のサポートを追加するために新しい StringConverter が導入されました。 - - Bytes および Decimal データ型のサポートが追加されました。 - - 再試行可能な例外を常に再試行されるように調整しました (errors.tolerance=all の場合でも)。 -- Node.js クライアント - - 大規模なデータセットをストリーミングした際の腐敗した結果をもたらす問題を修正しました。 -- Python クライアント - - 大規模な挿入のタイムアウトを修正しました。 - - NumPy/Pandas の Date32 問題を修正しました。 -​- Golang クライアント - - JSON カラムへの空のマップの挿入、圧縮バッファのクリーンアップ、クエリエスケープ、IPv4 および IPv6 のゼロ/nil に対するパニックを修正しました。 - - キャンセルされた挿入に対するウォッチドッグを追加しました。 -- DBT - - テストを伴う分散テーブルのサポートが改善されました。 -## 2023年10月19日 {#october-19-2023} - -このリリースは、SQL コンソールにおける使いやすさおよびパフォーマンスの改善、Metabase コネクタにおける IP データ型処理の改善、新しい機能を Java および Node.js クライアントに追加します。 -### コンソールの変更 {#console-changes-10} -- SQL コンソールの使いやすさが改善されました (例: クエリ実行間でのカラム幅の保持)。 -- SQL コンソールのパフォーマンスが改善されました。 -### 統合の変更 {#integrations-changes-10} -- Java クライアント: - - パフォーマンスを向上させ、オープン接続を再利用するためにデフォルトのネットワークライブラリを切り替えました。 - - プロキシのサポートが追加されました。 - - Trust Store を使用してセキュアな接続をサポートする機能が追加されました。 -- Node.js クライアント: 挿入クエリの keep-alive 動作を修正しました。 -- Metabase: IPv4/IPv6 カラムのシリアライゼーションが修正されました。 -## 2023年9月28日 {#september-28-2023} - -このリリースは、Kafka、Confluent Cloud、Amazon MSK に対する ClickPipes の一般提供をもたらし、Kafka Connect ClickHouse Sink、IAM ロールを介した Amazon S3 へのセキュアなアクセスの自己サービスワークフロー、AI 支援のクエリ提案 (プライベートプレビュー) を追加します。 -### コンソールの変更 {#console-changes-11} -- IAM ロールを介して [Amazon S3 へのアクセスをセキュリティするための自己サービスワークフロー](/cloud/security/secure-s3) が追加されました。 -- プライベートプレビューで AI 支援のクエリ提案が導入されました (試してみるには、[ClickHouse Cloud サポート](https://console.clickhouse.cloud/support) にお問い合わせください!)。 -### 統合の変更 {#integrations-changes-11} -- ClickPipes の一般提供が発表されました - Kafka、Confluent Cloud、Amazon MSK に対するターンキーのデータ取り込みサービス (詳細は 
[リリースブログ](https://clickhouse.com/blog/clickpipes-is-generally-available) を参照)。 -- Kafka Connect ClickHouse Sink の一般提供が達成されました。 - - `clickhouse.settings` プロパティを使用したカスタマイズされた ClickHouse 設定のサポートが拡張されました。 - - 動的フィールドを考慮した重複排除動作が改善されました。 - - ClickHouse からのテーブル変更を再取得するための `tableRefreshInterval` のサポートが追加されました。 -- SSL 接続の問題および [PowerBI](/integrations/powerbi) と ClickHouse データ型間の型マッピングが修正されました。 -## 2023年9月7日 {#september-7-2023} - -このリリースでは、PowerBI Desktop 公式コネクタのベータ版リリース、インドにおけるクレジットカード決済処理の改善、およびサポートされている言語クライアント全体の複数の改善が行われます。 -### コンソールの変更 {#console-changes-12} -- インドからの請求をサポートするために、残額および支払い再試行が追加されました。 -### 統合の変更 {#integrations-changes-12} -- Kafka Connector: ClickHouse 設定の構成、および error.tolerance 構成オプションの追加がサポートされました。 -- PowerBI Desktop: 公式コネクタのベータ版がリリースされました。 -- Grafana: Point geo type のサポートが追加され、Data Analyst ダッシュボードの Panels が修正され、timeInterval マクロが修正されました。 -- Python クライアント: Pandas 2.1.0 との互換性があり、Python 3.7 のサポートが打ち切られました。Nullable JSON 形式のサポートが追加されました。 -- Node.js クライアント: default_format 設定のサポートが追加されました。 -- Golang クライアント: bool 型の処理が修正され、文字列制限が削除されました。 -## 2023年8月24日 {#aug-24-2023} - -このリリースでは、ClickHouse データベースへの MySQL インターフェースのサポートを追加し、新しい公式 PowerBI コネクタを導入し、クラウドコンソールに新しい "Running Queries" ビューを追加し、ClickHouse バージョンを 23.7 に更新します。 -### 一般的な更新 {#general-updates-2} -- [MySQL ワイヤプロトコル](/interfaces/mysql) のサポートが追加されました。このプロトコルにより、多くの既存の BI ツールとの互換性が実現します。この機能を組織のために有効化するには、サポートに連絡してください。 -- 新しい公式 PowerBI コネクタが導入されました。 -### コンソールの変更 {#console-changes-13} -- SQL コンソールに "Running Queries" ビューのサポートが追加されました。 -### ClickHouse 23.7 バージョンアップグレード {#clickhouse-237-version-upgrade} -- Azure Table 機能のサポートが追加され、地理データ型が生産準備が整い、結合パフォーマンスが向上しました - 詳細は 23.5 リリース [ブログ](https://clickhouse.com/blog/clickhouse-release-23-05) を参照してください。 -- MongoDB 統合サポートがバージョン 6.0 に拡張されました - 詳細は 23.6 リリース [ブログ](https://clickhouse.com/blog/clickhouse-release-23-06) を参照してください。 -- Parquet 形式への書き込み性能が 6 倍向上し、PRQL クエリ言語がサポートされ、SQL 互換性が向上しました - 詳細は 23.7 リリース [デッキ](https://presentations.clickhouse.com/release_23.7/) を参照してください。 -- 数十の新機能、パフォーマンス改善、バグ修正が行われました - 詳細な [変更ログ](/whats-new/changelog) は 23.5、23.6、23.7 を参照してください。 -### 統合の変更 {#integrations-changes-13} -- Kafka Connector: Avro Date および Time 型のサポートが追加されました。 -- JavaScript クライアント: ウェブベースの環境での安定版がリリースされました。 -- Grafana: フィルターロジック、データベース名の処理が改善され、サブ秒精度を持つ TimeInterval のサポートが追加されました。 -- Golang クライアント: バッチおよび非同期データロードの問題がいくつか修正されました。 -- Metabase: v0.47 をサポートし、接続の偽装が追加され、データ型のマッピングが修正されました。 -## 2023年7月27日 {#july-27-2023} - -このリリースでは、Kafka 用の ClickPipes のプライベートプレビュー、新しいデータロード体験、クラウドコンソールを使用して URL からファイルをロードする機能が追加されます。 -### 統合の変更 {#integrations-changes-14} -- Kafka 用の [ClickPipes](https://clickhouse.com/cloud/clickpipes) のプライベートプレビューが導入されました。これは、Kafka および Confluent Cloud からの大量のデータを簡単に取り込むことができるクラウドネイティブな統合エンジンです。待機リストにサインアップするには [こちら](https://clickhouse.com/cloud/clickpipes#joinwaitlist) をクリックしてください。 -- JavaScript クライアント: ウェブベースの環境 (ブラウザ、Cloudflare ワーカー) 向けにサポートをリリースしました。コードは、コミュニティがカスタム環境用コネクタを作成できるようにリファクタリングされました。 -- Kafka Connector: Timestamp および Time Kafka 型のインラインスキーマのサポートが追加されました。 -- Python クライアント: 挿入圧縮および LowCardinality の読み取りの問題が修正されました。 -### コンソールの変更 {#console-changes-14} -- より多くのテーブル作成構成オプションを持つ新しいデータロード体験が追加されました。 -- クラウドコンソールを使用して URL からファイルをロードする機能が導入されました。 -- 異なる組織に参加するための追加オプションや、すべての未解決の招待状を見るためのオプションを持つ招待フローが改善されました。 -## 2023年7月14日 {#july-14-2023} - -このリリースでは、専用サービスを立ち上げる機能、新しい AWS リージョン(オーストラリア)、およびディスク上のデータを暗号化するための独自のキーを持つことができるようになります。 -### 一般的な更新 {#general-updates-3} -- 新しい AWS オーストラリアリージョン: シドニー (ap-southeast-2) -- 要求の厳しいレイテンシーセンサーなワークロード向けの専用サービス tier (セットアップするには 
[サポート](https://console.clickhouse.cloud/support) に連絡してください) -- ディスク上のデータを暗号化するための独自のキー (BYOK) を持つことができる (セットアップするには [サポート](https://console.clickhouse.cloud/support) に連絡してください) -### コンソールの変更 {#console-changes-15} -- 非同期挿入の監視メトリクスダッシュボードへの改善 -- サポートとの統合のためのチャットボットの行動が改善されました。 -### 統合の変更 {#integrations-changes-15} -- NodeJS クライアント: ソケットタイムアウトによる接続失敗に関するバグが修正されました。 -- Python クライアント: 挿入クエリに QuerySummary を追加し、データベース名の特殊文字をサポートする機能が追加されました。 -- Metabase: JDBC ドライバーのバージョンが更新され、DateTime64 サポートが追加され、パフォーマンス改善が行われました。 -### コアデータベースの変更 {#core-database-changes} -- [クエリキャッシュ](/operations/query-cache) を ClickHouse Cloud で有効にすることができます。有効にすると、成功したクエリはデフォルトで 1 分間キャッシュされ、その後のクエリはキャッシュされた結果を使用します。 -## 2023年6月20日 {#june-20-2023} - -このリリースでは、ClickHouse Cloud が GCP で一般提供され、Cloud API 用の Terraform プロバイダが追加され、ClickHouse バージョンが 23.4 に更新されます。 -### 一般的な更新 {#general-updates-4} -- ClickHouse Cloud が GCP で GA となり、GCP Marketplace 統合、Private Service Connect のサポート、自動バックアップが提供されます (詳細は [ブログ](https://clickhouse.com/blog/clickhouse-cloud-on-google-cloud-platform-gcp-is-generally-available) および [プレスリリース](https://clickhouse.com/blog/clickhouse-cloud-expands-choice-with-launch-on-google-cloud-platform) をご覧ください) -- Cloud API 用の [Terraform プロバイダー](https://registry.terraform.io/providers/ClickHouse/clickhouse/latest/docs) が利用可能になりました。 -### コンソールの変更 {#console-changes-16} -- サービスの新しい統合設定ページが追加されました。 -- ストレージとコンピューティングのメーター精度が調整されました。 -### 統合の変更 {#integrations-changes-16} -- Python クライアント: 挿入パフォーマンスが改善され、内部依存関係がリファクタリングされ、マルチプロセッシングがサポートされました。 -- Kafka Connector: Confluent Cloud にアップロードしてインストールすることができ、接続の問題中に再試行が追加され、不正なコネクタ状態を自動的にリセットしました。 -### ClickHouse 23.4 バージョンアップグレード {#clickhouse-234-version-upgrade} -- 平行レプリカ向けの JOIN サポートが追加されました (セットアップするには [サポート](https://console.clickhouse.cloud/support) に連絡してください)。 -- 論理削除のパフォーマンスが向上しました。 -- 大規模な挿入処理中のキャッシングが改善されました。 -### 管理の変更 {#administration-changes-1} -- "default" ではないユーザー向けのローカルディクショナリ作成が拡張されました。 -## 2023年5月30日 {#may-30-2023} - -このリリースでは、ClickHouse Cloud のコントロールプレーン操作のためのプログラマティック API の一般公開 (詳細は [ブログ](https://clickhouse.com/blog/using-the-new-clickhouse-cloud-api-to-automate-deployments) を参照してください)、IAM ロールを使用した S3 アクセス、および追加のスケーリングオプションを提供します。 -### 一般的な変更 {#general-changes-2} -- ClickHouse Cloud 用の API サポート。新しい Cloud API により、既存の CI/CD パイプラインでサービスの管理をシームレスに統合し、サービスをプログラム的に管理できます。 -- IAM ロールを使用した S3 アクセス。IAM ロールを利用して、プライベートな Amazon Simple Storage Service (S3) バケットに安全にアクセスできるようになりました (セットアップするにはサポートに連絡してください)。 -### スケーリングの変更 {#scaling-changes} -- [水平スケーリング](/manage/scaling#manual-horizontal-scaling)。より多くの並列化を必要とするワークロードは、最大 10 レプリカまで構成することができるようになりました (セットアップするにはサポートに連絡してください)。 -- [CPU ベースのオートスケーリング](/manage/scaling)。CPU に依存するワークロードは、オートスケーリングポリシーのための追加のトリガーの恩恵を受けることができます。 -### コンソールの変更 {#console-changes-17} -- Dev サービスを Production サービスに移行する機能を追加 (有効にするにはサポートに連絡してください)。 -- インスタンス作成フロー中にスケーリング構成制御を追加しました。 -- メモリにデフォルトパスワードが存在しない場合の接続文字列を修正しました。 -### 統合の変更 {#integrations-changes-17} -- Golang クライアント: ネイティブプロトコルでの接続の不均衡につながる問題が修正され、ネイティブプロトコルでのカスタム設定のサポートが追加されました。 -- Nodejs クライアント: nodejs v14 のサポートが中止され、v20 のサポートが追加されました。 -- Kafka Connector: LowCardinality 型のサポートが追加されました。 -- Metabase: 時間範囲でのグループ化の修正、メタベースの質問での整数のサポートの改善が行われました。 -### パフォーマンスと信頼性 {#performance-and-reliability} -- 書き込みに重いワークロードの効率とパフォーマンスが改善されました。 -- バックアップの速度と効率を向上させるために増分バックアップ戦略が導入されました。 -## 2023年5月11日 {#may-11-2023} - -このリリースは、GCP 上の ClickHouse Cloud の ~~パブリックベータ~~ (現在 GA、上記の 6 月 20 日のエントリーを参照) のための一般公開 (詳細は [ブログ](https://clickhouse.com/blog/clickhouse-cloud-on-gcp-available-in-public-beta) を参照) 
をもたらし、クエリを終了するための管理者権限を拡張し、Cloud コンソールにおける MFA ユーザーのステータスへのより良い可視性を追加します。 - -### ClickHouse Cloud on GCP ~~(パブリックベータ)~~ (現在 GA、上記の 6 月 20 日のエントリーを参照) {#clickhouse-cloud-on-gcp-public-beta-now-ga-see-june-20th-entry-above} - -- ストレージとコンピューティングが分離されたフルマネージドの ClickHouse サービスを立ち上げました。Google Compute と Google Cloud Storage 上で実行されます。 -- アイオワ (us-central1)、オランダ (europe-west4)、シンガポール (asia-southeast1) リージョンで利用可能。 -- 3 つの初期リージョンで開発サービスと本番サービスの両方をサポートします。 -- デフォルトで強力なセキュリティを提供: 転送中のエンドツーエンドの暗号化、保存データの暗号化、IP アロウリスト。 -### 統合の変更 {#integrations-changes-18} -- Golang クライアント: プロキシ環境変数のサポートが追加されました。 -- Grafana: ClickHouse カスタム設定および Grafana データソースセットアップでのプロキシ環境変数の指定機能が追加されました。 -- Kafka Connector: 空のレコードの処理が改善されました。 -### コンソールの変更 {#console-changes-18} -- ユーザーリストにおける多要素認証 (MFA) の使用状況を示すインジケーターが追加されました。 -### パフォーマンスと信頼性 {#performance-and-reliability-1} -- 管理者用のクエリ終了権限に対するより粒度の高い制御が追加されました。 -## 2023年5月4日 {#may-4-2023} - -このリリースは、新しいヒートマップチャートタイプを追加し、請求使用ページを改善し、サービスの起動時間を改善します。 -### コンソールの変更 {#console-changes-19} -- SQL コンソールにヒートマップチャートタイプを追加しました。 -- 請求ディメンションごとに消費されたクレジットを表示するよう、請求使用ページが改善されました。 -### 統合の変更 {#integrations-changes-19} -- Kafka コネクタ: 一時的な接続エラーのための再試行メカニズムが追加されました。 -- Python クライアント: HTTP 接続が無期限に再利用され続けないようにする max_connection_age 設定が追加されました。これは、特定の負荷分散の問題に対処するのに役立ちます。 -- Node.js クライアント: Node.js v20 のサポートが追加されました。 -- Java クライアント: クライアント証明書認証のサポートが改善され、入れ子の Tuple/Map/ネストされた型のサポートが追加されました。 -### パフォーマンスと信頼性 {#performance-and-reliability-2} -- 大量のパーツが存在する場合のサービスの起動時間が改善されました。 -- SQL コンソールにおける長時間実行されるクエリのキャンセルロジックが最適化されました。 -### バグ修正 {#bug-fixes} -- 'Cell Towers' サンプルデータセットのインポートが失敗する原因となるバグが修正されました。 -## 2023年4月20日 {#april-20-2023} - -このリリースでは、ClickHouse バージョンが 23.3 に更新され、コールドリードの速度が大幅に改善され、サポートとのリアルタイムチャットが提供されています。 -### コンソールの変更 {#console-changes-20} -- サポートとのリアルタイムチャットオプションが追加されました。 -### 統合の変更 {#integrations-changes-20} -- Kafka コネクタ: Nullable 型のサポートが追加されました。 -- Golang クライアント: 外部テーブルのサポートが追加され、boolean およびポインタ型パラメータのバインディングが改善されました。 -### 設定の変更 {#configuration-changes} -- 大規模なテーブルを削除する機能が追加されました - `max_table_size_to_drop` および `max_partition_size_to_drop` 設定をオーバーライドします。 -### パフォーマンスと信頼性 {#performance-and-reliability-3} -- S3 プリフェッチを利用してコールドリードの速度を向上させる設定を追加しました: `allow_prefetched_read_pool_for_remote_filesystem`。 -### ClickHouse 23.3 バージョンアップグレード {#clickhouse-233-version-upgrade} -- 論理削除は生産準備が整いました - 詳細は 23.3 リリース [ブログ](https://clickhouse.com/blog/clickhouse-release-23-03) を参照ください。 -- マルチステージ PREWHERE のサポートが追加されました - 詳細は 23.2 リリース [ブログ](https://clickhouse.com/blog/clickhouse-release-23-03) を参照してください。 -- 数十の新機能、パフォーマンス改善、バグ修正が行われました - 詳細な [変更ログ](/whats-new/changelog/index.md) を 23.3 および 23.2 と共にご覧ください。 -## 2023年4月6日 {#april-6-2023} - -このリリースは、クラウドエンドポイントを取得するための API、最小アイドルタイムアウトのための高度なスケーリング制御、および Python クライアントのクエリメソッドでの外部データのサポートをもたらします。 -### API の変更 {#api-changes} -* [Cloud Endpoints API](/cloud/get-started/query-endpoints.md) を介して ClickHouse Cloud エンドポイントをプログラムでクエリする機能が追加されました。 -### コンソールの変更 {#console-changes-21} -- 高度なスケーリング設定に「最小アイドルタイムアウト」設定が追加されました。 -- データ読み込みモーダルのスキーマ推論に、ベストエフォートの日時検出が追加されました。 -### 統合の変更 {#integrations-changes-21} -- [Metabase](/integrations/data-visualization/metabase-and-clickhouse.md): 複数スキーマのサポートが追加されました。 -- [Go クライアント](/integrations/language-clients/go/index.md): TLS 接続のアイドル接続生存性検査が修正されました。 -- [Python クライアント](/integrations/language-clients/python/index.md) - - クエリメソッドに外部データのサポートが追加されました。 - - クエリ結果に対するタイムゾーンサポートが追加されました。 - - `no_proxy`/`NO_PROXY` 環境変数のサポートが追加されました。 - - Nullable 型に対する NULL 値のサーバー側パラメータバインディングが修正されました。 -### バグ修正 {#bug-fixes-1} -* SQL コンソールから `INSERT INTO ... 
SELECT ...` を実行すると、SELECT クエリと同じ行制限が適用されるという動作が修正されました。 -## 2023年3月23日 {#march-23-2023} - -このリリースでは、データベースパスワードの複雑さルール、大規模バックアップの復元速度の大幅な向上、Grafana トレースビューでのトレースの表示に対するサポートを追加します。 -### セキュリティと信頼性 {#security-and-reliability} -- コアデータベースエンドポイントは、パスワードの複雑さルールを強制します。 -- 大規模バックアップの復元時間が改善されました。 -### コンソールの変更 {#console-changes-22} -- オンボーディングフローが簡素化され、新しいデフォルトとよりコンパクトなビューが導入されました。 -- サインアップおよびサインインの待機時間が短縮されました。 -### 統合の変更 {#integrations-changes-22} -- Grafana: - - ClickHouse に保存されたトレースデータをトレースビューで表示するサポートが追加されました。 - - 時間範囲フィルターが改善され、テーブル名に特殊文字のサポートが追加されました。 -- Superset: ClickHouse のネイティブサポートが追加されました。 -- Kafka Connect Sink: 自動日付変換と Null カラム処理が追加されました。 -- Metabase: 一時テーブルへの挿入が修正され、Pandas Null のサポートが追加されました。 -- Golang クライアント: タイムゾーンを持つ Date 型が正規化されました。 -- Java クライアント - - 圧縮、infile、outfile キーワードを SQL パーサーにサポートとして追加しました。 - - 認証情報のオーバーロードが追加されました。 - - `ON CLUSTER` とのバッチサポートが修正されました。 -- Node.js クライアント - - JSONStrings、JSONCompact、JSONCompactStrings、JSONColumnsWithMetadata 形式のサポートが追加されました。 - - `query_id` はすべての主要なクライアントメソッドで提供できるようになりました。 -### バグ修正 {#bug-fixes-2} -- 新しいサービスの初期プロビジョニングと起動時間が遅くなる原因となるバグが修正されました。 -- キャッシュの誤設定が原因でクエリのパフォーマンスが低下する結果となるバグが修正されました。 -## 2023年3月9日 {#march-9-2023} - -このリリースでは、可視性ダッシュボードが改善され、大規模バックアップの作成時間を最適化し、大規模テーブルやパーティションを削除するために必要な設定が追加されます。 -### コンソールの変更 {#console-changes-23} -- 高度な可視性ダッシュボード (プレビュー) が追加されました。 -- 可視性ダッシュボードにメモリアロケーションチャートが追加されました。 -- SQL コンソールのスプレッドシートビューでのスペースおよび改行処理が改善されました。 -### 信頼性およびパフォーマンス {#reliability-and-performance} -- バックアップスケジュールを最適化し、データが変更された場合のみバックアップを実行するようにしました。 -- 大規模バックアップの完了時間が改善されました。 -### 設定の変更 {#configuration-changes-1} -- 大規模なテーブルやパーティションを削除するための制限を設定をオーバーライドすることで増加させる機能が追加されました。これには `max_table_size_to_drop` および `max_partition_size_to_drop` 設定が含まれます。 -- クエリログにソース IP を追加し、ソース IP に基づいたクォータおよびアクセス制御の強制を可能にしました。 -### 統合 {#integrations} -- [Python クライアント](/integrations/language-clients/python/index.md): Pandas サポートが改善され、タイムゾーン関連の問題が修正されました。 -- [Metabase](/integrations/data-visualization/metabase-and-clickhouse.md): Metabase 0.46.x 互換性および SimpleAggregateFunction のサポートが追加されました。 -- [Kafka-Connect](/integrations/data-ingestion/kafka/index.md): 暗黙の日時変換および Null カラムの処理が改善されました。 -- [Java クライアント](https://github.com/ClickHouse/clickhouse-java): Java マップへの入れ子の変換が追加されました。 -## 2023年2月23日 {#february-23-2023} - -このリリースでは、ClickHouse 23.1 のコアリリースのサブセットの機能が有効になり、Amazon Managed Streaming for Apache Kafka (MSK) との相互運用性が提供され、アクティビティログに高度なスケーリングおよびアイドル調整が公開されます。 -### ClickHouse 23.1 バージョンアップグレード {#clickhouse-231-version-upgrade} - -ClickHouse 23.1 の機能のサブセットを追加します。たとえば: -- Map 型を使用した ARRAY JOIN -- SQL 標準の16進およびバイナリリテラル -- `age()`、`quantileInterpolatedWeighted()`、`quantilesInterpolatedWeighted()` などの新機能 -- 引数なしで `generateRandom` に挿入テーブルからの構造を使用する機能 -- 以前の名前の再利用を可能にするデータベース作成および名前変更ロジックの改善 -- より詳細については 23.1 リリース [ウェビナー スライド](https://presentations.clickhouse.com/release_23.1/#cover) および [23.1 リリース変更ログ](/whats-new/cloud#clickhouse-231-version-upgrade) を参照してください。 -### Integrations changes {#integrations-changes-23} -- [Kafka-Connect](/integrations/data-ingestion/kafka/index.md): Amazon MSKのサポートを追加 -- [Metabase](/integrations/data-visualization/metabase-and-clickhouse.md): 初の安定リリース1.0.0 - - [Metabase Cloud](https://www.metabase.com/start/)でコネクタが利用可能に - - 利用可能なすべてのデータベースを探索する機能を追加 - - AggregationFunctionタイプのデータベースの同期を修正 -- [DBT-clickhouse](/integrations/data-ingestion/etl-tools/dbt/index.md): 最新のDBTバージョンv1.4.1のサポートを追加 -- [Python client](/integrations/language-clients/python/index.md): プロキシとSSHトンネリングのサポートを改善; Pandas DataFramesのためにいくつかの修正とパフォーマンス最適化を追加 -- [Nodejs 
client](/integrations/language-clients/js.md): クエリ結果に`query_id`を添付する機能をリリースし、これを使用して`system.query_log`からクエリメトリクスを取得可能に -- [Golang client](/integrations/language-clients/go/index.md): ClickHouse Cloudとのネットワーク接続を最適化 -### Console changes {#console-changes-24} -- アクティビティログに高度なスケーリングとアイドリング設定調整を追加 -- パスワードリセットメールにユーザーエージェントとIP情報を追加 -- Google OAuthのサインアップフローメカニズムを改善 -### Reliability and performance {#reliability-and-performance-1} -- 大規模サービスのアイドルから再開する際の時間を短縮 -- 大量のテーブルとパーティションを持つサービスの読み取りレイテンシを改善 -### Bug fixes {#bug-fixes-3} -- サービスパスワードリセットがパスワードポリシーに従わない動作を修正 -- 組織招待メールの検証を大文字小文字を区別しないように変更 -## February 2, 2023 {#february-2-2023} - -このリリースは公式にサポートされたMetabase統合、主要なJavaクライアント/JDBCドライバーリリース、およびSQLコンソールでのビューとMaterialized Viewのサポートをもたらします。 -### Integrations changes {#integrations-changes-24} -- [Metabase](/integrations/data-visualization/metabase-and-clickhouse.md)プラグイン: ClickHouseによって維持される公式ソリューションになりました -- [dbt](/integrations/data-ingestion/etl-tools/dbt/index.md)プラグイン: [複数スレッド](https://github.com/ClickHouse/dbt-clickhouse/blob/main/CHANGELOG.md)のサポートを追加 -- [Grafana](/integrations/data-visualization/grafana/index.md)プラグイン: 接続エラーの処理が改善されました -- [Python](/integrations/language-clients/python/index.md)クライアント: 挿入操作のための[ストリーミングサポート](/integrations/language-clients/python/index.md#streaming-queries) -- [Go](/integrations/language-clients/go/index.md)クライアント: [バグ修正](https://github.com/ClickHouse/clickhouse-go/blob/main/CHANGELOG.md): キャンセルされた接続を閉じ、接続エラーの処理を改善 -- [JS](/integrations/language-clients/js.md)クライアント: [exec/insertの破壊的変更](https://github.com/ClickHouse/clickhouse-js/releases/tag/0.0.12); 戻り値の型でquery_idを公開 -- [Java](https://github.com/ClickHouse/clickhouse-java#readme)クライアント/JDBCドライバーのメジャーリリース - - [破壊的変更](https://github.com/ClickHouse/clickhouse-java/releases): 非推奨のメソッド、クラス、パッケージが削除されました - - R2DBCドライバーとファイル挿入のサポートを追加 -### Console changes {#console-changes-25} -- SQLコンソールでのビューとMaterialized Viewのサポートを追加 -### Performance and reliability {#performance-and-reliability-4} -- 停止中/アイドル状態のインスタンスのパスワードリセットを迅速化 -- より正確なアクティビティトラッキングによるスケールダウンの動作を改善 -- SQLコンソールのCSVエクスポートがトリミングされるバグを修正 -- インターミッテントなサンプルデータアップロードの失敗を引き起こすバグを修正 -## January 12, 2023 {#january-12-2023} - -このリリースはClickHouseバージョンを22.12に更新し、多くの新しいソースのための辞書を有効にし、クエリパフォーマンスを改善します。 -### General changes {#general-changes-3} -- 外部ClickHouse、Cassandra、MongoDB、MySQL、PostgreSQL、Redisを含む追加のソースのために辞書を有効にしました -### ClickHouse 22.12 version upgrade {#clickhouse-2212-version-upgrade} -- JOINサポートをGrace Hash Joinを含むまで拡張 -- ファイルを読み込むためのBinary JSON (BSON)サポートを追加 -- GROUP BY ALL標準SQL構文のサポートを追加 -- 固定精度での小数演算のための新しい数学関数 -- 完全な変更リストについては[22.12リリースブログ](https://clickhouse.com/blog/clickhouse-release-22-12)と[詳細な22.12変更ログ](/whats-new/cloud#clickhouse-2212-version-upgrade)を参照してください -### Console changes {#console-changes-26} -- SQLコンソールのオートコンプリート機能を改善 -- デフォルトのリージョンが大陸のローカリティを考慮に入れるようになりました -- 課金使用状況ページを改善し、請求ユニットとウェブサイトユニットの両方を表示 -### Integrations changes {#integrations-changes-25} -- DBTリリース[v1.3.2](https://github.com/ClickHouse/dbt-clickhouse/blob/main/CHANGELOG.md#release-132-2022-12-23) - - delete+insertインクリメンタル戦略の実験的サポートを追加 - - 新しいs3sourceマクロ -- Python client[v0.4.8](https://github.com/ClickHouse/clickhouse-connect/blob/main/CHANGELOG.md#048-2023-01-02) - - ファイル挿入のサポート - - サーバー側クエリ[パラメータバインディング](/interfaces/cli.md/#cli-queries-with-parameters) -- Go client[v2.5.0](https://github.com/ClickHouse/clickhouse-go/releases/tag/v2.5.0) - - 圧縮のためのメモリ使用量を削減 - - サーバー側クエリ[パラメータバインディング](/interfaces/cli.md/#cli-queries-with-parameters) -### Reliability and performance 
{#reliability-and-performance-2} -- オブジェクトストアで多数の小ファイルを取得するクエリの読み取りパフォーマンスを改善 -- 新たに立ち上げるサービスに対して、サービスが最初に起動されたバージョンに対する[互換性](/operations/settings/settings#compatibility)設定を設定 -### Bug fixes {#bug-fixes-4} -- 高度なスケーリングスライダーを使用してリソースを予約することが即時に効果を持つようになりました。 -## December 20, 2022 {#december-20-2022} - -このリリースは管理者がSQLコンソールにシームレスにログインできるようにし、コールドリードの読み取りパフォーマンスを改善し、ClickHouse Cloud用のMetabaseコネクタを改善します。 -### Console changes {#console-changes-27} -- 管理者ユーザーに対してSQLコンソールへのシームレスアクセスを有効に -- 新しい招待者に対するデフォルトの役割を「管理者」に変更 -- オンボーディングサーベイを追加 -### Reliability and performance {#reliability-and-performance-3} -- ネットワーク障害が発生した場合にリカバリーするために、長時間実行される挿入クエリのための再試行ロジックを追加 -- コールドリードの読み取りパフォーマンスを改善 -### Integrations changes {#integrations-changes-26} -- [Metabaseプラグイン](/integrations/data-visualization/metabase-and-clickhouse.md)が長らく待たれたv0.9.1のメジャーアップデートを受けました。最新のMetabaseバージョンと互換性があり、ClickHouse Cloudに対して十分にテストされています。 -## December 6, 2022 - General Availability {#december-6-2022---general-availability} - -ClickHouse Cloudは、SOC2タイプIIのコンプライアンス、プロダクションワークロードの稼働時間SLA、および公開ステータスページをもって生産準備が整いました。このリリースには、AWS Marketplace統合、ClickHouseユーザーのためのデータ探索ワークベンチであるSQLコンソール、およびClickHouse Cloudでのセルフペースの学習を提供するClickHouse Academyなどの新しい大きな機能が含まれています。この[ブログ](https://clickhouse.com/blog/clickhouse-cloud-generally-available)で詳細を確認してください。 -### Production-ready {#production-ready} -- SOC2タイプIIのコンプライアンス (詳細は[ブログ](https://clickhouse.com/blog/clickhouse-cloud-is-now-soc-2-type-ii-compliant)と[Trust Center](https://trust.clickhouse.com/)を参照) -- ClickHouse Cloud用の公開[ステータスページ](https://status.clickhouse.com/) -- プロダクションのユースケース向けの稼働時間SLAを提供 -- [AWS Marketplace](https://aws.amazon.com/marketplace/pp/prodview-jettukeanwrfc)での利用可能性 -### Major new capabilities {#major-new-capabilities} -- ClickHouseユーザーのためのデータ探索ワークベンチであるSQLコンソールを導入 -- 自己学習型ClickHouse Cloudである[ClickHouse Academy](https://learn.clickhouse.com/visitor_class_catalog)を開始 -### Pricing and metering changes {#pricing-and-metering-changes} -- 試用期間を30日間に延長 -- スタータープロジェクトや開発/ステージング環境に適した固定容量、低月額の開発サービスを導入 -- ClickHouse Cloudの運用とスケーリングの改善に伴うプロダクションサービスの新たな低価格を導入 -- コンピュートの計測精度と信頼性を改善 -### Integrations changes {#integrations-changes-27} -- ClickHouse Postgres / MySQL統合エンジンのサポートを有効化 -- SQLユーザー定義関数 (UDF) のサポートを追加 -- 高度なKafka Connectシンクをベータステータスに -- バージョン、更新状況などのリッチなメタデータを導入し、統合UIを改善 -### Console changes {#console-changes-28} - -- クラウドコンソールでの多要素認証サポート -- モバイルデバイス向けのクラウドコンソールナビゲーションを改善 -### Documentation changes {#documentation-changes} - -- ClickHouse Cloud専用の[ドキュメント](/cloud/overview)セクションを導入 -### Bug fixes {#bug-fixes-5} -- バックアップからの復元が依存関係の解決により常に成功しない既知の問題に対処しました -## November 29, 2022 {#november-29-2022} - -このリリースはSOC2タイプIIコンプライアンスを達成し、ClickHouseバージョンを22.11に更新し、いくつかのClickHouseクライアントと統合を改善します。 -### General changes {#general-changes-4} - -- SOC2タイプIIコンプライアンスを達成 (詳細は[ブログ](https://clickhouse.com/blog/clickhouse-cloud-is-now-soc-2-type-ii-compliant)と[Trust Center](https://trust.clickhouse.com)を参照) -### Console changes {#console-changes-29} - -- サービスが自動的に一時停止されていることを示す「アイドル」ステータスインジケーターを追加 -### ClickHouse 22.11 version upgrade {#clickhouse-2211-version-upgrade} - -- HudiおよびDeltaLakeテーブルエンジンとテーブル関数のサポートを追加 -- S3に対する再帰的なディレクトリトラバースを改善 -- 複合時間間隔構文のサポートを追加 -- 挿入時の信頼性を改善 -- 完全な変更リストについては[詳細な22.11変更ログ](/whats-new/cloud#clickhouse-2211-version-upgrade)を参照してください -### Integrations {#integrations-1} - -- Python client: v3.11サポート、挿入パフォーマンスの改善 -- Go client: DateTimeおよびInt64のサポートを修正 -- JS client: 相互SSL認証のサポート -- dbt-clickhouse: DBT v1.3のサポート -### Bug fixes {#bug-fixes-6} - -- アップグレード後に古いClickHouseバージョンが表示されるバグを修正 -- 
「default」アカウントの権限を変更してもセッションが中断されないように -- 新たに作成された非管理者アカウントはデフォルトでシステムテーブルアクセスが無効に -### Known issues in this release {#known-issues-in-this-release} - -- バックアップからの復元が依存関係の解決により機能しない場合がある -## November 17, 2022 {#november-17-2022} - -このリリースはローカルClickHouseテーブルおよびHTTPソースからの辞書を有効にし、ムンバイ地域のサポートを導入し、クラウドコンソールのユーザーエクスペリエンスを改善します。 -### General changes {#general-changes-5} - -- ローカルClickHouseテーブルおよびHTTPソースからの[dictionaries](/sql-reference/dictionaries/index.md)のサポートを追加 -- ムンバイ[地域](/cloud/reference/supported-regions.md)のサポートを導入 -### Console changes {#console-changes-30} - -- 請求書のフォーマットを改善 -- 支払い方法の取り込みのためのユーザーインターフェイスを合理化 -- バックアップのためのより詳細なアクティビティロギングを追加 -- ファイルアップロード中のエラーハンドリングを改善 -### Bug fixes {#bug-fixes-7} -- 一部のパーツに単一の大きなファイルがある場合にバックアップが失敗する可能性のあるバグを修正 -- アクセスリストの変更が同時に適用された場合にバックアップからの復元が成功しないバグを修正 -### Known issues {#known-issues} -- バックアップからの復元が依存関係の解決により機能しない場合があります -## November 3, 2022 {#november-3-2022} - -このリリースは、価格から読み取りおよび書き込みユニットを削除し(詳細は[料金ページ](https://clickhouse.com/pricing)を参照)、ClickHouseバージョンを22.10に更新し、セルフサービス顧客向けのより高い垂直スケーリングをサポートし、より良いデフォルトにより信頼性を向上させます。 -### General changes {#general-changes-6} - -- 価格モデルから読み取り/書き込みユニットを削除 -### Configuration changes {#configuration-changes-2} - -- `allow_suspicious_low_cardinality_types`、`allow_suspicious_fixed_string_types`、`allow_suspicious_codecs`の設定(デフォルトはfalse)は安定性の理由から変更できなくなりました。 -### Console changes {#console-changes-31} - -- 支払い顧客向けに垂直スケーリングのセルフサービス最大を720GBメモリに増加 -- バックアップからの復元ワークフローを改善し、IPアクセスリストのルールおよびパスワードを設定 -- サービス作成ダイアログにGCPとAzureの待機リストを紹介 -- ファイルアップロード中のエラーハンドリングを改善 -- 請求管理のワークフローを改善 -### ClickHouse 22.10 version upgrade {#clickhouse-2210-version-upgrade} - -- 多数の大きなパーツが存在する場合の「パーツが多すぎる」しきい値を緩和し、オブジェクトストア上のマージを改善しました(少なくとも10 GiB)。これにより、単一のテーブルの単一パーティション内にペタバイト単位のデータが可能になります。 -- 一定の時間しきい値を超えた後にマージするために、`min_age_to_force_merge_seconds`設定でのマージの制御を改善。 -- 設定をリセットするためにMySQL互換の構文を追加しました `SET setting_name = DEFAULT`。 -- モートンカーブエンコーディング、Java整数ハッシュ、乱数生成用の関数を追加しました。 -- 完全な変更リストについては[詳細な22.10変更ログ](/whats-new/cloud#clickhouse-2210-version-upgrade)を参照してください。 -## October 25, 2022 {#october-25-2022} - -このリリースは小さいワークロードの計算消費を大幅に削減し、計算価格を引き下げ(詳細は[料金](https://clickhouse.com/pricing)ページを参照)、より良いデフォルトによる安定性を改善し、ClickHouse Cloudコンソールの請求および使用状況ビューを向上させます。 -### General changes {#general-changes-7} - -- 最小サービスメモリアロケーションを24Gに削減 -- サービスアイドルタイムアウトを30分から5分に削減 -### Configuration changes {#configuration-changes-3} - -- MergeTreeテーブルの`max_parts_in_total`設定のデフォルト値が100,000から10,000に引き下げられました。この変更の理由は、データパーツが大量にあると、クラウド内のサービスの起動時間が遅くなることが観察されたためです。大量のパーツは通常、誤ってあまりにも細かいパーティションキーを選択したことを示し、これは通常意図せず行われ、避けるべきです。デフォルトの変更により、これらのケースをより早く検出できるようになります。 -### Console changes {#console-changes-32} - -- 試用ユーザーの請求ビューでのクレジット使用詳細を強化 -- ツールチップとヘルプテキストを改善し、使用状況ビューに料金ページへのリンクを追加 -- IPフィルタリングオプションを切り替える際のワークフローを改善 -- クラウドコンソールに再送信メール確認ボタンを追加 -## October 4, 2022 - Beta {#october-4-2022---beta} - -ClickHouse Cloudは2022年10月4日にパブリックベータを開始しました。この[ブログ](https://clickhouse.com/blog/clickhouse-cloud-public-beta)で詳細を学んでください。 - -ClickHouse CloudバージョンはClickHouseコアv22.10に基づいています。互換性のある機能のリストについては、[Cloud Compatibility](/cloud/reference/cloud-compatibility.md)ガイドを参照してください。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/reference/changelog.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/reference/changelog.md.hash deleted file mode 100644 index 635dbb1baa3..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/reference/changelog.md.hash +++ /dev/null @@ -1 +0,0 @@ -50de021550d0718d diff --git 
a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/reference/changelogs-index.md b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/reference/changelogs-index.md deleted file mode 100644 index 020f7f8eb19..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/reference/changelogs-index.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -slug: '/cloud/reference/changelogs' -title: 'Changelogs' -description: 'クラウド変更履歴のランディングページ' ---- - - - -| Page | Description | -|---------------------------------------------------------------|-------------------------------------------------| -| [Cloud Changelog](/whats-new/cloud) | ClickHouse Cloud の変更ログ | -| [Release Notes](/cloud/reference/changelogs/release-notes) | すべての ClickHouse Cloud リリースのリリースノート | diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/reference/changelogs-index.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/reference/changelogs-index.md.hash deleted file mode 100644 index ac31602d21c..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/reference/changelogs-index.md.hash +++ /dev/null @@ -1 +0,0 @@ -fb5b3251b669307d diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/reference/cloud-compatibility.md b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/reference/cloud-compatibility.md deleted file mode 100644 index 66f5a96de0e..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/reference/cloud-compatibility.md +++ /dev/null @@ -1,127 +0,0 @@ ---- -slug: '/whats-new/cloud-compatibility' -sidebar_label: 'クラウド互換性' -title: 'クラウド互換性' -description: 'このガイドでは、ClickHouseクラウドで機能的および運用上何が期待されるかについて概説します。' ---- - - - - -# ClickHouse Cloud — 互換性ガイド - -このガイドは、ClickHouse Cloudの機能的および運用上の期待についての概要を提供します。ClickHouse CloudはオープンソースのClickHouseディストリビューションに基づいていますが、アーキテクチャや実装にいくつかの違いがある場合があります。バックグラウンドとして、[ClickHouse Cloudの構築方法](https://clickhouse.com/blog/building-clickhouse-cloud-from-scratch-in-a-year)についてのこのブログを読むのは興味深く関連性があるかもしれません。 - -## ClickHouse Cloud アーキテクチャ {#clickhouse-cloud-architecture} -ClickHouse Cloudは、運用コストを大幅に削減し、スケールでClickHouseを実行する際のコストを軽減します。デプロイメントのサイズを事前に決定したり、高可用性のためにレプリケーションを設定したり、手動でデータをシャーディングしたり、ワークロードが増えたときにサーバーをスケールアップしたり、使用していないときにダウンさせたりする必要はありません—これらはすべて私たちが処理します。 - -これらの利点は、ClickHouse Cloudのアーキテクチャに基づく選択の結果です: -- コンピュートとストレージは分離されており、したがって別の次元に沿って自動的にスケールできるため、静的インスタンス構成でストレージまたはコンピュートを過剰にプロビジョニングする必要がありません。 -- オブジェクトストレージの上にある階層型ストレージとマルチレベルキャッシングは、事実上無限のスケーリングを提供し、良好な価格/パフォーマンス比を提供するため、ストレージパーティションのサイズを事前に決定する必要がなく、高額なストレージコストについて心配する必要がありません。 -- 高可用性はデフォルトでオンであり、レプリケーションは透過的に管理されるため、アプリケーションの構築やデータの分析に集中できます。 -- 変動する継続的なワークロードのための自動スケーリングはデフォルトでオンであり、サービスのサイズを事前に決定したり、ワークロードが増えたときにサーバーをスケールアップしたり、活動が少ないときに手動でサーバーをスケールダウンしたりする必要がありません。 -- 断続的なワークロードのためのシームレスなハイバーネーションはデフォルトでオンです。非活動期間の後、コンピュートリソースを自動的に一時停止し、新しいクエリが到着したときに透過的に再開するため、アイドル状態のリソースに対して支払う必要がありません。 -- 高度なスケーリングコントロールは、追加のコスト管理のための自動スケーリング最大値を設定したり、専門的なパフォーマンス要件を持つアプリケーションのためにコンピュートリソースを予約する自動スケーリング最小値を設定する機能を提供します。 - -## 機能 {#capabilities} -ClickHouse Cloudは、オープンソースのClickHouseディストリビューションの中で厳選された機能セットへのアクセスを提供します。以下の表は、現在ClickHouse Cloudで無効になっているいくつかの機能を示しています。 - -### DDL構文 {#ddl-syntax} -ほとんどの場合、ClickHouse CloudのDDL構文はセルフマネージドインストールで利用可能なものと一致します。いくつかの注目すべき例外: - - 現在サポートされていない`CREATE AS SELECT`のサポート。回避策として、`CREATE ... EMPTY ... AS SELECT`を使用し、そのテーブルに挿入することをお勧めします(例については[このブログ](https://clickhouse.com/blog/getting-data-into-clickhouse-part-1)を参照してください)。 - - 一部の実験的構文は無効にされている場合があります。たとえば、`ALTER TABLE ... 
MODIFY QUERY`ステートメント。 - - セキュリティ上の理由から、一部のイントロスペクション機能が無効にされている場合があります。たとえば、`addressToLine` SQL関数。 - - ClickHouse Cloudでは`ON CLUSTER`パラメータを使用しないでください-これは必要ありません。これらはほとんどが効果のない関数ですが、[マクロ](/operations/server-configuration-parameters/settings#macros)を使用しようとするとエラーが発生する可能性があります。マクロは通常、ClickHouse Cloudでは機能せず、必要ありません。 - -### データベースおよびテーブルエンジン {#database-and-table-engines} - -ClickHouse Cloudはデフォルトで高可用性のあるレプリケートされたサービスを提供します。その結果、すべてのデータベースおよびテーブルエンジンは「Replicated」です。「Replicated」を指定する必要はありません—たとえば、`ReplicatedMergeTree`と`MergeTree`はClickHouse Cloudで使用されるときに同じです。 - -**サポートされているテーブルエンジン** - - - ReplicatedMergeTree(デフォルト、指定がない場合) - - ReplicatedSummingMergeTree - - ReplicatedAggregatingMergeTree - - ReplicatedReplacingMergeTree - - ReplicatedCollapsingMergeTree - - ReplicatedVersionedCollapsingMergeTree - - MergeTree(ReplicatedMergeTreeに変換される) - - SummingMergeTree(ReplicatedSummingMergeTreeに変換される) - - AggregatingMergeTree(ReplicatedAggregatingMergeTreeに変換される) - - ReplacingMergeTree(ReplicatedReplacingMergeTreeに変換される) - - CollapsingMergeTree(ReplicatedCollapsingMergeTreeに変換される) - - VersionedCollapsingMergeTree(ReplicatedVersionedCollapsingMergeTreeに変換される) - - URL - - View - - MaterializedView - - GenerateRandom - - Null - - Buffer - - Memory - - Deltalake - - Hudi - - MySQL - - MongoDB - - NATS - - RabbitMQ - - PostgreSQL - - S3 - -### インターフェース {#interfaces} -ClickHouse CloudはHTTPS、ネイティブインターフェース、および[MySQLワイヤプロトコル](/interfaces/mysql)をサポートしています。Postgresなどの他のインターフェースのサポートはまもなく登場します。 - -### 辞書 {#dictionaries} -辞書は、ClickHouseでのルックアップを高速化するための一般的な方法です。ClickHouse Cloudは現在、PostgreSQL、MySQL、リモートおよびローカルのClickHouseサーバー、Redis、MongoDB、HTTPソースからの辞書をサポートしています。 - -### フェデレーションクエリ {#federated-queries} -私たちは、クラウド内でのクロスクラスター通信や、外部セルフマネージドClickHouseクラスターとの通信のために、フェデレーションClickHouseクエリをサポートしています。ClickHouse Cloudは現在、次の統合エンジンを使用したフェデレーションクエリをサポートしています: - - Deltalake - - Hudi - - MySQL - - MongoDB - - NATS - - RabbitMQ - - PostgreSQL - - S3 - -SQLite、ODBC、JDBC、Redis、HDFS、Hiveなどの一部外部データベースおよびテーブルエンジンとのフェデレーションクエリはまだサポートされていません。 - -### ユーザー定義関数 {#user-defined-functions} - -ユーザー定義関数は、ClickHouseの最近の機能です。ClickHouse Cloudは現在SQL UDFのみをサポートしています。 - -### 実験的機能 {#experimental-features} - -実験的機能は、サービスの展開の安定性を確保するためにClickHouse Cloudサービスでは無効になっています。 - -### Kafka {#kafka} - -[Kafkaテーブルエンジン](/integrations/data-ingestion/kafka/index.md)はClickHouse Cloudで一般的に利用できません。代わりに、Kafka接続コンポーネントをClickHouseサービスから切り離すアーキテクチャを利用して、関心の分離を実現することをお勧めします。Kafkaストリームからデータを抽出するためには[ClickPipes](https://clickhouse.com/cloud/clickpipes)をお勧めします。あるいは、[Kafkaユーザーガイド](/integrations/data-ingestion/kafka/index.md)に記載されているプッシュベースの代替案を検討してください。 - -### 名前付きコレクション {#named-collections} - -[名前付きコレクション](/operations/named-collections)は現在ClickHouse Cloudではサポートされていません。 - -## 運用デフォルトと考慮事項 {#operational-defaults-and-considerations} -以下はClickHouse Cloudサービスのデフォルト設定です。場合によっては、これらの設定はサービスの正しい動作を確保するために固定されており、他の場合には調整可能です。 - -### 運用制限 {#operational-limits} - -#### `max_parts_in_total: 10,000` {#max_parts_in_total-10000} -MergeTreeテーブルの`max_parts_in_total`設定のデフォルト値が100,000から10,000に引き下げられました。この変更の理由は、大量のデータパートがクラウド内のサービスの起動時間を遅くする可能性があることを観察したためです。大量のパーツは通常、意図せずに選択されたあまりにも細かいパーティションキーの選択を示しており、これを避けるべきです。デフォルトの変更により、これらのケースをより早く検出できるようになります。 - -#### `max_concurrent_queries: 1,000` {#max_concurrent_queries-1000} -デフォルトの`100`からこのサーバーごとの設定を`1000`に増加させ、より多くの同時実行を許可します。これにより、提供されるティアサービスの`number of replicas * 1,000`の同時クエリが実現します。単一のレプリカに制限される基本ティアサービスでは`1000`の同時クエリを、`1000+`はスケールおよびエンタープライズに対して、構成されたレプリカの数に応じて許可されます。 - -#### `max_table_size_to_drop: 1,000,000,000,000` 
{#max_table_size_to_drop-1000000000000} -テーブル/パーティションを最大1TBまで削除できるように、設定を50GBから増加しました。 - -### システム設定 {#system-settings} -ClickHouse Cloudは変動するワークロードに合わせて調整されており、そのためほとんどのシステム設定は現在調整可能ではありません。ほとんどのユーザーがシステム設定を調整する必要がないと予想していますが、システムチューニングに関する質問がある場合は、ClickHouse Cloudサポートにお問い合わせください。 - -### 高度なセキュリティ管理 {#advanced-security-administration} -ClickHouseサービスを作成する一環として、デフォルトのデータベースと、このデータベースに広範な権限を持つデフォルトユーザーを作成します。この初期ユーザーは追加のユーザーを作成し、そのユーザーにこのデータベースへの権限を割り当てることができます。これを超えて、Kerberos、LDAP、またはSSL X.509証明書認証を使用してデータベース内の以下のセキュリティ機能を有効にする機能は、現在サポートされていません。 - -## ロードマップ {#roadmap} - -私たちは、クラウド内での実行可能なUDFのサポートを導入し、多くの他の機能の需要を評価しています。フィードバックがあり、特定の機能をリクエストしたい場合は、[こちらから提出してください](https://console.clickhouse.cloud/support)。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/reference/cloud-compatibility.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/reference/cloud-compatibility.md.hash deleted file mode 100644 index c8103269eef..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/reference/cloud-compatibility.md.hash +++ /dev/null @@ -1 +0,0 @@ -7de3b31c157c42a9 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/reference/compute-compute-separation.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/reference/compute-compute-separation.md.hash deleted file mode 100644 index 7656243c398..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/reference/compute-compute-separation.md.hash +++ /dev/null @@ -1 +0,0 @@ -04deab2bed1f82b2 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/reference/index.md b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/reference/index.md deleted file mode 100644 index 668a6b3c620..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/reference/index.md +++ /dev/null @@ -1,33 +0,0 @@ ---- -slug: '/cloud/reference' -keywords: -- 'Cloud' -- 'reference' -- 'architecture' -- 'SharedMergeTree' -- 'Compute-Compute Separation' -- 'Bring Your Own Cloud' -- 'Changelogs' -- 'Supported Cloud Regions' -- 'Cloud Compatibility' -title: '概要' -hide_title: true -description: 'Cloudリファレンスセクションのランディングページ' ---- - - - - -# Cloud Reference - -このセクションは、ClickHouse Cloudのより技術的な詳細に関するリファレンスガイドとして機能し、以下のページが含まれています。 - -| ページ | 説明 | -|-----------------------------------|------------------------------------------------------------------------------------------------------| -| [Architecture](/cloud/reference/architecture) | ClickHouse Cloudのアーキテクチャについて、ストレージ、コンピュート、管理、およびセキュリティを含めて説明します。 | -| [SharedMergeTree](/cloud/reference/shared-merge-tree) | ReplicatedMergeTreeおよびその類似物のクラウドネイティブな代替であるSharedMergeTreeの解説。 | -| [Warehouses](/cloud/reference/warehouses) | ClickHouse CloudにおけるWarehousesとCompute-Computeの分離についての解説。 | -| [BYOC (Bring Your Own Cloud)](/cloud/reference/byoc)| ClickHouse Cloudで利用可能なBring Your Own Cloud (BYOC)サービスについての解説。 | -| [Changelogs](/cloud/reference/changelogs) | Cloudの変更履歴とリリースノート。 | -| [Cloud Compatibility](/whats-new/cloud-compatibility) | ClickHouse Cloudにおける機能的および運用的な期待についてのガイド。 | -| [Supported Cloud Regions](/cloud/reference/supported-regions) | AWS、Google、Azureのサポートされているクラウドリージョンのリスト。 | diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/reference/index.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/reference/index.md.hash deleted file mode 100644 index 9db623ba8ef..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/reference/index.md.hash +++ /dev/null @@ -1 +0,0 @@ -45ff30305843593b diff --git 
a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/reference/release-notes-index.md b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/reference/release-notes-index.md deleted file mode 100644 index 560c42f0d97..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/reference/release-notes-index.md +++ /dev/null @@ -1,20 +0,0 @@ ---- -slug: '/cloud/reference/changelogs/release-notes' -title: 'Cloud Release Notes' -description: 'Landing page for Cloud release notes' ---- - - - - - -| ページ | 説明 | -|-----|-----| -| [v24.5 Cloudの変更履歴](/changelogs/24.5) | v24.5のファストリリース変更履歴 | -| [v24.10 Cloudの変更履歴](/changelogs/24.10) | v24.10のファストリリース変更履歴 | -| [v24.8 Cloudの変更履歴](/changelogs/24.8) | v24.8のファストリリース変更履歴 | -| [v24.12 Cloudの変更履歴](/changelogs/24.12) | v24.12のファストリリース変更履歴 | -| [v24.6 Cloudの変更履歴](/changelogs/24.6) | v24.6のファストリリース変更履歴 | diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/reference/release-notes-index.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/reference/release-notes-index.md.hash deleted file mode 100644 index 746a708914b..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/reference/release-notes-index.md.hash +++ /dev/null @@ -1 +0,0 @@ -e78f3769709d1b41 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/reference/shared-merge-tree.md b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/reference/shared-merge-tree.md deleted file mode 100644 index 16251055528..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/reference/shared-merge-tree.md +++ /dev/null @@ -1,126 +0,0 @@ ---- -slug: '/cloud/reference/shared-merge-tree' -sidebar_label: 'SharedMergeTree' -title: 'SharedMergeTree' -keywords: -- 'SharedMergeTree' -description: 'SharedMergeTree テーブルエンジンについて説明します' ---- - -import shared_merge_tree from '@site/static/images/cloud/reference/shared-merge-tree-1.png'; -import shared_merge_tree_2 from '@site/static/images/cloud/reference/shared-merge-tree-2.png'; -import Image from '@theme/IdealImage'; - - - -# SharedMergeTree テーブルエンジン - -*\* ClickHouse Cloud(および第一パーティパートナーのクラウドサービス)でのみ利用可能* - -SharedMergeTree テーブルエンジンファミリーは、ReplicatedMergeTree エンジンのクラウドネイティブな代替であり、共有ストレージ(例:Amazon S3、Google Cloud Storage、MinIO、Azure Blob Storage)上で動作するように最適化されています。各特定の MergeTree エンジンタイプには SharedMergeTree のアナログがあります。即ち、ReplacingSharedMergeTree は ReplacingReplicatedMergeTree を置き換えます。 - -SharedMergeTree テーブルエンジンファミリーは ClickHouse Cloud を支えています。エンドユーザーにとって、ReplicatedMergeTree ベースのエンジンの代わりに SharedMergeTree エンジンファミリーを使用するために必要な変更はありません。以下の追加メリットを提供します: - -- より高い挿入スループット -- バックグラウンドマージのスループットの向上 -- ミューテーションのスループットの向上 -- スケールアップおよびスケールダウン操作の高速化 -- セレクトクエリに対するより軽量な強い整合性 - -SharedMergeTree がもたらす重要な改善の一つは、ReplicatedMergeTree に比べてコンピュートとストレージの深い分離を提供することです。以下に、ReplicatedMergeTree がどのようにコンピュートとストレージを分離しているかを示します: - -ReplicatedMergeTree 図 - -ご覧の通り、ReplicatedMergeTree に保存されているデータはオブジェクトストレージにありますが、メタデータは依然として各 clickhouse-server に存在します。これは、すべてのレプリケーション操作に対してメタデータもすべてのレプリカに複製する必要があることを意味します。 - -メタデータを持つ ReplicatedMergeTree 図 - -ReplicatedMergeTree とは異なり、SharedMergeTree はレプリカ同士の通信を必要としません。代わりに、すべての通信は共有ストレージと clickhouse-keeper を通じて行われます。SharedMergeTree は非同期リーダーレスレプリケーションを実装し、コーディネーションとメタデータストレージには clickhouse-keeper を使用します。これにより、サービスのスケールアップおよびスケールダウンに伴い、メタデータを複製する必要がなくなります。これにより、より迅速なレプリケーション、ミューテーション、マージ、スケールアップ操作が行えます。SharedMergeTree は各テーブルに対して数百のレプリカを許容し、シャードなしで動的にスケールすることが可能です。ClickHouse Cloud では分散クエリエグゼキューションアプローチを利用して、クエリのためのより多くのコンピュートリソースを使用します。 - -## インストロスペクション {#introspection} - -ReplicatedMergeTree 
のインストロスペクションに使用されるほとんどのシステムテーブルは SharedMergeTree にも存在しますが、`system.replication_queue` と `system.replicated_fetches` はデータとメタデータの複製が行われないため存在しません。しかし、SharedMergeTree にはこれら2つのテーブルに対応する代替があります。 - -**system.virtual_parts** - -このテーブルは SharedMergeTree の `system.replication_queue` の代替として機能します。現在のパーツの最も最近のセットと、マージ、ミューテーション、削除されたパーティションなどの進行中の将来のパーツに関する情報を格納します。 - -**system.shared_merge_tree_fetches** - -このテーブルは SharedMergeTree の `system.replicated_fetches` の代替です。プライマリキーとチェックサムのメモリ内にある現在の進行中のフェッチに関する情報を含みます。 - -## SharedMergeTree の有効化 {#enabling-sharedmergetree} - -`SharedMergeTree` はデフォルトで有効です。 - -SharedMergeTree テーブルエンジンをサポートするサービスでは、手動で有効にする必要はありません。以前と同様にテーブルを作成でき、CREATE TABLE クエリで指定されたエンジンに対応する SharedMergeTree ベースのテーブルエンジンが自動的に使用されます。 - -```sql -CREATE TABLE my_table( - key UInt64, - value String -) -ENGINE = MergeTree -ORDER BY key -``` - -これにより、SharedMergeTree テーブルエンジンを使用して `my_table` テーブルが作成されます。 - -ClickHouse Cloud では `ENGINE=MergeTree` を指定する必要はありません。以下のクエリは上記のクエリと同じです。 - -```sql -CREATE TABLE my_table( - key UInt64, - value String -) -ORDER BY key -``` - -Replacing、Collapsing、Aggregating、Summing、VersionedCollapsing、または Graphite MergeTree テーブルを使用すると、自動的に対応する SharedMergeTree ベースのテーブルエンジンに変換されます。 - -```sql -CREATE TABLE myFirstReplacingMT -( - `key` Int64, - `someCol` String, - `eventTime` DateTime -) -ENGINE = ReplacingMergeTree -ORDER BY key; -``` - -特定のテーブルについて、どのテーブルエンジンが `CREATE TABLE` ステートメントで使用されたかを `SHOW CREATE TABLE` で確認できます: -``` sql -SHOW CREATE TABLE myFirstReplacingMT; -``` - -```sql -CREATE TABLE default.myFirstReplacingMT -( `key` Int64, `someCol` String, `eventTime` DateTime ) -ENGINE = SharedReplacingMergeTree('/clickhouse/tables/{uuid}/{shard}', '{replica}') -ORDER BY key -``` - -## 設定 {#settings} - -いくつかの設定の動作は大きく変更されています: - -- `insert_quorum` -- SharedMergeTree へのすべての挿入がクォーラム挿入(共有ストレージに書き込まれます)であるため、SharedMergeTree テーブルエンジンを使用する際にこの設定は必要ありません。 -- `insert_quorum_parallel` -- SharedMergeTree へのすべての挿入がクォーラム挿入(共有ストレージに書き込まれます)であるため、SharedMergeTree テーブルエンジンを使用する際にこの設定は必要ありません。 -- `select_sequential_consistency` -- クォーラム挿入を必要とせず、`SELECT` クエリでクリックハウスキーパーに追加の負荷をかけます。 - -## 一貫性 {#consistency} - -SharedMergeTree は ReplicatedMergeTree よりも優れた軽量な一貫性を提供します。SharedMergeTree に挿入する際、`insert_quorum` や `insert_quorum_parallel` などの設定を提供する必要はありません。挿入はクォーラム挿入であり、メタデータは ClickHouse-Keeper に格納され、メタデータは少なくともクォーラムの ClickHouse-Keeper に複製されます。クラスター内の各レプリカは ClickHouse-Keeper から新しい情報を非同期的にフェッチします。 - -ほとんどの場合、`select_sequential_consistency` や `SYSTEM SYNC REPLICA LIGHTWEIGHT` を使用する必要はありません。非同期レプリケーションはほとんどのシナリオをカバーし、非常に低いレイテンシを持っています。古い読み取りを防ぐ必要がある珍しいケースでは、以下の推奨事項に従ってください。 - -1. 読み取りと書き込みが同じセッションや同じノードで行われている場合、`select_sequential_consistency` は必要ありません。なぜなら、あなたのレプリカはすでに最新のメタデータを持っているからです。 - -2. 1つのレプリカに書き込み、別のレプリカから読み取る場合は、`SYSTEM SYNC REPLICA LIGHTWEIGHT` を使用してレプリカが ClickHouse-Keeper からメタデータをフェッチするように強制できます。 - -3. 
クエリの一部として設定として `select_sequential_consistency` を使用します。 - -## 関連コンテンツ {#related-content} - -- [ClickHouse Cloud は SharedMergeTree と Lightweight Updates で性能を向上させます](https://clickhouse.com/blog/clickhouse-cloud-boosts-performance-with-sharedmergetree-and-lightweight-updates) diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/reference/shared-merge-tree.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/reference/shared-merge-tree.md.hash deleted file mode 100644 index 0e4d71746ca..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/reference/shared-merge-tree.md.hash +++ /dev/null @@ -1 +0,0 @@ -a06f9d55284f5f7b diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/reference/supported-regions.md b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/reference/supported-regions.md deleted file mode 100644 index bfb2ccccfd5..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/reference/supported-regions.md +++ /dev/null @@ -1,107 +0,0 @@ ---- -title: 'Supported Cloud Regions' -sidebar_label: 'Supported Cloud Regions' -keywords: -- 'aws' -- 'gcp' -- 'google cloud' -- 'azure' -- 'cloud' -- 'regions' -description: 'Supported regions for ClickHouse Cloud' -slug: '/cloud/reference/supported-regions' ---- - -import EnterprisePlanFeatureBadge from '@theme/badges/EnterprisePlanFeatureBadge' - - -# サポートされているクラウドリージョン - -## AWSリージョン {#aws-regions} - -- ap-northeast-1 (東京) -- ap-south-1 (ムンバイ) -- ap-southeast-1 (シンガポール) -- ap-southeast-2 (シドニー) -- eu-central-1 (フランクフルト) -- eu-west-1 (アイルランド) -- eu-west-2 (ロンドン) -- me-central-1 (UAE) -- us-east-1 (バージニア州北部) -- us-east-2 (オハイオ) -- us-west-2 (オレゴン) - -**検討中:** -- ca-central-1 (カナダ) -- af-south-1 (南アフリカ) -- eu-north-1 (ストックホルム) -- sa-east-1 (南アメリカ) -- ap-northeast-2 (韓国、ソウル) - -## Google Cloudリージョン {#google-cloud-regions} - -- asia-southeast1 (シンガポール) -- europe-west4 (オランダ) -- us-central1 (アイオワ) -- us-east1 (サウスカロライナ) - -**検討中:** - -- us-west1 (オレゴン) -- australia-southeast1 (シドニー) -- asia-northeast1 (東京) -- europe-west3 (フランクフルト) -- europe-west6 (チューリッヒ) -- northamerica-northeast1 (モントリオール) - -## Azureリージョン {#azure-regions} - -- West US 3 (アリゾナ) -- East US 2 (バージニア) -- Germany West Central (フランクフルト) - -**検討中:** - -JapanEast -:::note -現在リストにないリージョンにデプロイが必要ですか? 
[リクエストを送信](https://clickhouse.com/pricing?modal=open)してください。 -::: - -## プライベートリージョン {#private-regions} - - - -企業向けプランサービスにはプライベートリージョンをご利用いただけます。プライベートリージョンのリクエストについては、[お問い合わせ](https://clickhouse.com/company/contact)ください。 - -プライベートリージョンに関する重要な考慮事項: -- サービスは自動スケールしません。 -- サービスは停止またはアイドル状態にできません。 -- マニュアルスケーリング(垂直および水平の両方)はサポートチケットで有効にできます。 -- サービスがCMEKでの設定を必要とする場合、顧客はサービス開始時にAWS KMSキーを提供する必要があります。 -- 新たなサービスを起動するためには、リクエストをサポートチケットを通じて行う必要があります。 - -HIPAAコンプライアンスに関しては追加の要件がある場合があります(BAAへの署名を含む)。HIPAAは現在、企業向けプランサービスのみで利用可能です。 - -## HIPAA準拠リージョン {#hipaa-compliant-regions} - - - -顧客はビジネスアソシエイト契約(BAA)に署名し、営業またはサポートを通じてオンボーディングをリクエストする必要があります。HIPAA準拠リージョンは以下の通りです: -- AWS eu-central-1 (フランクフルト) -- AWS eu-west-2 (ロンドン) -- AWS us-east-1 (バージニア州北部) -- AWS us-east-2 (オハイオ) -- AWS us-west-2 (オレゴン) -- GCP us-central1 (アイオワ) -- GCP us-east1 (サウスカロライナ) - -## PCI準拠リージョン {#pci-compliant-regions} - - - -顧客はPCI準拠リージョンでサービスを設定するために営業またはサポートを通じてオンボーディングをリクエストする必要があります。PCI準拠をサポートするリージョンは以下の通りです: -- AWS eu-central-1 (フランクフルト) -- AWS eu-west-2 (ロンドン) -- AWS us-east-1 (バージニア州北部) -- AWS us-east-2 (オハイオ) -- AWS us-west-2 (オレゴン) diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/reference/supported-regions.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/reference/supported-regions.md.hash deleted file mode 100644 index 4abb0067808..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/reference/supported-regions.md.hash +++ /dev/null @@ -1 +0,0 @@ -f2d355a55a35b571 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/reference/warehouses.md b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/reference/warehouses.md deleted file mode 100644 index 045f3609ffd..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/reference/warehouses.md +++ /dev/null @@ -1,189 +0,0 @@ ---- -title: 'Warehouses' -slug: '/cloud/reference/warehouses' -keywords: -- 'compute separation' -- 'cloud' -- 'architecture' -- 'compute-compute' -- 'warehouse' -- 'warehouses' -- 'hydra' -description: 'ClickHouse Cloud における Compute-compute separation' ---- - -import compute_1 from '@site/static/images/cloud/reference/compute-compute-1.png'; -import compute_2 from '@site/static/images/cloud/reference/compute-compute-2.png'; -import compute_3 from '@site/static/images/cloud/reference/compute-compute-3.png'; -import compute_4 from '@site/static/images/cloud/reference/compute-compute-4.png'; -import compute_5 from '@site/static/images/cloud/reference/compute-compute-5.png'; -import compute_7 from '@site/static/images/cloud/reference/compute-compute-7.png'; -import compute_8 from '@site/static/images/cloud/reference/compute-compute-8.png'; -import Image from '@theme/IdealImage'; - - - -# ウェアハウス - -## コンピュート-コンピュート分離とは何ですか? {#what-is-compute-compute-separation} - -コンピュート-コンピュート分離は、ScaleおよびEnterpriseのティアで利用可能です。 - -各ClickHouse Cloudサービスには以下が含まれます: -- 2つ以上のClickHouseノード(またはレプリカ)のグループが必要ですが、子サービスは単一のレプリカでもかまいません。 -- サービスに接続するために使用するサービスURLであるエンドポイント(またはClickHouse Cloud UIコンソールを介して作成された複数のエンドポイント)。 -- サービスがすべてのデータを保存するオブジェクトストレージフォルダー(部分的なメタデータも含む): - -:::note -子サービスは、単一の親サービスとは異なり、垂直スケールが可能です。 -::: - -ClickHouse Cloudの現在のサービス - -
- -_Fig. 1 - ClickHouse Cloudの現在のサービス_ - -コンピュート-コンピュート分離により、ユーザーは複数のコンピュートノードグループを作成でき、それぞれ独自のエンドポイントを持ち、同じオブジェクトストレージフォルダーを使用するため、同じテーブル、ビューなどを使用することができます。 - -各コンピュートノードグループは独自のエンドポイントを持つため、ワークロードに使用するレプリカのセットを選択できます。ワークロードの中には、1つの小さなレプリカだけで満足するものもありますが、他のものは高可用性(HA)と数百ギガのメモリを必要とするかもしれません。コンピュート-コンピュート分離は、読み取り操作と書き込み操作を分離して、それらが互いに干渉しないようにすることも可能にします: - -ClickHouse Cloudにおけるコンピュートの分離 - -
- -_Fig. 2 - ClickHouse Cloudにおけるコンピュートの分離_ - -既存のサービスと同じデータを共有する追加サービスを作成したり、同じデータを共有する複数のサービスを持つ完全に新しいセットアップを作成することが可能です。 - -## ウェアハウスとは何ですか? {#what-is-a-warehouse} - -ClickHouse Cloudにおいて、_ウェアハウス_は同じデータを共有する一連のサービスです。 -各ウェアハウスには、主要サービス(最初に作成されたサービス)と、1つ以上の副次サービスがあります。例えば、以下のスクリーンショットでは、"DWH Prod"というウェアハウスが2つのサービスを持つ様子が見えます: - -- 主要サービス `DWH Prod` -- 副次サービス `DWH Prod Subservice` - -主要と副次サービスを持つウェアハウスの例 - -
- -_Fig. 3 - ウェアハウスの例_ - -ウェアハウス内のすべてのサービスは、同じ以下のものを共有します: - -- リージョン(例:us-east1) -- クラウドサービスプロバイダー(AWS、GCP、またはAzure) -- ClickHouseデータベースバージョン - -サービスを所属ウェアハウスでソートすることができます。 - -## アクセス制御 {#access-controls} - -### データベース資格情報 {#database-credentials} - -ウェアハウス内のすべてのサービスは、同じテーブルのセットを共有するため、それらの他のサービスへのアクセス制御も共有します。つまり、サービス1で作成されたすべてのデータベースユーザーは、同じ権限(テーブル、ビューなどの権限)を持ってサービス2を利用でき、逆も同様です。ユーザーは各サービスごとに別のエンドポイントを使用しますが、同じユーザー名とパスワードを使用します。言い換えれば、_ストレージを共有するサービスの間でユーザーが共有されます:_ - -同じデータを共有するサービス間のユーザーアクセス - -
- -_Fig. 4 - ユーザーAliceはサービス1で作成されましたが、同じ資格情報を使用して同じデータを共有するすべてのサービスにアクセスできます。_ - -### ネットワークアクセス制御 {#network-access-control} - -特定のサービスが他のアプリケーションやアドホックユーザーによって使用されないように制限することが便利な場合があります。これは、現在の通常のサービスに対する設定に似たネットワーク制限を使用することで実現できます(ClickHouse Cloudコンソールの特定のサービスのサービスタブで**設定**に移動)。 - -各サービスにはIPフィルタリング設定を別々に適用できるため、どのアプリケーションがどのサービスにアクセスできるかを制御できます。これにより、特定のサービスの利用を制限することができます: - -ネットワークアクセス制御設定 - -
- -_Fig. 5 - ネットワーク設定のためにAliceはサービス2へのアクセスが制限されています。_ - -### 読み取り vs 読み書き {#read-vs-read-write} - -特定のサービスへの書き込みアクセスを制限し、ウェアハウス内のサービスのサブセットのみが書き込みを許可することが便利な場合もあります。これは、2番目およびその後のサービスを作成する際に行うことができます(最初のサービスは常に読み書き可能である必要があります): - -ウェアハウス内の読み書きサービスと読み取り専用サービス - -
- -_Fig. 6 - ウェアハウス内の読み書きサービスと読み取り専用サービス_ - -:::note -1. 読み取り専用サービスは現在、ユーザー管理操作(作成、削除など)を許可しています。この動作は将来的に変更される可能性があります。 -2. 現在、リフレッシュ可能なマテリアライズドビューは、読み取り専用サービスを含むウェアハウス内のすべてのサービスで実行されます。この動作は将来的に変更され、RWサービスのみで実行されるようになります。 -::: - - -## スケーリング {#scaling} - -ウェアハウス内の各サービスは、以下の点でワークロードに合わせて調整できます: -- ノード(レプリカ)の数。主要サービス(ウェアハウス内で最初に作成されたサービス)は2つ以上のノードを持つ必要があります。各副次サービスは1つ以上のノードを持つことができます。 -- ノード(レプリカ)のサイズ -- サービスが自動的にスケールするかどうか -- サービスが非活動時にアイドル化されるかどうか(これはグループ内の最初のサービスには適用できません - **制限**セクションを参照してください) - -## 挙動の変更 {#changes-in-behavior} -コンピュート-コンピュートがサービスに対して有効化されると(少なくとも1つの副次サービスが作成されている場合)、`clusterAllReplicas()`関数呼び出しと`default`クラスタ名は、呼び出しが行われたサービスのレプリカのみを利用します。つまり、同じデータセットに接続されている2つのサービスがあり、サービス1から`clusterAllReplicas(default, system, processes)`が呼び出された場合、サービス1で実行されているプロセスのみが表示されます。必要に応じて、すべてのレプリカにアクセスするために、例えば`clusterAllReplicas('all_groups.default', system, processes)`を呼び出すことができます。 - -## 制限事項 {#limitations} - -1. **主要サービスは常に稼働している必要があり、アイドル化されてはなりません(制限はGAの後に削除されます)。** プライベートプレビューおよびGAの後しばらくの間、主要サービス(通常は他のサービスを追加して拡張したい既存のサービス)は常に稼働しており、アイドル設定が無効化されます。少なくとも1つの副次サービスがある場合、主要サービスを停止またはアイドル化することはできません。すべての副次サービスが削除されると、元のサービスを再び停止またはアイドル化できます。 - -2. **ワークロードを隔離できない場合があります。** データベースワークロードを相互に隔離するオプションを提供することが目標ですが、同じデータを共有する他のサービスに影響を与えるサービス内のワークロードが存在する場合があります。これは、主にOLTPライクなワークロードに関連するかなり稀な状況です。 - -3. **すべての読み書きサービスはバックグラウンドマージ操作を実行しています。** データがClickHouseに挿入されると、最初にデータは一時的なパーティションに挿入され、その後バックグラウンドでマージが実行されます。これらのマージはメモリおよびCPUリソースを消費する可能性があります。2つの読み書きサービスが同じストレージを共有する場合、両方ともバックグラウンド操作を実行しています。つまり、サービス1で`INSERT`クエリがありましたが、サービス2によってマージ操作が完了している場合があります。読み取り専用サービスはバックグラウンドマージを実行しないため、これにリソースを消費しません。 - -4. **1つの読み書きサービスでの挿入は、アイドル化が有効な場合に別の読み書きサービスがアイドル化されるのを妨げる可能性があります。** 前のポイントにより、第2サービスが最初のサービスのためにバックグラウンドマージ操作を実行します。これらのバックグラウンド操作は、第2サービスがアイドル化するのを妨げる可能性があります。バックグラウンド操作が完了すると、サービスはアイドル化されます。読み取り専用サービスは影響を受けず、遅延なくアイドル化されます。 - -5. **CREATE/RENAME/DROP DATABASEクエリは、アイドル化されたり停止されたサービスによってデフォルトでブロックされることがあります。** これらのクエリはハングする可能性があります。これを回避するには、`settings distributed_ddl_task_timeout=0`でデータベース管理クエリをセッションまたはクエリレベルで実行できます。例えば: - -```sql -create database db_test_ddl_single_query_setting -settings distributed_ddl_task_timeout=0 -``` - -6. **非常にまれに、長期間(数日間)アイドル化または停止されていた副次サービスが、同じウェアハウス内の他のサービスにパフォーマンスの低下を引き起こす可能性があります。** この問題はすぐに解決され、バックグラウンドで実行されているミューテーションに関連しています。この問題が発生していると思われる場合は、ClickHouse [サポート](https://clickhouse.com/support/program) にお問い合わせください。 - -7. **現在、ウェアハウスあたり5つのサービスのソフト制限があります。** 1つのウェアハウスに5つ以上のサービスが必要な場合は、サポートチームにお問い合わせください。 - -## 価格設定 {#pricing} - -コンピュートの価格は、ウェアハウス内のすべてのサービス(主要および副次)で同じです。ストレージは1回のみ請求され、最初(元の)サービスに含まれています。 - -## バックアップ {#backups} - -- 単一のウェアハウス内のすべてのサービスは同じストレージを共有するため、バックアップは主要(初期)サービスのみに作成されます。これにより、ウェアハウス内のすべてのサービスのデータがバックアップされます。 -- ウェアハウスの主要サービスからバックアップを復元すると、既存のウェアハウスに接続されていない完全に新しいサービスに復元されます。その後、復元が完了した後すぐに新しいサービスに他のサービスを追加できます。 - -## ウェアハウスの使用 {#using-warehouses} - -### ウェアハウスの作成 {#creating-a-warehouse} - -ウェアハウスを作成するには、既存のサービスとデータを共有する第2サービスを作成する必要があります。これは、既存のサービスのいずれかにあるプラスのサインをクリックすることで行えます: - -ウェアハウス内に新しいサービスを作成 - -
- -_Fig. 7 - ウェアハウス内に新しいサービスを作成するためにプラスのサインをクリック_ - -サービス作成画面では、元のサービスが新しいサービスのデータソースとしてドロップダウンに選択されます。作成後、これら2つのサービスがウェアハウスを形成します。 - -### ウェアハウスの名前変更 {#renaming-a-warehouse} - -ウェアハウスの名前を変更する方法は2つあります: - -- サービスページの右上で「ウェアハウスでソート」を選択し、ウェアハウス名の近くにある鉛筆アイコンをクリックします。 -- どのサービスでもウェアハウス名をクリックして、そこでウェアハウスの名前を変更します。 - -### ウェアハウスの削除 {#deleting-a-warehouse} - -ウェアハウスを削除することは、すべてのコンピュートサービスとデータ(テーブル、ビュー、ユーザーなど)を削除することを意味します。このアクションは元に戻すことができません。 -ウェアハウスを削除するには、最初に作成されたサービスを削除する必要があります。これを行うためには: - -1. 最初に作成されたサービスに加えて作成されたすべてのサービスを削除します。 -2. 最初のサービスを削除します(警告:このステップでウェアハウスのすべてのデータが削除されます)。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/reference/warehouses.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/reference/warehouses.md.hash deleted file mode 100644 index ecb25c1c895..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/reference/warehouses.md.hash +++ /dev/null @@ -1 +0,0 @@ -d6ad26bda3aae394 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/security/_category_.yml b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/security/_category_.yml deleted file mode 100644 index b7253753fd5..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/security/_category_.yml +++ /dev/null @@ -1,6 +0,0 @@ -label: 'Cloud Security' -collapsible: true -collapsed: true -link: - type: generated-index - title: Cloud Security diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/security/accessing-s3-data-securely.md b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/security/accessing-s3-data-securely.md deleted file mode 100644 index 278c5d5ed44..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/security/accessing-s3-data-securely.md +++ /dev/null @@ -1,146 +0,0 @@ ---- -slug: '/cloud/security/secure-s3' -sidebar_label: 'Amazon Simple Storage Service(S3) データの安全なアクセス' -title: 'Amazon Simple Storage Service(S3) データの安全なアクセス' -description: 'この記事では、ClickHouse Cloud の顧客が、Amazon Simple Storage Service(S3) と認証するためにロールベースのアクセスを活用してデータに安全にアクセスする方法を示しています。' ---- - -import Image from '@theme/IdealImage'; -import secure_s3 from '@site/static/images/cloud/security/secures3.jpg'; -import s3_info from '@site/static/images/cloud/security/secures3_arn.png'; -import s3_output from '@site/static/images/cloud/security/secures3_output.jpg'; - -This article demonstrates how ClickHouse Cloud customers can leverage role-based access to authenticate with Amazon Simple Storage Service(S3) and access their data securely. - -## Introduction {#introduction} - -Before diving into the setup for secure S3 access, it is important to understand how this works. Below is an overview of how ClickHouse services can access private S3 buckets by assuming into a role within customers' AWS account. - -Overview of Secure S3 Access with ClickHouse - -This approach allows customers to manage all access to their S3 buckets in a single place (the IAM policy of the assumed-role) without having to go through all of their bucket policies to add or remove access. - -## Setup {#setup} - -### Obtaining the ClickHouse service IAM role Arn {#obtaining-the-clickhouse-service-iam-role-arn} - -1 - Login to your ClickHouse cloud account. - -2 - Select the ClickHouse service you want to create the integration - -3 - Select the **Settings** tab - -4 - Scroll down to the **Network security information** section at the bottom of the page - -5 - Copy the **Service role ID (IAM)** value belong to the service as shown below. 
- -Obtaining ClickHouse service IAM Role ARN - -### Setting up IAM assume role {#setting-up-iam-assume-role} - -#### Option 1: Deploying with CloudFormation stack {#option-1-deploying-with-cloudformation-stack} - -1 - Login to your AWS Account in the web browser with an IAM user that has permission to create & manage IAM role. - -2 - Visit [このURL](https://us-west-2.console.aws.amazon.com/cloudformation/home?region=us-west-2#/stacks/quickcreate?templateURL=https://s3.us-east-2.amazonaws.com/clickhouse-public-resources.clickhouse.cloud/cf-templates/secure-s3.yaml&stackName=ClickHouseSecureS3) to populate the CloudFormation stack. - -3 - Enter (or paste) the **IAM Role** belong to the ClickHouse service - -4 - Configure the CloudFormation stack. Below is additional information about these parameters. - -| Parameter | Default Value | Description | -| :--- | :----: | :---- | -| RoleName | ClickHouseAccess-001 | The name of the new role that ClickHouse Cloud will use to access your S3 bucket | -| Role Session Name | * | Role Session Name can be used as a shared secret to further protect your bucket. | -| ClickHouse Instance Roles | | Comma separated list of ClickHouse service IAM roles that can use this Secure S3 integration. | -| Bucket Access | Read | Sets the level of access for the provided buckets. | -| Bucket Names | | Comma separated list of **bucket names** that this role will have access to. | - -*Note*: Do not put the full bucket Arn but instead just the bucket name only. - -5 - Select the **I acknowledge that AWS CloudFormation might create IAM resources with custom names.** checkbox - -6 - Click **Create stack** button at bottom right - -7 - Make sure the CloudFormation stack completes with no error. - -8 - Select the **Outputs** of the CloudFormation stack - -9 - Copy the **RoleArn** value for this integration. This is what needed to access your S3 bucket. - -CloudFormation stack output showing IAM Role ARN - -#### Option 2: Manually create IAM role. {#option-2-manually-create-iam-role} - -1 - Login to your AWS Account in the web browser with an IAM user that has permission to create & manage IAM role. - -2 - Browse to IAM Service Console - -3 - Create a new IAM role with the following IAM & Trust policy. - -Trust policy (Please replace `{ClickHouse_IAM_ARN}` with the IAM Role arn belong to your ClickHouse instance): - -```json -{ - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Principal": { - "AWS": "{ClickHouse_IAM_ARN}" - }, - "Action": "sts:AssumeRole" - } - ] -} -``` - -IAM policy (Please replace `{BUCKET_NAME}` with your bucket name): - -```json -{ - "Version": "2012-10-17", - "Statement": [ - { - "Action": [ - "s3:GetBucketLocation", - "s3:ListBucket" - ], - "Resource": [ - "arn:aws:s3:::{BUCKET_NAME}" - ], - "Effect": "Allow" - }, - { - "Action": [ - "s3:Get*", - "s3:List*" - ], - "Resource": [ - "arn:aws:s3:::{BUCKET_NAME}/*" - ], - "Effect": "Allow" - } - ] -} -``` - -4 - Copy the new **IAM Role Arn** after creation. This is what needed to access your S3 bucket. - -## Access your S3 bucket with the ClickHouseAccess Role {#access-your-s3-bucket-with-the-clickhouseaccess-role} - -ClickHouse Cloud has a new feature that allows you to specify `extra_credentials` as part of the S3 table function. Below is an example of how to run a query using the newly created role copied from above. 
- -```sql -DESCRIBE TABLE s3('https://s3.amazonaws.com/BUCKETNAME/BUCKETOBJECT.csv','CSVWithNames',extra_credentials(role_arn = 'arn:aws:iam::111111111111:role/ClickHouseAccessRole-001')) -``` - -Below is an example query that uses the `role_session_name` as a shared secret to query data from a bucket. If the `role_session_name` is not correct, this operation will fail. - -```sql -DESCRIBE TABLE s3('https://s3.amazonaws.com/BUCKETNAME/BUCKETOBJECT.csv','CSVWithNames',extra_credentials(role_arn = 'arn:aws:iam::111111111111:role/ClickHouseAccessRole-001', role_session_name = 'secret-role-name')) -``` - -:::note -We recommend that your source S3 is in the same region as your ClickHouse Cloud Service to reduce on data transfer costs. For more information, refer to [S3 pricing]( https://aws.amazon.com/s3/pricing/) -::: diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/security/accessing-s3-data-securely.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/security/accessing-s3-data-securely.md.hash deleted file mode 100644 index ce2a099cc8e..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/security/accessing-s3-data-securely.md.hash +++ /dev/null @@ -1 +0,0 @@ -785df0b271a0a024 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/security/audit-logging.md b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/security/audit-logging.md deleted file mode 100644 index 205190e049f..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/security/audit-logging.md +++ /dev/null @@ -1,69 +0,0 @@ ---- -sidebar_label: '監査ログ' -slug: '/cloud/security/audit-logging' -title: 'Audit Logging' -description: 'このページはClickHouse Cloudでの監査ログについて説明しています。ClickHouse Cloudの組織に対する変更を記録する監査ログへのアクセス方法と解釈方法について説明しています。' ---- - -import Image from '@theme/IdealImage'; -import activity_log_1 from '@site/static/images/cloud/security/activity_log1.png'; -import activity_log_2 from '@site/static/images/cloud/security/activity_log2.png'; -import activity_log_3 from '@site/static/images/cloud/security/activity_log3.png'; - -In ClickHouse Cloud, あなたの組織の詳細に移動します。 - -ClickHouse Cloud activity tab - -
- -左メニューから **Audit** タブを選択すると、あなたの ClickHouse Cloud 組織で行われた変更を確認できます。変更を行ったのは誰で、いつ発生したのかも含まれています。 - -**Activity** ページには、あなたの組織に関するイベントが記録されたテーブルが表示されます。デフォルトでは、このリストは逆年代順(最新のイベントが上部)にソートされています。カラムヘッダーをクリックしてテーブルの順序を変更できます。テーブルの各アイテムには以下のフィールドが含まれます: - -- **Activity:** イベントを説明するテキストスニペット -- **User:** イベントを開始したユーザー -- **IP Address:** 該当する場合、このフィールドにはイベントを開始したユーザーのIPアドレスが表示されます -- **Time:** イベントのタイムスタンプ - -ClickHouse Cloud Activity Table - -
- -提供された検索バーを使用すると、サービス名やIPアドレスなどのいくつかの基準に基づいてイベントを特定できます。この情報は、配布や外部ツールでの分析のためにCSV形式でエクスポートすることも可能です。 - -
- ClickHouse Cloud Activity CSV export -
- -## ログに記録されたイベントのリスト {#list-of-events-logged} - -組織のためにキャプチャされたさまざまなタイプのイベントは、**Service**、**Organization**、**User** の3つのカテゴリにグループ化されています。ログに記録されたイベントのリストには以下が含まれます: - -### Service {#service} - -- サービスが作成されました -- サービスが削除されました -- サービスが停止しました -- サービスが開始されました -- サービス名が変更されました -- サービスのIPアクセスリストが変更されました -- サービスのパスワードがリセットされました - -### Organization {#organization} - -- 組織が作成されました -- 組織が削除されました -- 組織名が変更されました - -### User {#user} - -- ユーザーの役割が変更されました -- ユーザーが組織から削除されました -- ユーザーが組織に招待されました -- ユーザーが組織に参加しました -- ユーザーの招待が削除されました -- ユーザーが組織を離れました - -## 監査イベントのためのAPI {#api-for-audit-events} - -ユーザーは ClickHouse Cloud API `activity` エンドポイントを使用して、監査イベントのエクスポートを取得できます。詳細は [API reference](https://clickhouse.com/docs/cloud/manage/api/swagger) を参照してください。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/security/audit-logging.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/security/audit-logging.md.hash deleted file mode 100644 index 686be939f79..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/security/audit-logging.md.hash +++ /dev/null @@ -1 +0,0 @@ -85a890844d282bbf diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/security/aws-privatelink.md b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/security/aws-privatelink.md deleted file mode 100644 index b3246c2cc80..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/security/aws-privatelink.md +++ /dev/null @@ -1,354 +0,0 @@ ---- -title: 'AWS プライベートリンク' -description: 'このドキュメントでは、AWS プライベートリンクを使用して ClickHouse Cloud に接続する方法について説明します。' -slug: '/manage/security/aws-privatelink' ---- - -import Image from '@theme/IdealImage'; -import ScalePlanFeatureBadge from '@theme/badges/ScalePlanFeatureBadge'; -import aws_private_link_pecreate from '@site/static/images/cloud/security/aws-privatelink-pe-create.png'; -import aws_private_link_endpoint_settings from '@site/static/images/cloud/security/aws-privatelink-endpoint-settings.png'; -import aws_private_link_select_vpc from '@site/static/images/cloud/security/aws-privatelink-select-vpc-and-subnets.png'; -import aws_private_link_vpc_endpoint_id from '@site/static/images/cloud/security/aws-privatelink-vpc-endpoint-id.png'; -import aws_private_link_endpoints_menu from '@site/static/images/cloud/security/aws-privatelink-endpoints-menu.png'; -import aws_private_link_modify_dnsname from '@site/static/images/cloud/security/aws-privatelink-modify-dns-name.png'; -import pe_remove_private_endpoint from '@site/static/images/cloud/security/pe-remove-private-endpoint.png'; -import aws_private_link_pe_filters from '@site/static/images/cloud/security/aws-privatelink-pe-filters.png'; -import aws_private_link_ped_nsname from '@site/static/images/cloud/security/aws-privatelink-pe-dns-name.png'; - - -# AWS PrivateLink - - - -AWS PrivateLinkを使用すると、VPC、AWSサービス、オンプレミスシステム、ClickHouse Cloud間で、安全な接続を確立できます。これにより、トラフィックが公衆インターネットにさらされることはありません。本ドキュメントでは、AWS PrivateLinkを使用してClickHouse Cloudに接続する手順を概説します。 - -ClickHouse CloudサービスへのアクセスをAWS PrivateLinkアドレスを介してのみ制限するには、ClickHouse Cloudの[IPアクセスリスト](/cloud/security/setting-ip-filters)に記載された手順に従ってください。 - -:::note -ClickHouse Cloudは、現在[クロスリージョンPrivateLink](https://aws.amazon.com/about-aws/whats-new/2024/11/aws-privatelink-across-region-connectivity/)のベータ版をサポートしています。 -::: - -**AWS PrivateLinkを有効にするには、次の手順を完了してください**: -1. エンドポイント「サービス名」を取得します。 -1. AWSエンドポイントを作成します。 -1. 「エンドポイントID」をClickHouse Cloud組織に追加します。 -1. 
「エンドポイントID」をClickHouseサービスの許可リストに追加します。 - -Terraformの例は[こちら](https://github.com/ClickHouse/terraform-provider-clickhouse/tree/main/examples/)でご確認いただけます。 - -## 注意 {#attention} -ClickHouseは、AWSリージョン内で同じ公開された[サービスエンドポイント](https://docs.aws.amazon.com/vpc/latest/privatelink/privatelink-share-your-services.html#endpoint-service-overview)を再利用するためにサービスをグループ化しようとします。ただし、このグループ化は保証されておらず、特に複数のClickHouse組織にサービスを分散させた場合は特にそうです。 -既にClickHouse組織内の他のサービスに対してPrivateLinkが設定されている場合、そのグループ化のためにほとんどの手順をスキップし、最終手順「[ClickHouseの「エンドポイントID」をClickHouseサービスの許可リストに追加](#add-endpoint-id-to-services-allow-list)」に直接進むことが可能です。 - -## 前提条件 {#prerequisites} - -始める前に、必要なものは次のとおりです: - -1. あなたのAWSアカウント。 -1. [ClickHouse APIキー](/cloud/manage/openapi)で、ClickHouse側でプライベートエンドポイントを作成および管理するために必要な権限を持っていること。 - -## 手順 {#steps} - -AWS PrivateLinkを介してClickHouse Cloudサービスに接続するための手順は以下の通りです。 - -### エンドポイント「サービス名」を取得 {#obtain-endpoint-service-info} - -#### オプション1: ClickHouse Cloudコンソール {#option-1-clickhouse-cloud-console} - -ClickHouse Cloudコンソールで、PrivateLinkを介して接続したいサービスを開き、次に**設定**メニューに移動します。 - -プライベートエンドポイント - -`サービス名`と`DNS名`をメモし、次のステップに[移動します](#create-aws-endpoint)。 - -#### オプション2: API {#option-2-api} - -まず、以下の環境変数を設定してからコマンドを実行してください: - -```shell -REGION= -PROVIDER=aws -KEY_ID=<あなたのClickHouseキーID> -KEY_SECRET=<あなたのClickHouseキーシークレット> -ORG_ID=<あなたのClickHouse組織ID> -SERVICE_NAME=<あなたのClickHouseサービス名> -``` - -地域、プロバイダー、およびサービス名でフィルタリングしてClickHouseの`INSTANCE_ID`を取得します: - -```shell -INSTANCE_ID=$(curl --silent --user "${KEY_ID:?}:${KEY_SECRET:?}" \ -"https://api.clickhouse.cloud/v1/organizations/${ORG_ID:?}/services" | \ -jq ".result[] | select (.region==\"${REGION:?}\" and .provider==\"${PROVIDER:?}\" and .name==\"${SERVICE_NAME:?}\") | .id " -r) -``` - -プライベートリンク構成のために`endpointServiceId`と`privateDnsHostname`を取得します: - -```bash -curl --silent --user "${KEY_ID:?}:${KEY_SECRET:?}" \ -"https://api.clickhouse.cloud/v1/organizations/${ORG_ID:?}/services/${INSTANCE_ID:?}/privateEndpointConfig" | \ -jq .result -``` - -このコマンドは以下のような結果を返すはずです: - -```result -{ - "endpointServiceId": "com.amazonaws.vpce.us-west-2.vpce-svc-xxxxxxxxxxxxxxxxx", - "privateDnsHostname": "xxxxxxxxxx.us-west-2.vpce.aws.clickhouse.cloud" -} -``` - -`endpointServiceId`と`privateDnsHostname`をメモし、次のステップに[移動します](#create-aws-endpoint)。 - -### AWSエンドポイントの作成 {#create-aws-endpoint} - -:::important -このセクションでは、AWS PrivateLinkを介してClickHouseを構成するためのClickHouse固有の詳細を説明します。AWS固有の手順は、参照として提供されていますが、時間が経つにつれて予告なしに変更される可能性があります。特定のユースケースに基づいてAWS構成を検討してください。 - -ClickHouseは、必要なAWS VPCエンドポイント、セキュリティグループのルール、またはDNSレコードの設定に対して責任を負わないことに注意してください。 - -以前にPrivateLinkの設定中に「プライベートDNS名」を有効にしており、新しいサービスをPrivateLink経由で構成する際に問題が発生した場合は、ClickHouseサポートにお問い合わせください。他のAWSの設定作業に関する問題については、直接AWSサポートに連絡してください。 -::: - -#### オプション1: AWSコンソール {#option-1-aws-console} - -AWSコンソールを開き、**VPC** → **エンドポイント** → **エンドポイントを作成**に移動します。 - -**NLBおよびGWLBを使用するエンドポイントサービス**を選択し、[エンドポイント「サービス名」](#obtain-endpoint-service-info)ステップで取得した`サービス名`コンソールまたは`endpointServiceId`APIを**サービス名**フィールドに入力します。**サービスの確認**をクリックします: - -AWS PrivateLinkエンドポイント設定 - -PrivateLinkを介してクロスリージョン接続を確立したい場合は、「クロスリージョンエンドポイント」のチェックボックスを有効にし、サービスリージョンを指定します。サービスリージョンはClickHouseインスタンスが稼働している場所です。 - -「サービス名を確認できませんでした。」というエラーメッセージが表示された場合は、新しいリージョンをサポートされているリージョンリストに追加するようカスタマーサポートにお問い合わせください。 - -次に、VPCとサブネットを選択します: - -VPCとサブネットの選択 - -オプションのステップとして、セキュリティグループ/タグを割り当てます: - -:::note -ポート`443`、`8443`、`9440`、`3306`がセキュリティグループ内で許可されていることを確認してください。 -::: - -VPCエンドポイントを作成した後、`エンドポイントID`の値をメモします。次のステップで必要になります。 - -VPCエンドポイントID - -#### オプション2: AWS CloudFormation 
{#option-2-aws-cloudformation} - -次に、[エンドポイント「サービス名」](#obtain-endpoint-service-info)ステップで取得した`サービス名`コンソールまたは`endpointServiceId`APIを使用してVPCエンドポイントを作成する必要があります。正しいサブネットID、セキュリティグループ、およびVPC IDを使用してください。 - -```response -Resources: - ClickHouseInterfaceEndpoint: - Type: 'AWS::EC2::VPCEndpoint' - Properties: - VpcEndpointType: Interface - PrivateDnsEnabled: false - ServiceName: <サービス名(endpointServiceId)、上記を参照> - VpcId: vpc-vpc_id - SubnetIds: - - subnet-subnet_id1 - - subnet-subnet_id2 - - subnet-subnet_id3 - SecurityGroupIds: - - sg-security_group_id1 - - sg-security_group_id2 - - sg-security_group_id3 -``` - -VPCエンドポイントを作成した後、`エンドポイントID`の値をメモします。次のステップで必要になります。 - -#### オプション3: Terraform {#option-3-terraform} - -以下の`service_name`は、[エンドポイント「サービス名」](#obtain-endpoint-service-info)ステップで取得した`サービス名`コンソールまたは`endpointServiceId`APIです。 - -```json -resource "aws_vpc_endpoint" "this" { - vpc_id = var.vpc_id - service_name = "<上記のコメントを参照>" - vpc_endpoint_type = "Interface" - security_group_ids = [ - Var.security_group_id1,var.security_group_id2, var.security_group_id3, - ] - subnet_ids = [var.subnet_id1,var.subnet_id2,var.subnet_id3] - private_dns_enabled = false - service_region = "(オプション) 指定すると、VPCエンドポイントが指定されたリージョンのサービスに接続します。マルチリージョンPrivateLink接続の場合は定義します。" -} -``` - -VPCエンドポイントを作成した後、`エンドポイントID`の値をメモします。次のステップで必要になります。 - -#### エンドポイントのプライベートDNS名を設定 {#set-private-dns-name-for-endpoint} - -:::note -DNSを設定する方法は多岐にわたります。特定のユースケースに応じてDNSを設定してください。 -::: - -[エンドポイント「サービス名」](#obtain-endpoint-service-info)ステップから取得した「DNS名」をAWSエンドポイントネットワークインターフェースにポイントする必要があります。これにより、VPC/ネットワーク内のサービス/コンポーネントが正しくそれを解決できるようになります。 - -### 「エンドポイントID」をClickHouseサービスの許可リストに追加 {#add-endpoint-id-to-services-allow-list} - -#### オプション1: ClickHouse Cloudコンソール {#option-1-clickhouse-cloud-console-2} - -追加するには、ClickHouse Cloudコンソールに移動し、PrivateLink経由で接続したいサービスを開いて、次に**設定**に移動します。**プライベートエンドポイントを設定**をクリックして、プライベートエンドポイント設定を開きます。[Create AWS Endpoint](#create-aws-endpoint)ステップで取得した`エンドポイントID`を入力します。「エンドポイントを作成」をクリックします。 - -:::note -既存のPrivateLink接続からのアクセスを許可したい場合は、既存のエンドポイントドロップダウンメニューを使用してください。 -::: - -プライベートエンドポイントフィルター - -削除するには、ClickHouse Cloudコンソールに移動し、サービスを見つけ、そのサービスの**設定**に移動して、削除したいエンドポイントを見つけます。エンドポイントのリストから削除します。 - -#### オプション2: API {#option-2-api-2} - -プライベートリンクを使用して利用可能にする必要がある各インスタンスにエンドポイントIDを許可リストに追加する必要があります。 - -[Create AWS Endpoint](#create-aws-endpoint)ステップからのデータを使用して、`ENDPOINT_ID`環境変数を設定します。 - -コマンドを実行する前に、以下の環境変数を設定してください: - -```bash -REGION= -PROVIDER=aws -KEY_ID=<あなたのClickHouseキーID> -KEY_SECRET=<あなたのClickHouseキーシークレット> -ORG_ID=<あなたのClickHouse組織ID> -SERVICE_NAME=<あなたのClickHouseサービス名> -``` - -エンドポイントIDを許可リストに追加するには: - -```bash -cat <APIまたは`DNS名`コンソールであるプライベートエンドポイントを使用する必要があります。 - -#### プライベートDNSホスト名を取得する {#getting-private-dns-hostname} - -##### オプション1: ClickHouse Cloudコンソール {#option-1-clickhouse-cloud-console-3} - -ClickHouse Cloudコンソールで、**設定**に移動します。**プライベートエンドポイントを設定**ボタンをクリックします。開いたフライアウトで、**DNS名**をコピーします。 - -プライベートエンドポイントDNS名 - -##### オプション2: API {#option-2-api-3} - -コマンドを実行する前に、以下の環境変数を設定してください: - -```bash -KEY_ID=<あなたのClickHouseキーID> -KEY_SECRET=<あなたのClickHouseキーシークレット> -ORG_ID=<あなたのClickHouse組織ID> -INSTANCE_ID=<あなたのClickHouseサービス名> -``` - -[ステップ](#option-2-api)から`INSTANCE_ID`を取得できます。 - -```bash -curl --silent --user "${KEY_ID:?}:${KEY_SECRET:?}" \ -"https://api.clickhouse.cloud/v1/organizations/${ORG_ID:?}/services/${INSTANCE_ID:?}/privateEndpointConfig" | \ -jq .result -``` - -これは以下のような出力を生成します: - -```result -{ - "endpointServiceId": "com.amazonaws.vpce.us-west-2.vpce-svc-xxxxxxxxxxxxxxxxx", - "privateDnsHostname": 
"xxxxxxxxxx.us-west-2.vpce.aws.clickhouse.cloud" -} -``` - -この例では、`privateDnsHostname`のホスト名を介した接続はPrivateLinkにルーティングされますが、`endpointServiceId`のホスト名を介した接続はインターネットを経由してルーティングされます。 - -## トラブルシューティング {#troubleshooting} - -### 1つのリージョン内の複数のPrivateLinks {#multiple-privatelinks-in-one-region} - -ほとんどの場合、各VPCのために単一のエンドポイントサービスを作成する必要があります。このエンドポイントは、VPCから複数のClickHouse Cloudサービスへのリクエストをルーティングできます。 -[こちら](#attention)を参照してください。 - -### プライベートエンドポイントへの接続がタイムアウトしました {#connection-to-private-endpoint-timed-out} - -- VPCエンドポイントにセキュリティグループを添付してください。 -- エンドポイントに添付されたセキュリティグループの`inbound`ルールを確認し、ClickHouseポートを許可してください。 -- 接続テストに使用されるVMに添付されたセキュリティグループの`outbound`ルールを確認し、ClickHouseポートへの接続を許可してください。 - -### プライベートホスト名: ホストのアドレスが見つかりません {#private-hostname-not-found-address-of-host} - -- DNS構成を確認してください。 - -### ピアによる接続リセット {#connection-reset-by-peer} - -- おそらくエンドポイントIDがサービス許可リストに追加されていないため、[ステップ](#add-endpoint-id-to-services-allow-list)をご覧ください。 - -### エンドポイントフィルターの確認 {#checking-endpoint-filters} - -コマンドを実行する前に、以下の環境変数を設定してください: - -```bash -KEY_ID=<キーID> -KEY_SECRET=<キーシークレット> -ORG_ID= -INSTANCE_ID=<インスタンスID> -``` - -[ステップ](#option-2-api)から`INSTANCE_ID`を取得できます。 - -```shell -curl --silent --user "${KEY_ID:?}:${KEY_SECRET:?}" \ --X GET -H "Content-Type: application/json" \ -"https://api.clickhouse.cloud/v1/organizations/${ORG_ID:?}/services/${INSTANCE_ID:?}" | \ -jq .result.privateEndpointIds -``` - -### リモートデータベースへの接続 {#connecting-to-a-remote-database} - -たとえば、ClickHouse Cloudで[MySQL](../../sql-reference/table-functions/mysql.md)または[PostgreSQL](../../sql-reference/table-functions/postgresql.md)テーブル関数を使用して、Amazon Web Services (AWS) VPCにホストされているデータベースに接続しようとしている場合、AWS PrivateLinkを使用してこの接続を安全に有効にすることはできません。PrivateLinkは一方向の単方向接続です。あなたの内部ネットワークまたはAmazon VPCがClickHouse Cloudに安全に接続できるようにしますが、ClickHouse Cloudが内部ネットワークに接続することはできません。 - -[AWS PrivateLinkのドキュメント](https://docs.aws.amazon.com/whitepapers/latest/building-scalable-secure-multi-vpc-network-infrastructure/aws-privatelink.html)によると: - -> AWS PrivateLinkを使用するのは、クライアント/サーバーのセットアップがあり、特定のサービスまたはサービスプロバイダーVPC内のインスタンスのセットに対して1つ以上の消費者VPCによる単方向のアクセスを許可したい場合です。消費者VPC内のクライアントのみが、サービスプロバイダーVPC内のサービスへの接続を開始できます。 - -これを実現するために、AWSセキュリティグループを構成して、ClickHouse Cloudから内部/プライベートデータベースサービスへの接続を許可する必要があります。[ClickHouse CloudリージョンのデフォルトのイーグレスIPアドレス](/manage/security/cloud-endpoints-api)や、[利用可能な静的IPアドレス](https://api.clickhouse.cloud/static-ips.json)を確認してください。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/security/aws-privatelink.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/security/aws-privatelink.md.hash deleted file mode 100644 index 70fd5cb9d95..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/security/aws-privatelink.md.hash +++ /dev/null @@ -1 +0,0 @@ -c0f7615c2e970831 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/security/azure-privatelink.md b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/security/azure-privatelink.md deleted file mode 100644 index 2998f43e14b..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/security/azure-privatelink.md +++ /dev/null @@ -1,548 +0,0 @@ ---- -title: 'Azure プライベートリンク' -sidebar_label: 'Azure プライベートリンク' -slug: '/cloud/security/azure-privatelink' -description: 'Azure プライベートリンクの設定方法' -keywords: -- 'azure' -- 'private link' -- 'privatelink' ---- - -import Image from '@theme/IdealImage'; -import ScalePlanFeatureBadge from '@theme/badges/ScalePlanFeatureBadge'; -import azure_pe from '@site/static/images/cloud/security/azure-pe.png'; -import 
azure_privatelink_pe_create from '@site/static/images/cloud/security/azure-privatelink-pe-create.png'; -import azure_private_link_center from '@site/static/images/cloud/security/azure-private-link-center.png'; -import azure_pe_create_basic from '@site/static/images/cloud/security/azure-pe-create-basic.png'; -import azure_pe_resource from '@site/static/images/cloud/security/azure-pe-resource.png'; -import azure_pe_create_vnet from '@site/static/images/cloud/security/azure-pe-create-vnet.png'; -import azure_pe_create_dns from '@site/static/images/cloud/security/azure-pe-create-dns.png'; -import azure_pe_create_tags from '@site/static/images/cloud/security/azure-pe-create-tags.png'; -import azure_pe_create_review from '@site/static/images/cloud/security/azure-pe-create-review.png'; -import azure_pe_ip from '@site/static/images/cloud/security/azure-pe-ip.png'; -import azure_pe_view from '@site/static/images/cloud/security/azure-pe-view.png'; -import azure_pe_resource_guid from '@site/static/images/cloud/security/azure-pe-resource-guid.png'; -import azure_pl_dns_wildcard from '@site/static/images/cloud/security/azure-pl-dns-wildcard.png'; -import azure_pe_remove_private_endpoint from '@site/static/images/cloud/security/azure-pe-remove-private-endpoint.png'; -import azure_privatelink_pe_filter from '@site/static/images/cloud/security/azure-privatelink-pe-filter.png'; -import azure_privatelink_pe_dns from '@site/static/images/cloud/security/azure-privatelink-pe-dns.png'; - - -# Azure Private Link - - - -このガイドでは、Azure Private Linkを使用して、Azure(顧客所有およびMicrosoftパートナーサービスを含む)とClickHouse Cloudの間で仮想ネットワークを介したプライベート接続を提供する方法を示します。Azure Private Linkは、ネットワークアーキテクチャを簡素化し、公開インターネットへのデータ露出を排除することで、Azure内のエンドポイント間の接続を安全にします。 - -Overview of PrivateLink - -AWSやGCPとは異なり、AzureはPrivate Linkを介してのリージョン間接続をサポートしています。これにより、異なるリージョンに配置されているVNet間でClickHouseサービスとの接続を確立できます。 - -:::note -リージョン間のトラフィックには追加料金がかかる場合があります。最新のAzureドキュメントをご確認ください。 -::: - -**Azure Private Linkを有効にするために、次の手順を完了してください:** - -1. Private LinkのAzure接続エイリアスを取得します -1. Azureでプライベートエンドポイントを作成します -1. プライベートエンドポイントGUIDをClickHouse Cloud組織に追加します -1. プライベートエンドポイントGUIDをサービスの許可リストに追加します -1. 
プライベートリンクを使用してClickHouse Cloudサービスにアクセスします - - -## 注意 {#attention} -ClickHouseは、Azureリージョン内で同じ公開された[Private Linkサービス](https://learn.microsoft.com/en-us/azure/private-link/private-link-service-overview)を再利用するために、サービスをグループ化しようとします。ただし、このグループ化は保証されておらず、特にサービスを複数のClickHouse組織に分散させている場合は特にそうです。 -すでにClickHouse組織内で他のサービスのためにPrivate Linkが設定されている場合、そのグループ化のために大部分の手順をスキップし、最終手順である[プライベートエンドポイントGUIDをサービスの許可リストに追加](#add-private-endpoint-guid-to-services-allow-list)に直接進むことができます。 - -ClickHouseの[Terraformプロバイダリポジトリ](https://github.com/ClickHouse/terraform-provider-clickhouse/tree/main/examples/)でTerraformの例を見つけてください。 - -## Azure接続エイリアスを取得する {#obtain-azure-connection-alias-for-private-link} - -### オプション1: ClickHouse Cloudコンソール {#option-1-clickhouse-cloud-console} - -ClickHouse Cloudコンソールで、PrivateLinkを介して接続したいサービスを開き、**設定**メニューを開きます。**プライベートエンドポイントを設定**ボタンをクリックします。Private Linkの設定に使用する`サービス名`および`DNS名`をメモしておきます。 - -Private Endpoints - -`サービス名`および`DNS名`をメモしておいてください。次のステップで必要になります。 - -### オプション2: API {#option-2-api} - -始める前に、ClickHouse Cloud APIキーが必要です。[新しいキーを作成](/cloud/manage/openapi)するか、既存のキーを使用できます。 - -APIキーが手に入ったら、コマンドを実行する前に次の環境変数を設定します: - -```bash -REGION=<地域コード、Azure形式を使用、例: westus3> -PROVIDER=azure -KEY_ID=<キーID> -KEY_SECRET=<キーシークレット> -ORG_ID= -SERVICE_NAME=<あなたのClickHouseサービス名> -``` - -地域、プロバイダ、サービス名でフィルタリングしてClickHouseの`INSTANCE_ID`を取得します: - -```shell -INSTANCE_ID=$(curl --silent --user "${KEY_ID:?}:${KEY_SECRET:?}" \ -"https://api.clickhouse.cloud/v1/organizations/${ORG_ID:?}/services" | \ -jq ".result[] | select (.region==\"${REGION:?}\" and .provider==\"${PROVIDER:?}\" and .name==\"${SERVICE_NAME:?}\") | .id " -r) -``` - -Private Link用のAzure接続エイリアスとプライベートDNSホスト名を取得します: - -```bash -curl --silent --user "${KEY_ID:?}:${KEY_SECRET:?}" "https://api.clickhouse.cloud/v1/organizations/${ORG_ID:?}/services/${INSTANCE_ID:?}/privateEndpointConfig" | jq .result -{ - "endpointServiceId": "production-westus3-0-0.xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx.westus3.azure.privatelinkservice", - "privateDnsHostname": "xxxxxxxxxx.westus3.privatelink.azure.clickhouse.cloud" -} -``` - -`endpointServiceId`をメモしておきます。次のステップで使用します。 - -## Azureでプライベートエンドポイントを作成する {#create-private-endpoint-in-azure} - -:::important -このセクションでは、Azure Private Linkを介してClickHouseを構成するためのClickHouse特有の詳細をカバーしています。Azure特有の手順は参照用に提供されており、どこを見れば良いかのガイドとなりますが、Azureクラウドプロバイダからの通知なしに時間と共に変更される可能性があります。特定のユースケースに基づいてAzure構成を検討してください。 - -ClickHouseは、必要なAzureプライベートエンドポイントやDNSレコードの構成について責任を負いません。 - -Azure構成タスクに関する問題は、Azureサポートに直接連絡してください。 -::: - -このセクションでは、Azureでプライベートエンドポイントを作成します。AzureポータルまたはTerraformを使用できます。 - -### オプション1: Azureポータルを使用してAzureでプライベートエンドポイントを作成する {#option-1-using-azure-portal-to-create-a-private-endpoint-in-azure} - -Azureポータルで、**プライベートリンクセンター → プライベートエンドポイント**を開きます。 - -Open Azure Private Center - -**作成**ボタンをクリックして、プライベートエンドポイント作成ダイアログを開きます。 - -Open Azure Private Center - ---- - -次の画面で、以下のオプションを指定します: - -- **サブスクリプション** / **リソースグループ**: プライベートエンドポイント用のAzureサブスクリプションおよびリソースグループを選択してください。 -- **名前**: **プライベートエンドポイント**用の名前を設定します。 -- **リージョン**: Private Linkを介してClickHouse Cloudに接続されるデプロイ済みVNetのあるリージョンを選択します。 - -上記の手順が完了したら、**次へ: リソース**ボタンをクリックします。 - -Create Private Endpoint Basic - ---- - -**AzureリソースのIDまたはエイリアスで接続**オプションを選択します。 - -**リソースIDまたはエイリアス**には、[Azure接続エイリアスを取得する](#obtain-azure-connection-alias-for-private-link)ステップで取得した`endpointServiceId`を使用します。 - -**次へ: 仮想ネットワーク**ボタンをクリックします。 - -Private Endpoint Resource Selection - ---- - -- **仮想ネットワーク**: Private Linkを使用してClickHouse Cloudに接続したいVNetを選択します。 -- **サブネット**: プライベートエンドポイントが作成されるサブネットを選択します。 - 
-オプション: - -- **アプリケーションセキュリティグループ**: プライベートエンドポイントにASGをアタッチし、ネットワークセキュリティグループでそれを使用してプライベートエンドポイントへの入出力ネットワークトラフィックをフィルタリングできます。 - -**次へ: DNS**ボタンをクリックします。 - -Private Endpoint Virtual Network Selection - -**次へ: タグ**ボタンをクリックします。 - ---- - -Private Endpoint DNS Configuration - -オプションで、プライベートエンドポイントにタグをアタッチできます。 - -**次へ: レビュー + 作成**ボタンをクリックします。 - ---- - -Private Endpoint Tags - -最後に、**作成**ボタンをクリックします。 - -Private Endpoint Review - -作成したプライベートエンドポイントの**接続ステータス**は**保留中**の状態になります。このプライベートエンドポイントをサービスの許可リストに追加すると、**承認済み**の状態に変更されます。 - -プライベートエンドポイントに関連するネットワークインターフェースを開き、**プライベートIPv4アドレス**(この例では10.0.0.4)をコピーします。次のステップでこの情報が必要になります。 - -Private Endpoint IP Address - -### オプション2: Terraformを使用してAzureでプライベートエンドポイントを作成する {#option-2-using-terraform-to-create-a-private-endpoint-in-azure} - -Terraformを使用してプライベートエンドポイントを作成するために、以下のテンプレートを使用します: - -```json -resource "azurerm_private_endpoint" "example_clickhouse_cloud" { - name = var.pe_name - location = var.pe_location - resource_group_name = var.pe_resource_group_name - subnet_id = var.pe_subnet_id - - private_service_connection { - name = "test-pl" - private_connection_resource_alias = "" - is_manual_connection = true - } -} -``` - -### プライベートエンドポイントの`resourceGuid`を取得する {#obtaining-private-endpoint-resourceguid} - -Private Linkを使用するには、プライベートエンドポイント接続GUIDをサービスの許可リストに追加する必要があります。 - -プライベートエンドポイントリソースGUIDはAzureポータルにのみ表示されます。前のステップで作成したプライベートエンドポイントを開き、**JSONビュー**をクリックします: - -Private Endpoint View - -プロパティの下にある`resourceGuid`フィールドを見つけ、この値をコピーします: - -Private Endpoint Resource GUID - -## プライベートリンク用のDNSを設定する {#setting-up-dns-for-private-link} - -プライベートリンクを介してリソースにアクセスするために、プライベートDNSゾーン(`${location_code}.privatelink.azure.clickhouse.cloud`)を作成し、それをVNetにアタッチする必要があります。 - -### プライベートDNSゾーンを作成する {#create-private-dns-zone} - -**オプション1: Azureポータルを使用** - -[Azureポータルを使用してAzureプライベートDNSゾーンを作成するためのガイド](https://learn.microsoft.com/en-us/azure/dns/private-dns-getstarted-portal)に従ってください。 - -**オプション2: Terraformを使用** - -プライベートDNSゾーンを作成するために、次のTerraformテンプレートを使用します: - -```json -resource "azurerm_private_dns_zone" "clickhouse_cloud_private_link_zone" { - name = "${var.location}.privatelink.azure.clickhouse.cloud" - resource_group_name = var.resource_group_name -} -``` - -### ワイルドカードDNSレコードを作成する {#create-a-wildcard-dns-record} - -ワイルドカードレコードを作成し、プライベートエンドポイントを指すようにします: - -**オプション1: Azureポータルを使用** - -1. `MyAzureResourceGroup`リソースグループを開き、`${region_code}.privatelink.azure.clickhouse.cloud`プライベートゾーンを選択します。 -2. + レコードセットを選択します。 -3. 名前には`*`と入力します。 -4. IPアドレスにはプライベートエンドポイントのIPアドレスを入力します。 -5. 
**OK**を選択します。 - -Private Link DNS Wildcard Setup - -**オプション2: Terraformを使用** - -ワイルドカードDNSレコードを作成するために、次のTerraformテンプレートを使用します: - -```json -resource "azurerm_private_dns_a_record" "example" { - name = "*" - zone_name = var.zone_name - resource_group_name = var.resource_group_name - ttl = 300 - records = ["10.0.0.4"] -} -``` - -### 仮想ネットワークリンクを作成する {#create-a-virtual-network-link} - -プライベートDNSゾーンと仮想ネットワークをリンクするには、仮想ネットワークリンクを作成する必要があります。 - -**オプション1: Azureポータルを使用** - -[プライベートDNSゾーンに仮想ネットワークをリンクする](https://learn.microsoft.com/en-us/azure/dns/private-dns-getstarted-portal#link-the-virtual-network)ためのガイドに従ってください。 - -**オプション2: Terraformを使用** - -:::note -DNSを設定する方法はいくつかあります。特定のユースケースに基づいてDNSを設定してください。 -::: - -[Azure接続エイリアスを取得する](#obtain-azure-connection-alias-for-private-link)ステップから取得した"DNS名"をプライベートエンドポイントのIPアドレスにポイントする必要があります。これにより、VPC/ネットワーク内のサービスやコンポーネントが適切に解決できるようになります。 - -### DNS設定を確認する {#verify-dns-setup} - -`xxxxxxxxxx.westus3.privatelink.azure.clickhouse.cloud`ドメインはプライベートエンドポイントのIPにポイントされる必要があります。(この例では10.0.0.4) - -```bash -nslookup xxxxxxxxxx.westus3.privatelink.azure.clickhouse.cloud. -サーバー: 127.0.0.53 -アドレス: 127.0.0.53#53 - -非権威的応答: -名前: xxxxxxxxxx.westus3.privatelink.azure.clickhouse.cloud -アドレス: 10.0.0.4 -``` - -## プライベートエンドポイントGUIDをClickHouse Cloud組織に追加する {#add-the-private-endpoint-guid-to-your-clickhouse-cloud-organization} - -### オプション1: ClickHouse Cloudコンソール {#option-1-clickhouse-cloud-console-1} - -組織にエンドポイントを追加するには、[プライベートエンドポイントGUIDをサービスの許可リストに追加する](#add-private-endpoint-guid-to-services-allow-list)ステップに進みます。ClickHouse Cloudコンソールを使用して`プライベートエンドポイントGUID`をサービスの許可リストに追加すると、自動的に組織にも追加されます。 - -エンドポイントを削除するには、**組織の詳細 -> プライベートエンドポイント**を開き、削除ボタンをクリックしてエンドポイントを削除します。 - -Remove Private Endpoint - -### オプション2: API {#option-2-api-1} - -コマンドを実行する前に次の環境変数を設定します: - -```bash -PROVIDER=azure -KEY_ID=<キーID> -KEY_SECRET=<キーシークレット> -ORG_ID= -ENDPOINT_ID=<プライベートエンドポイントresourceGuid> -REGION=<地域コード、Azure形式を使用> -``` - -[プライベートエンドポイント`resourceGuid`を取得する](#obtaining-private-endpoint-resourceguid)ステップからのデータを使用して`ENDPOINT_ID`環境変数を設定します。 - -プライベートエンドポイントを追加するために次のコマンドを実行します: - -```bash -cat < - -### オプション2: API {#option-2-api-2} - -コマンドを実行する前に次の環境変数を設定します: - -```bash -PROVIDER=azure -KEY_ID=<キーID> -KEY_SECRET=<キーシークレット> -ORG_ID= -ENDPOINT_ID=<プライベートエンドポイントresourceGuid> -INSTANCE_ID=<インスタンスID> -``` - -プライベートリンクを使用して利用可能な各サービスについて実行します。 - -プライベートエンドポイントをサービスの許可リストに追加するために次のコマンドを実行します: - -```bash -cat <APIまたは`DNS名`コンソールを使用する必要があります。 - -### プライベートDNSホスト名を取得する {#obtaining-the-private-dns-hostname} - -#### オプション1: ClickHouse Cloudコンソール {#option-1-clickhouse-cloud-console-3} - -ClickHouse Cloudコンソールで、**設定**に移動します。**プライベートエンドポイントを設定**ボタンをクリックします。開いたフライアウトで、**DNS名**をコピーします。 - -Private Endpoint DNS Name - -#### オプション2: API {#option-2-api-3} - -コマンドを実行する前に次の環境変数を設定します: - -```bash -KEY_ID=<キーID> -KEY_SECRET=<キーシークレット> -ORG_ID= -INSTANCE_ID=<インスタンスID> -``` - -次のコマンドを実行します: - -```bash -curl --silent --user "${KEY_ID:?}:${KEY_SECRET:?}" "https://api.clickhouse.cloud/v1/organizations/${ORG_ID:?}/services/${INSTANCE_ID:?}/privateEndpointConfig" | jq .result -``` - -以下のような応答を受け取るはずです: - -```response -{ - ... 
- "privateDnsHostname": "xxxxxxx.<地域コード>.privatelink.azure.clickhouse.cloud" -} -``` - -この例では、`xxxxxxx.region_code.privatelink.azure.clickhouse.cloud`ホスト名への接続はプライベートリンクにルーティングされます。一方、`xxxxxxx.region_code.azure.clickhouse.cloud`はインターネットを介ってルーティングされます。 - -プライベートリンクを使用してClickHouse Cloudサービスに接続するには、`privateDnsHostname`を使用してください。 - -## トラブルシューティング {#troubleshooting} - -### DNS設定をテストする {#test-dns-setup} - -次のコマンドを実行します: - -```bash -nslookup -``` -ここで「dns名」は[Azure接続エイリアスを取得する](#obtain-azure-connection-alias-for-private-link)からの`privateDnsHostname`APIまたは`DNS名`コンソールです。 - -次のような応答を受け取るはずです: - -```response -非権威的応答: -名前: -アドレス: 10.0.0.4 -``` - -### 接続がリセットされた {#connection-reset-by-peer} - -おそらく、プライベートエンドポイントGUIDがサービスの許可リストに追加されていません。[_プライベートエンドポイントGUIDをサービスの許可リストに追加する_ステップ](#add-private-endpoint-guid-to-services-allow-list)を再確認してください。 - -### プライベートエンドポイントが保留中の状態 {#private-endpoint-is-in-pending-state} - -おそらく、プライベートエンドポイントGUIDがサービスの許可リストに追加されていません。[_プライベートエンドポイントGUIDをサービスの許可リストに追加する_ステップ](#add-private-endpoint-guid-to-services-allow-list)を再確認してください。 - -### 接続をテストする {#test-connectivity} - -プライベートリンクを介して接続する際に問題がある場合は、`openssl`を使用して接続を確認してください。プライベートリンクエンドポイントのステータスが`受理済み`であることを確認します。 - -OpenSSLは接続できるはずです(出力にCONNECTEDと表示されます)。`errno=104`は予想されることです。 - -```bash -openssl s_client -connect abcd.westus3.privatelink.azure.clickhouse.cloud.cloud:9440 -``` - -```response - -# highlight-next-line -CONNECTED(00000003) -write:errno=104 ---- -no peer certificate available ---- -No client certificate CA names sent ---- -SSL handshake has read 0 bytes and written 335 bytes -Verification: OK ---- -New, (NONE), Cipher is (NONE) -Secure Renegotiation IS NOT supported -Compression: NONE -Expansion: NONE -No ALPN negotiated -Early data was not sent -Verify return code: 0 (ok) -``` - -### プライベートエンドポイントフィルタを確認する {#checking-private-endpoint-filters} - -コマンドを実行する前に次の環境変数を設定します: - -```bash -KEY_ID=<キーID> -KEY_SECRET=<キーシークレット> -ORG_ID= -INSTANCE_ID=<インスタンスID> -``` - -プライベートエンドポイントフィルタを確認するために次のコマンドを実行します: - -```bash -curl --silent --user "${KEY_ID:?}:${KEY_SECRET:?}" -X GET -H "Content-Type: application/json" "https://api.clickhouse.cloud/v1/organizations/${ORG_ID:?}/services/${INSTANCE_ID:?}" | jq .result.privateEndpointIds -``` - -## 更なる情報 {#more-information} - -Azure Private Linkに関する詳細情報については、[azure.microsoft.com/en-us/products/private-link](https://azure.microsoft.com/en-us/products/private-link)をご覧ください。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/security/azure-privatelink.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/security/azure-privatelink.md.hash deleted file mode 100644 index 5ffe9af3ec8..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/security/azure-privatelink.md.hash +++ /dev/null @@ -1 +0,0 @@ -48bc465c726d7a9e diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/security/cloud-access-management/cloud-access-management.md b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/security/cloud-access-management/cloud-access-management.md deleted file mode 100644 index af9e1e02d29..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/security/cloud-access-management/cloud-access-management.md +++ /dev/null @@ -1,128 +0,0 @@ ---- -sidebar_label: 'Overview' -slug: '/cloud/security/cloud-access-management/overview' -title: 'Cloud access management' -description: 'Describes how access control in ClickHouse cloud works, including - information on role types' ---- - -import Image from '@theme/IdealImage'; -import 
user_grant_permissions_options from '@site/static/images/cloud/security/cloud-access-management/user_grant_permissions_options.png'; - - - -# ClickHouse Cloudにおけるアクセス制御 {#access-control-in-clickhouse-cloud} -ClickHouseは、コンソールとデータベースの2か所でユーザーアクセスを制御します。コンソールアクセスは、clickhouse.cloudユーザーインターフェイスを介して管理されます。データベースアクセスは、データベースユーザーアカウントとロールを介して管理されます。さらに、コンソールユーザーには、SQLコンソールを介してデータベースと対話するための権限を持つロールをデータベース内に付与することができます。 - -## コンソールユーザーとロール {#console-users-and-roles} -コンソール > ユーザーとロールページで、組織およびサービスロールの割り当てを設定します。各サービスの設定ページでSQLコンソールロールの割り当てを設定します。 - -ユーザーには組織レベルのロールが割り当てられる必要があり、一つまたは複数のサービスのためにサービスロールが任意で割り当てられることがあります。サービス設定ページで、ユーザーがSQLコンソールにアクセスするためのサービスロールが任意で設定されることがあります。 -- Organization Adminロールが割り当てられているユーザーには、デフォルトでService Adminが付与されます。 -- SAML統合を介して組織に追加されたユーザーには、メンバーのロールが自動的に割り当てられます。 -- Service AdminはデフォルトでSQLコンソール管理者ロールが付与されます。SQLコンソールの権限は、サービス設定ページで削除されることがあります。 - -| コンテキスト | ロール | 説明 | -|:---------------|:------------------------|:-------------------------------------------------| -| 組織 | Admin | 組織のすべての管理活動を行い、すべての設定を制御します。デフォルトで組織内の最初のユーザーに割り当てられます。 | -| 組織 | Developer | サービスを除くすべての表示アクセス、読み取り専用APIキーを生成する能力。 | -| 組織 | Billing | 使用状況および請求書を表示し、支払い方法を管理します。 | -| 組織 | Member | サインインのみで、個人のプロフィール設定を管理する能力があります。デフォルトでSAML SSOユーザーに割り当てられます。 | -| サービス | Service Admin | サービス設定を管理します。 | -| サービス | Service Read Only | サービスおよび設定を表示します。 | -| SQLコンソール | SQLコンソール管理者 | サービス内のデータベースに対する管理アクセス(Defaultデータベースロールと同等)。 | -| SQLコンソール | SQLコンソール読み取り専用 | サービス内のデータベースに対する読み取り専用のアクセス。 | -| SQLコンソール | カスタム | SQL [`GRANT`](/sql-reference/statements/grant)文を使用して設定します。SQLコンソールユーザーには、ユーザー名の後にロールを付けて割り当てます。 | - -SQLコンソールユーザーのためにカスタムロールを作成し、一般的なロールを付与するには、以下のコマンドを実行します。メールアドレスは、コンソール内のユーザーのメールアドレスと一致する必要があります。 - -1. database_developerロールを作成し、`SHOW`、`CREATE`、`ALTER`、および`DELETE`権限を付与します。 - - ```sql - CREATE ROLE OR REPLACE database_developer; - GRANT SHOW ON * TO database_developer; - GRANT CREATE ON * TO database_developer; - GRANT ALTER ON * TO database_developer; - GRANT DELETE ON * TO database_developer; - ``` - -2. 
SQLコンソールユーザーmy.user@domain.comのためのロールを作成し、database_developerロールを割り当てます。 - - ```sql - CREATE ROLE OR REPLACE `sql-console-role:my.user@domain.com`; - GRANT database_developer TO `sql-console-role:my.user@domain.com`; - ``` - -### SQLコンソールのパスワードレス認証 {#sql-console-passwordless-authentication} -SQLコンソールユーザーは各セッションのために作成され、自動的に回転されるX.509証明書を使用して認証されます。ユーザーはセッション終了時に削除されます。監査のためのアクセスリストを生成する際は、コンソール内のサービスの設定タブに移動し、データベース内に存在するデータベースユーザーに加えてSQLコンソールアクセスを記録してください。カスタムロールが設定されている場合、ユーザーのアクセスは、ユーザー名で終わるロールにリストされます。 - -## データベース権限 {#database-permissions} -以下をサービスとデータベース内でSQL [GRANT](/sql-reference/statements/grant)文を使用して設定します。 - -| ロール | 説明 | -|:------------------------|:-----------------------------------------------------------------------------------------------------------| -| Default | サービスへのフル管理アクセス | -| Custom | SQL [`GRANT`](/sql-reference/statements/grant)文を使用して設定します | - -- データベースロールは加算的です。これは、ユーザーが2つのロールのメンバーである場合、ユーザーは2つのロールで付与された最も多くのアクセスを持つことを意味します。ロールを追加してもアクセスを失いません。 -- データベースロールは、他のロールに付与することができ、階層構造を結果として持ちます。ロールは、メンバーであるロールのすべての権限を継承します。 -- データベースロールはサービスごとに固有であり、同じサービス内の複数のデータベースに適用される場合があります。 - -以下の図は、ユーザーが権限を付与される異なる方法を示しています。 - -ユーザーが権限を付与される異なる方法を示す図 - -### 初期設定 {#initial-settings} -データベースには、`default`という名前のアカウントが自動的に追加され、サービス作成時にdefault_roleが付与されます。サービスを作成するユーザーには、サービスが作成されたときに`default`アカウントに割り当てられる自動生成されたランダムパスワードが提示されます。初期設定後はパスワードは表示されず、後でコンソール内でService Admin権限を持つユーザーが変更できます。このアカウントまたはコンソール内でService Admin権限を持つアカウントは、いつでも追加のデータベースユーザーとロールを設定できます。 - -:::note -コンソール内の`default`アカウントに割り当てられたパスワードを変更するには、左側のサービスメニューに移動し、サービスにアクセスし、設定タブに移動してパスワードリセットボタンをクリックします。 -::: - -新しいユーザーアカウントを作成し、そのユーザーにdefault_roleを付与することをお勧めします。これは、ユーザーによって実行された活動がそのユーザーIDに識別され、`default`アカウントは非常時対応の活動用に予約されるためです。 - - ```sql - CREATE USER userID IDENTIFIED WITH sha256_hash by 'hashed_password'; - GRANT default_role to userID; - ``` - -ユーザーは、SHA256ハッシュジェネレーターやPythonの`hashlib`のようなコード関数を使用して、適切な複雑さを持つ12文字以上のパスワードをSHA256文字列に変換し、それをシステム管理者にパスワードとして提供することができます。これにより、管理者はクリアテキストのパスワードを見たり扱ったりしないことが保証されます。 - -### SQLコンソールユーザーのデータベースアクセスリスト {#database-access-listings-with-sql-console-users} -以下のプロセスを使用して、組織内のSQLコンソールとデータベース全体の完全なアクセスリストを生成できます。 - -1. データベース内のすべてのグラントのリストを取得するには、以下のクエリを実行します。 - - ```sql - SELECT grants.user_name, - grants.role_name, - users.name AS role_member, - grants.access_type, - grants.database, - grants.table - FROM system.grants LEFT OUTER JOIN system.role_grants ON grants.role_name = role_grants.granted_role_name - LEFT OUTER JOIN system.users ON role_grants.user_name = users.name - - UNION ALL - - SELECT grants.user_name, - grants.role_name, - role_grants.role_name AS role_member, - grants.access_type, - grants.database, - grants.table - FROM system.role_grants LEFT OUTER JOIN system.grants ON role_grants.granted_role_name = grants.role_name - WHERE role_grants.user_name is null; - ``` - -2. このリストをSQLコンソールへのアクセスを持つコンソールユーザーに結びつけます。 - - a. コンソールに移動します。 - - b. 該当するサービスを選択します。 - - c. 左側の設定を選択します。 - - d. SQLコンソールアクセスセクションまでスクロールします。 - - e. 
データベースへのアクセスを持つユーザーの番号のリンク`There are # users with access to this service.`をクリックして、ユーザーリストを表示します。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/security/cloud-access-management/cloud-access-management.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/security/cloud-access-management/cloud-access-management.md.hash deleted file mode 100644 index c9af84de708..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/security/cloud-access-management/cloud-access-management.md.hash +++ /dev/null @@ -1 +0,0 @@ -af348ab62ae71a97 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/security/cloud-access-management/cloud-authentication.md b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/security/cloud-access-management/cloud-authentication.md deleted file mode 100644 index 0e5975460fc..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/security/cloud-access-management/cloud-authentication.md +++ /dev/null @@ -1,130 +0,0 @@ ---- -sidebar_label: 'クラウド認証' -slug: '/cloud/security/cloud-authentication' -title: 'クラウド認証' -description: 'このガイドでは、認証の構成に関するいくつかの良い手法を説明しています。' ---- - -import ScalePlanFeatureBadge from '@theme/badges/ScalePlanFeatureBadge' -import EnterprisePlanFeatureBadge from '@theme/badges/EnterprisePlanFeatureBadge' - - -# Cloud Authentication - -ClickHouse Cloudは、複数の認証方法を提供しています。このガイドでは、認証を設定するための良いプラクティスについて説明します。認証方法を選択する際は、常にセキュリティチームに確認してください。 - -## Password Settings {#password-settings} - -現在、当社のコンソールおよびサービス(データベース)の最小パスワード設定は、[NIST 800-63B](https://pages.nist.gov/800-63-3/sp800-63b.html#sec4) 認証者保証レベル1に準拠しています: -- 最小12文字 -- 次の4つの項目のうち3つを含む: - - 1つの大文字 - - 1つの小文字 - - 1つの数字 - - 1つの特殊文字 - -## Email + Password {#email--password} - -ClickHouse Cloudでは、メールアドレスとパスワードで認証することができます。この方法を使用する際は、ClickHouseアカウントを保護するために強力なパスワードを使用するのが最善です。記憶できるパスワードを考案するための多くのオンラインリソースがあります。あるいは、ランダムパスワードジェネレータを使用し、パスワードマネージャーにパスワードを保存してセキュリティを強化することもできます。 - -## SSO Using Google or Microsoft Social Authentication {#sso-using-google-or-microsoft-social-authentication} - -貴社がGoogle WorkspaceまたはMicrosoft 365を使用している場合、ClickHouse Cloud内の現在のシングルサインオン設定を活用できます。これを行うには、会社のメールアドレスを使用してサインアップし、他のユーザーを会社のメールアドレスを利用して招待するだけです。その結果、ユーザーはClickHouse Cloudに認証する前に、貴社のアイデンティティプロバイダーまたは直接GoogleやMicrosoftの認証を通じて、自社のログインフローを使用してログインする必要があります。 - -## Multi-Factor Authentication {#multi-factor-authentication} - -メール + パスワードまたはソーシャル認証を持つユーザーは、マルチファクター認証(MFA)を使用してアカウントをさらに保護できます。MFAを設定するには: -1. console.clickhouse.cloudにログインします -2. 左上隅のClickHouseロゴの横にあるイニシャルをクリックします -3. プロフィールを選択します -4. 左側のセキュリティを選択します -5. 認証アプリのタイルで「セットアップ」をクリックします -6. Authy、1Password、Google Authenticatorなどの認証アプリを使用してQRコードをスキャンします -7. コードを入力して確認します -8. 次の画面で、回復コードをコピーして安全な場所に保管します -9. `I have safely recorded this code`の横にあるチェックボックスをチェックします -10. 続行をクリックします - -## Account recovery {#account-recovery} - -
- Obtain recovery code - - 以前にMFAに登録していて、回復コードを作成しなかったか失くした場合は、以下の手順で新しい回復コードを取得してください: - 1. https://console.clickhouse.cloudにアクセスします - 2. 認証情報とMFAでサインインします - 3. 左上隅のプロフィールにアクセスします - 4. 左側のセキュリティをクリックします - 5. 認証アプリの横にあるゴミ箱をクリックします - 6. 認証アプリを削除をクリックします - 7. コードを入力して続行をクリックします - 8. 認証アプリセクションで「セットアップ」をクリックします - 9. QRコードをスキャンし、新しいコードを入力します - 10. 回復コードをコピーして安全な場所に保管します - 11. `I have safely recorded this code`の横にあるチェックボックスをチェックします - 12. 続行をクリックします - -
-
- Forgot password
-
- パスワードを忘れた場合は、以下の手順でセルフサービス回復を行ってください:
- 1. https://console.clickhouse.cloudにアクセスします
- 2. メールアドレスを入力して「続行」をクリックします
- 3. 「パスワードを忘れましたか?」をクリックします
- 4. 「パスワードリセットリンクを送信」をクリックします
- 5. メールを確認し、メールから「パスワードをリセット」をクリックします
- 6. 新しいパスワードを入力し、確認して「パスワードを更新」をクリックします
- 7. 「サインインに戻る」をクリックします
- 8. 新しいパスワードで通常通りサインインします
-
-
- Lost MFA device or token - - MFAデバイスを失くしたり、トークンを削除した場合は、以下の手順で回復して新しいトークンを作成してください: - 1. https://console.clickhouse.cloudにアクセスします - 2. 認証情報を入力して続行をクリックします - 3. マルチファクター認証画面でキャンセルをクリックします - 4. 回復コードをクリックします - 5. コードを入力して続行を押します - 6. 新しい回復コードをコピーして安全な場所に保管します - 7. `I have safely recorded this code`の横のボックスにチェックを入れ、続行をクリックします - 8. サインイン後、左上のプロフィールに移動します - 9. 左上のセキュリティをクリックします - 10. 古い認証アプリを削除するために、認証アプリの横にあるゴミ箱アイコンをクリックします - 11. 認証アプリを削除をクリックします - 12. マルチファクター認証のプロンプトが表示されたら、キャンセルをクリックします - 13. 回復コードをクリックします - 14. 回復コードを入力し(これはステップ7で生成された新しいコードです)、続行をクリックします - 15. 新しい回復コードをコピーして安全な場所に保管します - これは削除プロセスの間に画面を離れた場合のフェイルセーフです - 16. `I have safely recorded this code`の横のボックスにチェックを入れ、続行をクリックします - 17. 上記のプロセスに従って新しいMFAファクターをセットアップします - -
-
- Lost MFA and recovery code - - MFAデバイスと回復コードを失った場合、またはMFAデバイスを失い回復コードを取得していない場合は、以下の手順でリセットを要求してください: - - **チケットを提出する**: 管理ユーザーが他にいる組織に所属している場合、たとえ単一ユーザー組織にアクセスを試みていても、Adminロールに割り当てられた組織のメンバーに、組織にログインしてあなたの代わりにMFAをリセットするためのサポートチケットを提出するよう頼んでください。リクエストが認証されていることを確認でき次第、MFAをリセットし、Adminに通知します。通常通りMFAなしでサインインし、必要に応じて新しいファクターを登録するためにプロフィール設定に移動してください。 - - **メールを介してリセット**: 組織内で唯一のユーザーである場合、アカウントに関連付けられたメールアドレスを使用して、サポートケースをメールで提出してください(support@clickhouse.com)。リクエストが正しいメールから来ていることを確認でき次第、MFAとパスワードをリセットします。パスワードリセットリンクにアクセスするためにメールにアクセスしてください。新しいパスワードを設定した後、必要に応じて新しいファクターを登録するためにプロフィール設定に移動してください。 - -
- -## SAML SSO {#saml-sso} - - - -ClickHouse Cloudは、セキュリティアサーションマークアップ言語(SAML)シングルサインオン(SSO)もサポートしています。詳細については、[SAML SSO Setup](/cloud/security/saml-setup)を参照してください。 - -## Database User ID + Password {#database-user-id--password} - -パスワードを保護するために、[ユーザーアカウントの作成](/sql-reference/statements/create/user.md)時にSHA256_hashメソッドを使用してください。 - -**TIP:** 管理者権限のないユーザーは自分自身のパスワードを設定できないため、アカウントをセットアップするために管理者に提供する前に、ユーザーに[こちらのような](https://tools.keycdn.com/sha256-online-generator)ジェネレータを使用してパスワードをハッシュするよう依頼してください。パスワードは上記の[要件](#password-settings)に従う必要があります。 - -```sql -CREATE USER userName IDENTIFIED WITH sha256_hash BY 'hash'; -``` diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/security/cloud-access-management/cloud-authentication.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/security/cloud-access-management/cloud-authentication.md.hash deleted file mode 100644 index ee1185d2e2c..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/security/cloud-access-management/cloud-authentication.md.hash +++ /dev/null @@ -1 +0,0 @@ -cb8fb09406f41a08 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/security/cloud-access-management/index.md b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/security/cloud-access-management/index.md deleted file mode 100644 index c36d481851c..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/security/cloud-access-management/index.md +++ /dev/null @@ -1,15 +0,0 @@ ---- -slug: '/cloud/security/cloud-access-management' -title: 'Cloud Access Management' -description: 'Cloud Access Management Table Of Contents' ---- - - - -| Page | Description | -|----------------------------------------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------| -| [概要](/cloud/security/cloud-access-management/overview) | ClickHouse Cloudにおけるアクセス制御の概要 | -| [クラウド認証](/cloud/security/cloud-authentication) | 認証の設定に関する良いプラクティスを探求するガイド | -| [SAML SSOのセットアップ](/cloud/security/saml-setup) | SAML SSOのセットアップ方法に関するガイド。 | -| [一般的なアクセス管理クエリ](/cloud/security/common-access-management-queries) | SQLユーザーとロールの定義、そしてこれらの権限をデータベース、テーブル、行、カラムに適用する基本を示す記事。 | -| [新しいユーザーの招待](/cloud/security/inviting-new-users) | 組織に新しいユーザーを招待し、彼らにロールを割り当てる方法の手引き。 | diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/security/cloud-access-management/index.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/security/cloud-access-management/index.md.hash deleted file mode 100644 index 05d6705ec16..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/security/cloud-access-management/index.md.hash +++ /dev/null @@ -1 +0,0 @@ -bb9f8cca5bf83136 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/security/cloud-endpoints-api.md b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/security/cloud-endpoints-api.md deleted file mode 100644 index 318aa97f747..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/security/cloud-endpoints-api.md +++ /dev/null @@ -1,47 +0,0 @@ ---- -slug: '/manage/security/cloud-endpoints-api' -sidebar_label: 'Cloud IP アドレス' -title: 'Cloud IP アドレス' -description: 'このページは、ClickHouse内のCloud Endpoints APIセキュリティ機能に関するドキュメントです。認証および認可メカニズムを介してアクセスを管理することで、ClickHouseデプロイメントをセキュアにする方法について詳細に説明しています。' ---- - -import Image from '@theme/IdealImage'; -import aws_rds_mysql from '@site/static/images/_snippets/aws-rds-mysql.png'; -import 
gcp_authorized_network from '@site/static/images/_snippets/gcp-authorized-network.png'; - -## Static IPs API {#static-ips-api} - -静的IPのリストを取得する必要がある場合は、次のClickHouse Cloud APIエンドポイントを使用できます: [`https://api.clickhouse.cloud/static-ips.json`](https://api.clickhouse.cloud/static-ips.json)。このAPIは、地域やクラウドごとのingress/egress IPやS3エンドポイントなど、ClickHouse Cloudサービスのエンドポイントを提供します。 - -MySQLやPostgreSQLエンジンのような統合を使用している場合、ClickHouse Cloudがあなたのインスタンスにアクセスするための承認が必要な場合があります。このAPIを使用して公開IPを取得し、GCPの`firewalls`や`Authorized networks`、またはAzureやAWSの`Security Groups`、あるいは使用している他のインフラストラクチャのエグレス管理システムに構成できます。 - -例えば、AWSの地域`ap-south-1`でホストされているClickHouse Cloudサービスにアクセスを許可するには、その地域の`egress_ips`アドレスを追加できます: - -```bash -❯ curl -s https://api.clickhouse.cloud/static-ips.json | jq '.' -{ - "aws": [ - { - "egress_ips": [ - "3.110.39.68", - "15.206.7.77", - "3.6.83.17" - ], - "ingress_ips": [ - "15.206.78.111", - "3.6.185.108", - "43.204.6.248" - ], - "region": "ap-south-1", - "s3_endpoints": "vpce-0a975c9130d07276d" - }, -... -``` - -例えば、`us-east-2`で実行されているAWS RDSインスタンスがClickHouse Cloudサービスに接続する必要がある場合、以下のInboundセキュリティグループルールを持っている必要があります: - -AWS Security group rules - -同じClickHouse Cloudサービスが`us-east-2`で実行されているが、今回はGCPのMySQLに接続する場合、`Authorized networks`は次のようになります: - -GCP Authorized networks diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/security/cloud-endpoints-api.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/security/cloud-endpoints-api.md.hash deleted file mode 100644 index a079c2a88cc..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/security/cloud-endpoints-api.md.hash +++ /dev/null @@ -1 +0,0 @@ -1b65274993bbabb5 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/security/cmek.md b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/security/cmek.md deleted file mode 100644 index 8e93e390bad..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/security/cmek.md +++ /dev/null @@ -1,111 +0,0 @@ ---- -sidebar_label: 'Enhanced Encryption' -slug: '/cloud/security/cmek' -title: 'Customer Managed Encryption Keys (CMEK)' -description: 'Learn more about customer managed encryption keys' ---- - -import Image from '@theme/IdealImage'; -import EnterprisePlanFeatureBadge from '@theme/badges/EnterprisePlanFeatureBadge' -import cmek_performance from '@site/static/images/_snippets/cmek-performance.png'; - - -# ClickHouse 強化暗号化 - - - -静止データは、クラウドプロバイダー管理の AES 256 キーを使用してデフォルトで暗号化されています。顧客は、サービスデータに対して追加の保護層を提供するために透過的データ暗号化 (TDE) を有効にするか、独自のキーを提供して顧客管理暗号化キー (CMEK) を実装することができます。 - -強化された暗号化は現在、AWS および GCP サービスで利用可能です。Azure は近日中に対応予定です。 - -## 透過的データ暗号化 (TDE) {#transparent-data-encryption-tde} - -TDEは、サービス作成時に有効にしなければなりません。既存のサービスは作成後に暗号化することはできません。 - -1. `新しいサービスを作成` を選択します -2. サービスに名前を付けます -3. クラウドプロバイダーとして AWS または GCP を選択し、ドロップダウンから希望のリージョンを選択します -4. エンタープライズ機能のドロップダウンをクリックし、透過的データ暗号化 (TDE) を有効にします -5. サービスを作成をクリックします - -## 顧客管理暗号化キー (CMEK) {#customer-managed-encryption-keys-cmek} - -:::warning -ClickHouse Cloud サービスを暗号化するために使用される KMS キーを削除すると、ClickHouse サービスが停止し、そのデータは取得できなくなり、既存のバックアップも失われます。キーをローテーションする際に誤ってデータを失わないように、削除する前に古い KMS キーを一定期間維持することをお勧めします。 -::: - -サービスが TDE で暗号化されると、顧客はキーを更新して CMEK を有効にできます。TDE 設定を更新した後、サービスは自動的に再起動されます。このプロセス中、古い KMS キーがデータ暗号化キー (DEK) を復号し、新しい KMS キーが DEK を再暗号化します。これにより、再起動後のサービスは今後の暗号化操作に新しい KMS キーを使用します。このプロセスには数分かかることがあります。 - -
- AWS KMS による CMEK の有効化
-
-1. ClickHouse Cloud で暗号化されたサービスを選択します
-2. 左側の設定をクリックします
-3. 画面の下部で、ネットワークセキュリティ情報を展開します
-4. 暗号化ロール ID (AWS) または暗号化サービスアカウント (GCP) をコピーします - 今後のステップで必要になります
-5. [AWS 用の KMS キーを作成](https://docs.aws.amazon.com/kms/latest/developerguide/create-keys.html)
-6. キーをクリックします
-7. 次のように AWS キー ポリシーを更新します(`Sid` は ASCII 文字で指定する必要があります):
-
-    ```json
-    {
-        "Sid": "Allow ClickHouse Access",
-        "Effect": "Allow",
-        "Principal": {
-            "AWS": "{ 暗号化ロール ID }"
-        },
-        "Action": [
-            "kms:Encrypt",
-            "kms:Decrypt",
-            "kms:ReEncrypt*",
-            "kms:DescribeKey"
-        ],
-        "Resource": "*"
-    }
-    ```
-
-8. キーポリシーを保存します
-9. キー ARN をコピーします
-10. ClickHouse Cloud に戻り、サービス設定の透過的データ暗号化セクションにキー ARN を貼り付けます
-11. 変更を保存します
-
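-参考までに、上記の手順 5〜7(KMS キーの作成とキーポリシーの更新)は AWS CLI でも実行できます。以下はあくまで想定を置いたスケッチです。ポリシー全文(アカウント管理用のデフォルトステートメントに加えて、上記の ClickHouse 用ステートメントを含むもの)を `key-policy.json` として保存済みであること、また `<キーID>` と `<リージョン>` を実際の値に置き換えることを前提としています。
-
-```bash
-# KMS キーを作成する(出力される KeyId / Arn を控えておきます)
-aws kms create-key \
-  --description "ClickHouse Cloud CMEK key" \
-  --region <リージョン>
-
-# key-policy.json(デフォルトの管理用ステートメント + 上記の ClickHouse 用ステートメント)をキーに適用する
-aws kms put-key-policy \
-  --key-id <キーID> \
-  --policy-name default \
-  --policy file://key-policy.json
-
-# 適用されたキーポリシーを確認する
-aws kms get-key-policy \
-  --key-id <キーID> \
-  --policy-name default \
-  --output text
-```
-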
- -
- GCP KMS による CMEK の有効化
-
-1. ClickHouse Cloud で暗号化されたサービスを選択します
-2. 左側の設定をクリックします
-3. 画面の下部で、ネットワークセキュリティ情報を展開します
-4. 暗号化サービスアカウント (GCP) をコピーします - 今後のステップで必要になります
-5. [GCP 用の KMS キーを作成](https://cloud.google.com/kms/docs/create-key)
-6. キーをクリックします
-7. 上記ステップ 4 でコピーした GCP 暗号化サービスアカウントに次の権限を付与します。
-    - Cloud KMS CryptoKey Encrypter/Decrypter
-    - Cloud KMS Viewer
-8. キー権限を保存します
-9. キーリソースパスをコピーします
-10. ClickHouse Cloud に戻り、サービス設定の透過的データ暗号化セクションにキーリソースパスを貼り付けます
-11. 変更を保存します
-
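-参考までに、上記の手順 7 の権限付与は gcloud CLI でも行えます。以下は一例のスケッチで、`<キー名>`、`<キーリング名>`、`<ロケーション>`、`<暗号化サービスアカウント>` は実際の値(手順 4 でコピーしたサービスアカウントなど)に置き換えることを前提としています。
-
-```bash
-# 暗号化サービスアカウントに Cloud KMS CryptoKey Encrypter/Decrypter ロールを付与する
-gcloud kms keys add-iam-policy-binding <キー名> \
-  --keyring <キーリング名> \
-  --location <ロケーション> \
-  --member "serviceAccount:<暗号化サービスアカウント>" \
-  --role roles/cloudkms.cryptoKeyEncrypterDecrypter
-
-# 同様に Cloud KMS Viewer ロールを付与する
-gcloud kms keys add-iam-policy-binding <キー名> \
-  --keyring <キーリング名> \
-  --location <ロケーション> \
-  --member "serviceAccount:<暗号化サービスアカウント>" \
-  --role roles/cloudkms.viewer
-```
-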
- -## キーローテーション {#key-rotation} - -CMEK を設定した後は、上記の手順に従って新しい KMS キーを作成し、権限を付与してキーをローテーションします。サービス設定に戻り、新しい ARN (AWS) またはキーリソースパス (GCP) を貼り付けて設定を保存します。サービスは新しいキーを適用するために再起動します。 - -## バックアップと復元 {#backup-and-restore} - -バックアップは、関連するサービスと同じキーを使用して暗号化されます。暗号化されたバックアップを復元すると、元のインスタンスと同じ KMS キーを使用する暗号化されたインスタンスが作成されます。必要に応じて、復元後に KMS キーをローテーションすることができます。詳細については、[キーローテーション](#key-rotation)を参照してください。 - -## KMS キーポーラー {#kms-key-poller} - -CMEK を使用している場合、提供された KMS キーの有効性は 10 分ごとにチェックされます。KMS キーへのアクセスが無効になると、ClickHouse サービスは停止します。サービスを再開するには、このガイドの手順に従って KMS キーへのアクセスを復元し、その後サービスを再起動します。 - -## パフォーマンス {#performance} - -このページに記載されているように、ClickHouse の組み込みの [データ暗号化機能のための仮想ファイルシステム](/operations/storing-data#encrypted-virtual-file-system) を使用してデータを暗号化および保護します。 - -この機能で使用されるアルゴリズムは `AES_256_CTR` であり、ワークロードに応じて 5 ~ 15% のパフォーマンスペナルティが期待されています: - -CMEK パフォーマンスペナルティ diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/security/cmek.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/security/cmek.md.hash deleted file mode 100644 index a6d7ee1cfb4..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/security/cmek.md.hash +++ /dev/null @@ -1 +0,0 @@ -c4d8c9c41fecc037 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/security/common-access-management-queries.md b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/security/common-access-management-queries.md deleted file mode 100644 index 066ef3da7da..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/security/common-access-management-queries.md +++ /dev/null @@ -1,70 +0,0 @@ ---- -sidebar_label: '共通アクセス管理クエリ' -title: '共通アクセス管理クエリ' -slug: '/cloud/security/common-access-management-queries' -description: 'この記事では、SQLユーザーとロールの基本的な定義方法、そしてそれらの権限とアクセス許可をデータベース、テーブル、行、およびカラムに適用する方法を示します。' ---- - -import CommonUserRolesContent from '@site/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_users-and-roles-common.md'; - - -# 一般アクセス管理クエリ - -:::tip セルフマネージド -セルフマネージドの ClickHouse を使用している場合は、 [SQL ユーザーとロール](/guides/sre/user-management/index.md) をご覧ください。 -::: - -この記事では、SQL ユーザーとロールを定義し、それらの権限をデータベース、テーブル、行、カラムに適用する基本について説明します。 - -## 管理者ユーザー {#admin-user} - -ClickHouse Cloud サービスには、サービスが作成されると同時に作成される管理者ユーザー `default` があります。 パスワードはサービス作成時に提供され、**Admin** ロールを持つ ClickHouse Cloud ユーザーによってリセット可能です。 - -ClickHouse Cloud サービスに追加の SQL ユーザーを追加する際には、SQL ユーザー名とパスワードが必要です。 彼らに管理レベルの権限を付与したい場合は、`default_role` を新しいユーザーに割り当ててください。 例えば、ユーザー `clickhouse_admin` を追加する場合: - -```sql -CREATE USER IF NOT EXISTS clickhouse_admin -IDENTIFIED WITH sha256_password BY 'P!@ssword42!'; -``` - -```sql -GRANT default_role TO clickhouse_admin; -``` - -:::note -SQL コンソールを使用する際、SQL ステートメントは `default` ユーザーとして実行されません。 代わりに、ステートメントは `sql-console:${cloud_login_email}` という名前のユーザーとして実行され、`cloud_login_email` は現在クエリを実行しているユーザーのメールアドレスです。 - -これらの自動生成された SQL コンソールユーザーは `default` ロールを持っています。 -::: - -## パスワードなし認証 {#passwordless-authentication} - -SQL コンソールには 2 つのロールが用意されています: `sql_console_admin` は `default_role` と同じ権限を持ち、 `sql_console_read_only` は読み取り専用の権限を持ちます。 - -管理者ユーザーはデフォルトで `sql_console_admin` ロールが割り当てられるため、何も変更されません。 ただし、`sql_console_read_only` ロールにより、非管理者ユーザーに読み取り専用またはフルアクセスを任意のインスタンスに許可できます。 管理者がこのアクセスを構成する必要があります。 ロールは `GRANT` または `REVOKE` コマンドを使用して、インスタンス特有の要件により適合させることができ、これらのロールに加えた変更は永続化されます。 - -### 運用レベルのアクセス制御 {#granular-access-control} - -このアクセス制御機能は、ユーザーごとに詳細な制御を手動で設定することもできます。 新しい `sql_console_*` ロールをユーザーに割り当てる前に、`sql-console-role:` という名前空間に一致する SQL コンソールユーザー固有のデータベースロールを作成する必要があります。 例えば: - -```sql -CREATE ROLE OR REPLACE 
sql-console-role:; -GRANT TO sql-console-role:; -``` - -一致するロールが検出されると、それがボイラープレートロールの代わりにユーザーに割り当てられます。 これにより、`sql_console_sa_role` や `sql_console_pm_role` などのロールを作成し、特定のユーザーに付与するなど、より複雑なアクセス制御構成を導入できます。 例えば: - -```sql -CREATE ROLE OR REPLACE sql_console_sa_role; -GRANT TO sql_console_sa_role; -CREATE ROLE OR REPLACE sql_console_pm_role; -GRANT TO sql_console_pm_role; -CREATE ROLE OR REPLACE `sql-console-role:christoph@clickhouse.com`; -CREATE ROLE OR REPLACE `sql-console-role:jake@clickhouse.com`; -CREATE ROLE OR REPLACE `sql-console-role:zach@clickhouse.com`; -GRANT sql_console_sa_role to `sql-console-role:christoph@clickhouse.com`; -GRANT sql_console_sa_role to `sql-console-role:jake@clickhouse.com`; -GRANT sql_console_pm_role to `sql-console-role:zach@clickhouse.com`; -``` - - diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/security/common-access-management-queries.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/security/common-access-management-queries.md.hash deleted file mode 100644 index a7a80db6770..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/security/common-access-management-queries.md.hash +++ /dev/null @@ -1 +0,0 @@ -a49d2805bf32958f diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/security/compliance-overview.md b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/security/compliance-overview.md deleted file mode 100644 index 043562d1522..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/security/compliance-overview.md +++ /dev/null @@ -1,68 +0,0 @@ ---- -sidebar_label: 'セキュリティとコンプライアンス' -slug: '/cloud/security/security-and-compliance' -title: 'セキュリティとコンプライアンス' -description: 'このページでは、ClickHouse Cloud によって実装されたセキュリティとコンプライアンス対策について説明します。' ---- - -import BetaBadge from '@theme/badges/BetaBadge'; -import EnterprisePlanFeatureBadge from '@theme/badges/EnterprisePlanFeatureBadge'; - - -# セキュリティとコンプライアンスレポート -ClickHouse Cloudは、お客様のセキュリティおよびコンプライアンスニーズを評価し、追加のレポートのリクエストに応じてプログラムを継続的に拡張しています。詳細情報やレポートのダウンロードについては、当社の[Trust Center](https://trust.clickhouse.com)をご覧ください。 - -### SOC 2 タイプ II (2022年以降) {#soc-2-type-ii-since-2022} - -System and Organization Controls (SOC) 2は、セキュリティ、可用性、機密性、処理の整合性、およびプライバシー基準に焦点を当てたレポートであり、Trust Services Criteria (TSC)が組織のシステムに適用され、これらのコントロールに関して依存者(私たちの顧客)に対して保証を提供するために設計されています。ClickHouseは独立した外部監査人と連携して、少なくとも年に1回は監査を実施し、私たちのシステムのセキュリティ、可用性、および処理の整合性、ならびに私たちのシステムによって処理されるデータの機密性とプライバシーに関して検討します。このレポートは、私たちのClickHouse CloudとBring Your Own Cloud (BYOC)の提供に関するものです。 - -### ISO 27001 (2023年以降) {#iso-27001-since-2023} - -International Standards Organization (ISO) 27001は、情報セキュリティに関する国際標準です。企業がリスク管理、ポリシー作成およびコミュニケーション、セキュリティコントロールの実施、およびコンポーネントが関連性と有効性を維持することを確保するための監視を含む情報セキュリティ管理システム (ISMS)を実装することを要求しています。ClickHouseは内部監査を実施し、独立した外部監査人と協力して、認証発行間の2年間にわたって監査および中間検査を実施しています。 - -### U.S. DPF (2024年以降) {#us-dpf-since-2024} - -U.S. 
Data Privacy Frameworkは、米国の組織が欧州連合/欧州経済地域、英国、スイスから米国への個人データ移転に関する信頼できるメカニズムを提供するために開発され、EU、UK、およびスイスの法律に準拠するものです (https://dataprivacyframework.gov/Program-Overview)。ClickHouseはフレームワークに自己認証し、[Data Privacy Framework List](https://dataprivacyframework.gov/list)に掲載されています。 - -### HIPAA (2024年以降) {#hipaa-since-2024} - - - -お客様はビジネスアソシエイト契約 (BAA) に署名し、ePHIのロードにHIPAA準拠地域にサービスをオンボードするために営業またはサポートに連絡する必要があります。さらに、お客様は私たちの[共有責任モデル](/cloud/security/shared-responsibility-model)を確認し、使用ケースに適したコントロールを選択および実装する必要があります。 - -1996年の健康保険のポータビリティおよび説明責任に関する法律 (HIPAA) は、保護された健康情報 (PHI) の管理に焦点を当てた米国のプライバシー法です。HIPAAには、電子的個人健康情報 (ePHI) を保護することに焦点を当てた[セキュリティルール](https://www.hhs.gov/hipaa/for-professionals/security/index.html)を含むいくつかの要件があります。ClickHouseは、指定されたサービスに保存されたePHIの機密性、整合性、およびセキュリティを確保するための管理的、物理的、技術的な保護策を実施しています。これらの活動は、私たちの[Trust Center](https://trust.clickhouse.com)でダウンロード可能なSOC 2 タイプ II レポートに組み込まれています。 - -### PCIサービスプロバイダー (2025年以降) {#pci-service-provider-since-2025} - - - -お客様は、カード保持者データをロードするためにPCI準拠地域にサービスをオンボードするために営業またはサポートに連絡する必要があります。さらに、お客様は私たちの[Trust Center](https://trust.clickhouse.com)で利用可能なPCI責任の概要を確認し、使用ケースに適したコントロールを選択および実装する必要があります。 - -[Payment Card Industry Data Security Standard (PCI DSS)](https://www.pcisecuritystandards.org/standards/pci-dss/)は、クレジットカードの支払いデータを保護するためにPCIセキュリティ標準評議会によって作成された規則のセットです。ClickHouseは、クレジットカードデータの保存に関連するPCI基準に対する合格したコンプライアンスレポート (ROC) をもたらしたQualified Security Assessor (QSA)による外部監査を受けました。私たちのコンプライアンステストの証明書 (AOC) とPCI責任の概要をダウンロードするには、[Trust Center](https://trust.clickhouse.com)をご覧ください。 - - -# プライバシーコンプライアンス - -上記の項目に加えて、ClickHouseは一般データ保護規則 (GDPR)、カリフォルニア消費者プライバシー法 (CCPA)、およびその他の関連するプライバシーフレームワークに対処する内部コンプライアンスプログラムを維持しています。ClickHouseが収集する個人データ、その使用方法、保護方法、その他のプライバシー関連情報の詳細は、以下の場所で確認できます。 - -### 法的文書 {#legal-documents} - -- [プライバシーポリシー](https://clickhouse.com/legal/privacy-policy) -- [クッキーポリシー](https://clickhouse.com/legal/cookie-policy) -- [データプライバシーフレームワーク通知](https://clickhouse.com/legal/data-privacy-framework) -- [データ処理付録 (DPA)](https://clickhouse.com/legal/agreements/data-processing-addendum) - -### 処理場所 {#processing-locations} - -- [サブプロセッサーと提携先](https://clickhouse.com/legal/agreements/subprocessors) -- [データ処理の場所](https://trust.clickhouse.com) - -### 追加手続き {#additional-procedures} - -- [個人データアクセス](/cloud/security/personal-data-access) -- [アカウント削除](/cloud/manage/close_account) - - -# 支払いコンプライアンス - -ClickHouseは、[PCI SAQ A v4.0](https://www.pcisecuritystandards.org/document_library/)に準拠したクレジットカードによる支払いの安全な方法を提供しています。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/security/compliance-overview.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/security/compliance-overview.md.hash deleted file mode 100644 index 2fe0b1771eb..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/security/compliance-overview.md.hash +++ /dev/null @@ -1 +0,0 @@ -2f1c1834318da8e3 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/security/connectivity-overview.md b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/security/connectivity-overview.md deleted file mode 100644 index 6b460eb8c4c..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/security/connectivity-overview.md +++ /dev/null @@ -1,19 +0,0 @@ ---- -slug: '/cloud/security/connectivity' -title: 'コネクティビティの概要' -description: 'コネクティビティのためのランディングページ' ---- - - - - -# 接続 - -このセクションでは接続性について説明し、以下のページが含まれています。 - -| ページ | 説明 | 
-|----------------------------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------| -| [IPフィルターの設定](/cloud/security/setting-ip-filters) | IPアクセスリストを使用してClickHouseサービスへのトラフィックを制御する方法に関するガイド。 | -| [プライベートネットワーキング](/cloud/security/private-link-overview) | サービスをクラウド仮想ネットワークに接続する方法に関する情報。 | -| [S3データへの安全なアクセス](/cloud/security/secure-s3) | Amazon Simple Storage Service(S3)で認証するための役割ベースのアクセスを活用し、安全にデータにアクセスする方法に関するガイド。 | -| [クラウドIPアドレス](/manage/security/cloud-endpoints-api) | ClickHouse Cloudでサポートされている各クラウドおよびリージョンの静的IPおよびS3エンドポイントを一覧にしたテーブル。 | diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/security/connectivity-overview.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/security/connectivity-overview.md.hash deleted file mode 100644 index f0068e03c8d..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/security/connectivity-overview.md.hash +++ /dev/null @@ -1 +0,0 @@ -efdffdc0bdc1ee4f diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/security/gcp-private-service-connect.md b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/security/gcp-private-service-connect.md deleted file mode 100644 index aff48391108..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/security/gcp-private-service-connect.md +++ /dev/null @@ -1,439 +0,0 @@ ---- -title: 'GCP Private Service Connect' -description: 'このドキュメントでは、Google Cloud Platform (GCP) プライベートサービス接続(PSC)を使用してClickHouse - Cloudに接続し、ClickHouse CloudのIPアクセスリストを使用してGCP PSCアドレス以外からのClickHouse Cloudサービスへのアクセスを無効にする方法について説明します。' -sidebar_label: 'GCP Private Service Connect' -slug: '/manage/security/gcp-private-service-connect' ---- - -import Image from '@theme/IdealImage'; -import ScalePlanFeatureBadge from '@theme/badges/ScalePlanFeatureBadge'; -import gcp_psc_overview from '@site/static/images/cloud/security/gcp-psc-overview.png'; -import gcp_privatelink_pe_create from '@site/static/images/cloud/security/gcp-privatelink-pe-create.png'; -import gcp_psc_open from '@site/static/images/cloud/security/gcp-psc-open.png'; -import gcp_psc_enable_global_access from '@site/static/images/cloud/security/gcp-psc-enable-global-access.png'; -import gcp_psc_copy_connection_id from '@site/static/images/cloud/security/gcp-psc-copy-connection-id.png'; -import gcp_psc_create_zone from '@site/static/images/cloud/security/gcp-psc-create-zone.png'; -import gcp_psc_zone_type from '@site/static/images/cloud/security/gcp-psc-zone-type.png'; -import gcp_psc_dns_record from '@site/static/images/cloud/security/gcp-psc-dns-record.png'; -import gcp_pe_remove_private_endpoint from '@site/static/images/cloud/security/gcp-pe-remove-private-endpoint.png'; -import gcp_privatelink_pe_filters from '@site/static/images/cloud/security/gcp-privatelink-pe-filters.png'; -import gcp_privatelink_pe_dns from '@site/static/images/cloud/security/gcp-privatelink-pe-dns.png'; - -# Private Service Connect {#private-service-connect} - - - -Private Service Connect(PSC)は、消費者が仮想プライベートクラウド(VPC)ネットワーク内で管理されたサービスにプライベートにアクセスできるようにするGoogle Cloudのネットワーキング機能です。同様に、管理サービスプロデューサーは、これらのサービスを独自の別のVPCネットワークでホストし、消費者へのプライベート接続を提供することができます。 - -サービスプロデューサーは、プライベートサービスコネクトサービスを作成することで、アプリケーションを消費者に公開します。サービス消費者は、これらのプライベートサービスコネクトサービスに直接アクセスします。 - -Overview of Private Service Connect - -:::important 
-デフォルトでは、ClickHouseサービスはプライベートサービス接続経由では利用できません。PSC接続が承認され、確立されていても、以下の[ステップ](#add-endpoint-id-to-services-allow-list)を完了して、インスタンスレベルでPSC IDを許可リストに明示的に追加する必要があります。 -::: - - -**プライベートサービスコネクトのグローバルアクセスを使用する際の重要な考慮事項**: -1. グローバルアクセスを利用するリージョンは、同じVPCに属する必要があります。 -1. グローバルアクセスは、PSCレベルで明示的に有効化する必要があります(以下のスクリーンショットを参照)。 -1. ファイアウォール設定が他のリージョンからのPSCへのアクセスをブロックしないことを確認してください。 -1. GCPのリージョン間データ転送料金が発生する可能性があることに注意してください。 - -リージョン間接続はサポートされていません。プロデューサーと消費者のリージョンは同じである必要があります。ただし、VPC内の他のリージョンから接続するには、プライベートサービスコネクト(PSC)レベルで[グローバルアクセス](https://cloud.google.com/vpc/docs/about-accessing-vpc-hosted-services-endpoints#global-access)を有効にすることができます。 - -**GCP PSCを有効にするために以下の手順を完了してください**: -1. プライベートサービスコネクトのためのGCPサービスアタッチメントを取得します。 -1. サービスエンドポイントを作成します。 -1. ClickHouse Cloudサービスに「エンドポイントID」を追加します。 -1. ClickHouseサービス許可リストに「エンドポイントID」を追加します。 - - -## Attention {#attention} -ClickHouseは、GCPリージョン内で同じ公開された[PSCエンドポイント](https://cloud.google.com/vpc/docs/private-service-connect)を再利用するためにサービスをグループ化しようとします。ただし、このグループ化は保証されておらず、特にサービスが複数のClickHouse組織に分散されている場合、特に保証されません。 -ClickHouse組織内で他のサービス用にPSCが既に構成されている場合は、そのグループ化のためほとんどのステップをスキップし、次の最終ステップへ直接進むことができます:[ClickHouseサービス許可リストに「エンドポイントID」を追加](#add-endpoint-id-to-services-allow-list)します。 - -Terraformの例は[こちら](https://github.com/ClickHouse/terraform-provider-clickhouse/tree/main/examples/)で見つけることができます。 - -## Before you get started {#before-you-get-started} - -:::note -以下に、ClickHouse Cloudサービス内でプライベートサービスコネクトを設定する方法を示すコード例を提供します。以下の例では、以下を使用します: - - GCPリージョン: `us-central1` - - GCPプロジェクト(顧客GCPプロジェクト): `my-gcp-project` - - 顧客GCPプロジェクト内のGCPプライベートIPアドレス: `10.128.0.2` - - 顧客GCPプロジェクト内のGCP VPC: `default` -::: - -ClickHouse Cloudサービスについての情報を取得する必要があります。これは、ClickHouse CloudコンソールまたはClickHouse APIを通じて行うことができます。ClickHouse APIを使用する場合、次の環境変数を設定してください: - -```shell -REGION= -PROVIDER=gcp -KEY_ID= -KEY_SECRET= -ORG_ID= -SERVICE_NAME= -``` - -[新しいClickHouse Cloud APIキーを作成する](/cloud/manage/openapi)か、既存のものを使用できます。 - -地域、プロバイダー、サービス名でフィルタリングしてClickHouseの`INSTANCE_ID`を取得します: - -```shell -INSTANCE_ID=$(curl --silent --user "${KEY_ID:?}:${KEY_SECRET:?}" \ -"https://api.clickhouse.cloud/v1/organizations/${ORG_ID:?}/services" | \ -jq ".result[] | select (.region==\"${REGION:?}\" and .provider==\"${PROVIDER:?}\" and .name==\"${SERVICE_NAME:?}\") | .id " -r) -``` - -:::note - - ClickHouseコンソールから組織IDを取得できます(組織 -> 組織の詳細)。 - - [新しいキーを作成する](/cloud/manage/openapi)か、既存のものを使用できます。 -::: - -## Obtain GCP service attachment and DNS name for Private Service Connect {#obtain-gcp-service-attachment-and-dns-name-for-private-service-connect} - -### Option 1: ClickHouse Cloud console {#option-1-clickhouse-cloud-console} - -ClickHouse Cloudコンソールで、プライベートサービスコネクトを介して接続したいサービスを開き、次に**設定**メニューを開きます。「**プライベートエンドポイントを設定**」ボタンをクリックします。**サービス名**(`endpointServiceId`)と**DNS名**(`privateDnsHostname`)をメモしておきます。次のステップで使用します。 - -Private Endpoints - -### Option 2: API {#option-2-api} - -:::note -このステップを実行するためには、リージョン内に少なくとも1つのインスタンスがデプロイされている必要があります。 -::: - -プライベートサービスコネクトのGCPサービスアタッチメントとDNS名を取得します: - -```bash -curl --silent --user "${KEY_ID:?}:${KEY_SECRET:?}" "https://api.clickhouse.cloud/v1/organizations/${ORG_ID:?}/services/${INSTANCE_ID:?}/privateEndpointConfig" | jq .result -{ - "endpointServiceId": "projects/.../regions/us-central1/serviceAttachments/production-us-central1-clickhouse-cloud", - "privateDnsHostname": "xxxxxxxxxx.us-central1.p.gcp.clickhouse.cloud" -} -``` - -`endpointServiceId`及び`privateDnsHostname`をメモしてください。次のステップで使用します。 - -## Create service endpoint {#create-service-endpoint} - -:::important -このセクションでは、GCP 
PSC(プライベートサービスコネクト)を介してClickHouseを構成するためのClickHouse特有の詳細をカバーしています。GCP特有のステップは参照として提供されていますが、変更される可能性があることに注意してください。特定のユースケースに基づいてGCP設定を検討してください。 - -ClickHouseは必要なGCP PSCエンドポイント、DNSレコードの構成に責任を負いません。 - -GCP設定タスクに関連する問題がある場合は、GCPサポートに直接連絡してください。 -::: - -このセクションでは、サービスエンドポイントを作成します。 - -### Adding a Private Service Connection {#adding-a-private-service-connection} - -まず最初に、プライベートサービス接続を作成します。 - -#### Option 1: Using Google Cloud console {#option-1-using-google-cloud-console} - -Google Cloudコンソールで、**ネットワークサービス -> プライベートサービスコネクト**に移動します。 - -Open Private Service Connect in Google Cloud Console - -「**エンドポイントを接続**」ボタンをクリックして、プライベートサービスコネクトの作成ダイアログを開きます。 - -- **ターゲット**: **公開サービス**を使用 -- **ターゲットサービス**: [プライベートサービスコネクトのGCPサービスアタッチメントを取得](#obtain-gcp-service-attachment-and-dns-name-for-private-service-connect)ステップで取得した`endpointServiceId`APIまたは`サービス名`コンソールを使用します。 -- **エンドポイント名**: PSCの**エンドポイント名**に名前を設定します。 -- **ネットワーク/サブネットワーク/ IPアドレス**: 接続に使用したいネットワークを選択します。プライベートサービスコネクトエンドポイントのために新しいIPアドレスを作成するか、既存のIPアドレスを使用する必要があります。私たちの例では、名前を**your-ip-address**とし、IPアドレス`10.128.0.2`を割り当てたアドレスを事前に作成しました。 -- エンドポイントをどのリージョンからも利用できるようにするために、**グローバルアクセスを有効にする**チェックボックスを有効にできます。 - -Enable Global Access for Private Service Connect - -PSCエンドポイントを作成するには、**ADD ENDPOINT**ボタンを使用します。 - -接続が承認されると、**ステータス**列は**保留中**から**承認済**に変わります。 - -Copy PSC Connection ID - -***PSC接続ID***をコピーします。次のステップで***エンドポイントID***として使用します。 - -#### Option 2: Using Terraform {#option-2-using-terraform} - -```json -provider "google" { - project = "my-gcp-project" - region = "us-central1" -} - -variable "region" { - type = string - default = "us-central1" -} - -variable "subnetwork" { - type = string - default = "https://www.googleapis.com/compute/v1/projects/my-gcp-project/regions/us-central1/subnetworks/default" -} - -variable "network" { - type = string - default = "https://www.googleapis.com/compute/v1/projects/my-gcp-project/global/networks/default" -} - -resource "google_compute_address" "psc_endpoint_ip" { - address = "10.128.0.2" - address_type = "INTERNAL" - name = "your-ip-address" - purpose = "GCE_ENDPOINT" - region = var.region - subnetwork = var.subnetwork -} - -resource "google_compute_forwarding_rule" "clickhouse_cloud_psc" { - ip_address = google_compute_address.psc_endpoint_ip.self_link - name = "ch-cloud-${var.region}" - network = var.network - region = var.region - load_balancing_scheme = "" - # service attachment - target = "https://www.googleapis.com/compute/v1/$TARGET" # See below in notes -} - -output "psc_connection_id" { - value = google_compute_forwarding_rule.clickhouse_cloud_psc.psc_connection_id - description = "Add GCP PSC Connection ID to allow list on instance level." 
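-  # This output (the PSC Connection ID) is the value you later allow-list on the ClickHouse Cloud organization/service as the endpoint ID (see the API steps below).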
-} -``` - -:::note -`endpointServiceId`APIまたは`サービス名`コンソールを使用して、[プライベートサービスコネクトのGCPサービスアタッチメントを取得](#obtain-gcp-service-attachment-and-dns-name-for-private-service-connect)ステップを参照してください。 -::: - -## Set Private DNS Name for Endpoint {#setting-up-dns} - -:::note -DNSの構成方法はいくつかあります。特定のユースケースに応じてDNSを設定してください。 -::: - -[プライベートサービスコネクトのGCPサービスアタッチメントを取得](#obtain-gcp-service-attachment-and-dns-name-for-private-service-connect)ステップから取得した「DNS名」をGCPプライベートサービスコネクトエンドポイントIPアドレスにポイントする必要があります。これにより、VPC/ネットワーク内のサービス/コンポーネントがそれを正しく解決できるようになります。 - -## Add Endpoint ID to ClickHouse Cloud organization {#add-endpoint-id-to-clickhouse-cloud-organization} - -### Option 1: ClickHouse Cloud console {#option-1-clickhouse-cloud-console-1} - -組織にエンドポイントを追加するには、[ClickHouseサービス許可リストに「エンドポイントID」を追加](#add-endpoint-id-to-services-allow-list)ステップに進んでください。ClickHouse Cloudコンソールを使用して`PSC接続ID`をサービス許可リストに追加すると、自動的に組織に追加されます。 - -エンドポイントを削除するには、**組織の詳細 -> プライベートエンドポイント**を開き、削除ボタンをクリックしてエンドポイントを削除します。 - -Remove Private Endpoint from ClickHouse Cloud - -### Option 2: API {#option-2-api-1} - -コマンドを実行する前にこれらの環境変数を設定してください: - -[Adding a Private Service Connection](#adding-a-private-service-connection)ステップからの「エンドポイントID」の値で`ENDPOINT_ID`を以下のように置き換えます。 - -エンドポイントを追加するには、次のコマンドを実行します: - -```bash -cat < - -### Option 2: API {#option-2-api-2} - -コマンドを実行する前にこれらの環境変数を設定してください: - -[Adding a Private Service Connection](#adding-a-private-service-connection)ステップからの「エンドポイントID」の値で**ENDPOINT_ID**を置き換えます。 - -プライベートサービスコネクトを使用して利用可能である必要がある各サービスに対して実行します。 - -追加するには: - -```bash -cat < - -#### Option 2: API {#option-2-api-3} - -```bash -curl --silent --user "${KEY_ID:?}:${KEY_SECRET:?}" "https://api.clickhouse.cloud/v1/organizations/${ORG_ID:?}/services/${INSTANCE_ID:?}/privateEndpointConfig" | jq .result -``` - -```response -{ - ... - "privateDnsHostname": "xxxxxxx..p.gcp.clickhouse.cloud" -} -``` - -この例では、`xxxxxxx.yy-xxxxN.p.gcp.clickhouse.cloud`ホスト名への接続はプライベートサービスコネクトにルーティングされます。一方、`xxxxxxx.yy-xxxxN.gcp.clickhouse.cloud`はインターネット経由でルーティングされます。 - -## Troubleshooting {#troubleshooting} - -### Test DNS setup {#test-dns-setup} - -DNS_NAME - [プライベートサービスコネクトのGCPサービスアタッチメントを取得](#obtain-gcp-service-attachment-and-dns-name-for-private-service-connect)ステップからの`privateDnsHostname`を使用します。 - -```bash -nslookup $DNS_NAME -``` - -```response -非権威的回答: -... 
-アドレス:10.128.0.2 -``` - -### Connection reset by peer {#connection-reset-by-peer} - -- おそらく、エンドポイントIDがサービス許可リストに追加されていない可能性があります。[_Add endpoint ID to services allow-list_ステップ](#add-endpoint-id-to-services-allow-list)を再度確認してください。 - -### Test connectivity {#test-connectivity} - -PSCリンクを使用して接続する際に問題がある場合は、`openssl`を使用して接続性を確認してください。プライベートサービスコネクトエンドポイントのステータスが`承認済`であることを確認してください: - -OpenSSLは接続できる必要があります(出力にCONNECTEDと表示されます)。`errno=104`は予期される結果です。 - -DNS_NAME - [プライベートサービスコネクトのGCPサービスアタッチメントを取得](#obtain-gcp-service-attachment-and-dns-name-for-private-service-connect)ステップからの`privateDnsHostname`を使用します。 - -```bash -openssl s_client -connect ${DNS_NAME}:9440 -``` - -```response - -# highlight-next-line -CONNECTED(00000003) -write:errno=104 ---- -ピア証明書は利用できません ---- -クライアント証明書CA名は送信されませんでした ---- -SSLハンドシェイクは0バイトを読み取り335バイトを書き込みました -検証:OK ---- -新しい、(NONE)、暗号は(NONE) -セキュアな再交渉はサポートされていません -圧縮:NONE -展開:NONE -ALPN交渉は行われませんでした -早期データは送信されませんでした -検証戻りコード:0(ok) -``` - -### Checking Endpoint filters {#checking-endpoint-filters} - -#### REST API {#rest-api} - -```bash -curl --silent --user "${KEY_ID:?}:${KEY_SECRET:?}" -X GET -H "Content-Type: application/json" "https://api.clickhouse.cloud/v1/organizations/${ORG_ID:?}/services/${INSTANCE_ID:?}" | jq .result.privateEndpointIds -[ - "102600141743718403" -] -``` - -### Connecting to a remote database {#connecting-to-a-remote-database} - -たとえば、ClickHouse Cloudで[MySQL](../../sql-reference/table-functions/mysql.md)または[PostgreSQL](../../sql-reference/table-functions/postgresql.md)テーブル関数を使用して、GCPにホストされたデータベースに接続しようとしているとします。GCP PSCはこの接続を安全に有効にするために使用できません。PSCは一方向の単方向接続です。内部ネットワークやGCP VPCがClickHouse Cloudに安全に接続できるようにしますが、ClickHouse Cloudが内部ネットワークに接続することはできません。 - -[GCPプライベートサービスコネクトに関するドキュメント](https://cloud.google.com/vpc/docs/private-service-connect)によれば: - -> サービス指向の設計:プロデューサーサービスは、消費者VPCネットワークに対し単一のIPアドレスを公開する負荷分散装置を介して公開されます。プロデューサーサービスにアクセスする消費者トラフィックは一方向であり、サービスのIPアドレスにのみアクセスでき、全体のピアVPCネットワークにアクセスすることはできません。 - -これを実現するには、ClickHouse Cloudから内部/プライベートデータベースサービスへの接続を許可するようにGCP VPCファイアウォールルールを構成してください。[ClickHouse Cloudリージョンのデフォルトの出口IPアドレス](/manage/security/cloud-endpoints-api)と、[利用可能な静的IPアドレス](https://api.clickhouse.cloud/static-ips.json)を確認してください。 - -## More information {#more-information} - -詳細な情報については、[cloud.google.com/vpc/docs/configure-private-service-connect-services](https://cloud.google.com/vpc/docs/configure-private-service-connect-services)を訪れてください。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/security/gcp-private-service-connect.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/security/gcp-private-service-connect.md.hash deleted file mode 100644 index 71a0d65a738..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/security/gcp-private-service-connect.md.hash +++ /dev/null @@ -1 +0,0 @@ -c16f6c4dd9f07121 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/security/index.md b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/security/index.md deleted file mode 100644 index 29d186c9269..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/security/index.md +++ /dev/null @@ -1,25 +0,0 @@ ---- -slug: '/cloud/security' -keywords: -- 'Cloud' -- 'Security' -title: '概要' -hide_title: true -description: 'ClickHouseクラウドセキュリティのランディングページ' ---- - - - - -# ClickHouse Cloud Security - -このセクションでは、ClickHouse Cloud におけるセキュリティについて掘り下げており、以下のページが含まれています。 - -| ページ | 説明 | 
-|---------------------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| [Shared Responsibility Model](shared-responsibility-model.md) | 各サービスタイプに提供されるセキュリティ機能に関する情報。 | -| [Cloud Access Management](cloud-access-management/index.md) | アクセス制御、認証、SSO の設定、一般的なアクセス管理のクエリ、および新規ユーザーの招待方法に関する情報。 | -| [Connectivity](connectivity-overview.md) | IP フィルターの設定、プライベートネットワーキング、S3 データおよび Cloud IP アドレスへの安全なアクセスに関する情報。 | -| [Enhanced Encryption](cmek.md) | 静的データはデフォルトでクラウドプロバイダー管理の AES 256 キーを使用して暗号化されます。顧客はサービスデータの保護のために、透過的データ暗号化 (TDE) を有効にすることができます。 | -| [Audit Logging](audit-logging.md) | ClickHouse Cloud における監査ログに関するガイド。 | -| [Privacy and Compliance](privacy-compliance-overview.md) | ClickHouse Cloud のセキュリティとコンプライアンスに関する情報、個人情報を確認および修正する方法に関するガイド。 | diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/security/index.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/security/index.md.hash deleted file mode 100644 index b9becabec14..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/security/index.md.hash +++ /dev/null @@ -1 +0,0 @@ -84e755e836339115 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/security/inviting-new-users.md b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/security/inviting-new-users.md deleted file mode 100644 index 16c59968487..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/security/inviting-new-users.md +++ /dev/null @@ -1,30 +0,0 @@ ---- -sidebar_label: '新しいユーザーの招待' -slug: '/cloud/security/inviting-new-users' -title: '新しいユーザーの招待' -description: 'このページでは管理者が組織に新しいユーザーを招待し、それらに役割を割り当てる方法について説明しています' ---- - -import Image from '@theme/IdealImage'; -import users_and_roles from '@site/static/images/cloud/security/users_and_roles.png'; -import invite_user from '@site/static/images/cloud/security/invite-user.png'; - -Administrators can invite others to organization, assigning them the `Developer`, `Admin` or `Billing Admin` role. - -:::note -Admins and developers are different than database users. To create database users and roles, please use the SQL console. To learn more, visit our docs on [Users and Roles](/cloud/security/cloud-access-management). -::: - -To invite a user, select the organization and click `Users and roles`: - -ClickHouse Cloud users and roles page - -
-
-Select `Invite members`, and enter the email addresses of up to 3 new users at once, selecting the role for each.
-
-ClickHouse Cloud invite user page
-
-
- -Click `Send invites`. Users will receive an email from which they can join the organization. diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/security/inviting-new-users.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/security/inviting-new-users.md.hash deleted file mode 100644 index eded3f75b59..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/security/inviting-new-users.md.hash +++ /dev/null @@ -1 +0,0 @@ -8dc4c0cc007d0d81 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/security/personal-data-access.md b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/security/personal-data-access.md deleted file mode 100644 index c2d02a96512..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/security/personal-data-access.md +++ /dev/null @@ -1,63 +0,0 @@ ---- -sidebar_label: 'Personal Data Access' -slug: '/cloud/security/personal-data-access' -title: 'Personal Data Access' -description: 'As a registered user, ClickHouse allows you to view and manage your - personal account data, including contact information.' ---- - -import Image from '@theme/IdealImage'; -import support_case_form from '@site/static/images/cloud/security/support-case-form.png'; - -## Intro {#intro} - -登録ユーザーとして、ClickHouseでは、連絡先情報を含む個人アカウントデータを表示および管理することができます。あなたの役割に応じて、これはあなたの組織内の他のユーザーの連絡先情報、APIキーの詳細、その他の関連情報へのアクセスを含む場合があります。これらの詳細は、ClickHouseコンソールを通じてセルフサービス形式で直接管理できます。 - -**データ主体アクセス要求 (DSAR) とは** - -ご所在の地域によっては、ClickHouseが保有する個人データに関する追加の権利(データ主体の権利)が法律によって提供されることがあります。これについては、ClickHouseのプライバシーポリシーで説明されています。データ主体の権利を行使する手続きは、データ主体アクセス要求 (DSAR) と呼ばれます。 - -**個人データの範囲** - -ClickHouseが収集する個人データやその使用方法については、ClickHouseのプライバシーポリシーを確認してください。 - -## Self Service {#self-service} - -デフォルトでは、ClickHouseはユーザーがClickHouseコンソールから自分の個人データを直接表示できるようにしています。 - -以下は、アカウント設定およびサービス使用中にClickHouseが収集するデータの要約と、特定の個人データがClickHouseコンソール内のどこで表示できるかの情報です。 - -| Location/URL | Description | Personal Data | -|-------------|----------------|-----------------------------------------| -| https://auth.clickhouse.cloud/u/signup/ | アカウント登録 | email, password | -| https://console.clickhouse.cloud/profile | 一般ユーザープロフィール詳細 | name, email | -| https://console.clickhouse.cloud/organizations/OrgID/members | 組織内のユーザーリスト | name, email | -| https://console.clickhouse.cloud/organizations/OrgID/keys | APIキーのリストと作成者 | email | -| https://console.clickhouse.cloud/organizations/OrgID/audit | 活動ログ、個々のユーザーによるアクションのリスト | email | -| https://console.clickhouse.cloud/organizations/OrgID/billing | 請求情報と請求書 | billing address, email | -| https://console.clickhouse.cloud/support | ClickHouseサポートとのやり取り | name, email | - -注意: `OrgID`を含むURLは、特定のアカウントの`OrgID`を反映するように更新する必要があります。 - -### Current customers {#current-customers} - -弊社とアカウントをお持ちで、セルフサービスオプションで個人データの問題が解決しない場合、プライバシーポリシーに基づきデータ主体アクセス要求を提出できます。そのためには、ClickHouseアカウントにログインし、[サポートケース](https://console.clickhouse.cloud/support)を開いてください。これにより、あなたの身元を確認し、リクエストに対応するプロセスをスムーズに進めることができます。 - -サポートケースには、以下の詳細を含めてください。 - -| Field | Text to include in your request | -|-------------|---------------------------------------------------| -| Subject | データ主体アクセス要求 (DSAR) | -| Description | ClickHouseに探し、収集し、または提供してほしい情報の詳細な説明。 | - -ClickHouse Cloudのサポートケースフォーム - -### Individuals Without an Account {#individuals-without-an-account} - -弊社とアカウントをお持ちでなく、上記のセルフサービスオプションで個人データの問題が解決されていない場合、プライバシーポリシーに従ってデータ主体アクセス要求を行いたい場合は、メールで[privacy@clickhouse.com](mailto:privacy@clickhouse.com)にこれらのリクエストを送信してください。 - -## Identity Verification {#identity-verification} - 
-メールを通じてデータ主体アクセス要求を提出する場合、あなたの身元を確認し、リクエストを処理するために特定の情報を要求することがあります。適用される法律により、リクエストを拒否することが求められたり許可されたりする場合があります。リクエストを拒否する場合、その理由をお知らせしますが、法的制限に従います。 - -詳細については、[ClickHouseプライバシーポリシー](https://clickhouse.com/legal/privacy-policy)をご覧ください。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/security/personal-data-access.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/security/personal-data-access.md.hash deleted file mode 100644 index 004c8f5bb87..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/security/personal-data-access.md.hash +++ /dev/null @@ -1 +0,0 @@ -fc97d80d792ad2a1 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/security/privacy-compliance-overview.md b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/security/privacy-compliance-overview.md deleted file mode 100644 index 9ecc2a60d83..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/security/privacy-compliance-overview.md +++ /dev/null @@ -1,18 +0,0 @@ ---- -sidebar_label: 'プライバシーとコンプライアンスの概要' -slug: '/cloud/security/privacy-compliance-overview' -title: 'プライバシーとコンプライアンス' -description: 'プライバシーとコンプライアンスのランディングページ' ---- - - - - -# プライバシーとコンプライアンス - -このセクションには以下のページが含まれています: - -| ページ | 説明 | -|----------------------------------------------------------------------------|--------------------------------------------------------------| -| [セキュリティとコンプライアンス](/cloud/security/security-and-compliance) | ClickHouse Cloudのセキュリティレポートとプライバシーコンプライアンス。 | -| [個人データアクセス](/cloud/security/personal-data-access) | 自分の個人データへのアクセス方法に関する情報。 | diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/security/privacy-compliance-overview.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/security/privacy-compliance-overview.md.hash deleted file mode 100644 index d66fd77e892..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/security/privacy-compliance-overview.md.hash +++ /dev/null @@ -1 +0,0 @@ -47c45bc0076326e7 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/security/private-link-overview.md b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/security/private-link-overview.md deleted file mode 100644 index 81a1ccc79de..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/security/private-link-overview.md +++ /dev/null @@ -1,17 +0,0 @@ ---- -sidebar_label: 'Private Link Overview' -slug: '/cloud/security/private-link-overview' -title: 'Private Link Overview' -description: 'Landing page for Private Link' ---- - - - - -# プライベートリンクの概要 - -ClickHouse Cloudは、あなたのサービスをクラウド仮想ネットワークに接続する機能を提供します。以下のガイドを参照してください: - -- [AWS Private Link](/cloud/security/aws-privatelink.md) -- [GCP Private Service Connect](/cloud/security/gcp-private-service-connect.md) -- [Azure Private Link](/cloud/security/azure-privatelink.md) diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/security/private-link-overview.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/security/private-link-overview.md.hash deleted file mode 100644 index 1440462f318..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/security/private-link-overview.md.hash +++ /dev/null @@ -1 +0,0 @@ -b28f6e0020816ae5 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/security/saml-sso-setup.md b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/security/saml-sso-setup.md deleted file mode 100644 index f47fd783212..00000000000 --- 
a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/security/saml-sso-setup.md +++ /dev/null @@ -1,364 +0,0 @@ ---- -sidebar_label: 'SAML SSOセットアップ' -slug: '/cloud/security/saml-setup' -title: 'SAML SSOセットアップ' -description: 'ClickHouse CloudでSAML SSOのセットアップ方法' ---- - -import Image from '@theme/IdealImage'; -import samlOrgId from '@site/static/images/cloud/security/saml-org-id.png'; -import samlOktaSetup from '@site/static/images/cloud/security/saml-okta-setup.png'; -import samlGoogleApp from '@site/static/images/cloud/security/saml-google-app.png'; -import samlAzureApp from '@site/static/images/cloud/security/saml-azure-app.png'; -import samlAzureClaims from '@site/static/images/cloud/security/saml-azure-claims.png'; -import EnterprisePlanFeatureBadge from '@theme/badges/EnterprisePlanFeatureBadge' - - - -# SAML SSO セットアップ - - - -ClickHouse Cloud は、セキュリティアサーションマークアップ言語 (SAML) を介したシングルサインオン (SSO) をサポートしています。これにより、アイデンティティプロバイダー (IdP) で認証することで、ClickHouse Cloud 組織に安全にサインインすることができます。 - -現在、サービスプロバイダーが開始する SSO、個別接続を使用する複数の組織、およびジャストインタイムプロビジョニングをサポートしています。クロスドメインのアイデンティティ管理システム (SCIM) や属性マッピングのサポートはまだ提供していません。 - -## 始める前に {#before-you-begin} - -IdP で管理者権限と ClickHouse Cloud 組織での **Admin** 役割が必要です。IdP 内に接続を設定した後、以下の手順で要求される情報を持って私たちに連絡してください。プロセスが完了します。 - -SSO 接続を補完するために、**組織への直接リンク**を設定することをお勧めします。各 IdP での処理は異なります。ご使用の IdP に対してこれをどう行うか、下をお読みください。 - -## IdP の設定方法 {#how-to-configure-your-idp} - -### 手順 {#steps} - -
- 組織 ID を取得する - - すべての設定には組織 ID が必要です。組織 ID を取得するには: - - 1. [ClickHouse Cloud](https://console.clickhouse.cloud) 組織にサインインします。 - - 組織 ID - - 2. 左下隅で、**Organization** の下にある組織名をクリックします。 - - 3. ポップアップメニューで **Organization details** を選択します。 - - 4. 下記で使用するために **Organization ID** をメモしておきます。 -
- -
- SAML 統合を設定する - - ClickHouse はサービスプロバイダーが開始する SAML 接続を利用します。これは、https://console.clickhouse.cloud を介してまたは直接リンクを介してログインできることを意味します。現在、アイデンティティプロバイダーが開始する接続はサポートしていません。基本的な SAML 設定は以下の通りです。 - - - SSO URL または ACS URL: `https://auth.clickhouse.cloud/login/callback?connection={organizationid}` - - - Audience URI または Entity ID: `urn:auth0:ch-production:{organizationid}` - - - アプリケーションユーザー名: `email` - - - 属性マッピング: `email = user.email` - - - 組織にアクセスするための直接リンク: `https://console.clickhouse.cloud/?connection={organizationid}` - - - 特定の設定手順は、以下の具体的なアイデンティティプロバイダーを参照してください。 - -
- -
- 接続情報を取得する - - アイデンティティプロバイダーの SSO URL と X.509 証明書を取得します。これらの情報を取得する方法については、具体的なアイデンティティプロバイダーを参照してください。 -
- -
- サポートケースを提出する - - 1. ClickHouse Cloud コンソールに戻ります。 - - 2. 左側の **Help** を選択し、次に Support サブメニューを選択します。 - - 3. **New case** をクリックします。 - - 4. 件名に "SAML SSO Setup" と入力します。 - - 5. 説明に、上記の手順から収集したリンクを貼り付け、チケットに証明書を添付します。 - - 6. この接続を許可すべきドメイン (e.g. domain.com, domain.ai など) もお知らせください。 - - 7. 新しいケースを作成します。 - - 8. ClickHouse Cloud 内で設定を完了し、テストの準備ができたらお知らせします。 - -
- -
- 設定を完了する - - 1. アイデンティティプロバイダー内でユーザーアクセスを割り当てます。 - - 2. https://console.clickhouse.cloud または上記の「SAML 統合を設定する」で設定した直接リンクを介して ClickHouse にログインします。ユーザーは初めてアクセスした際に 'Member' 役割が最初に割り当てられます。これにより、組織にログインし、個人設定を更新できます。 - - 3. ClickHouse 組織からログアウトします。 - - 4. 元の認証方法でログインし、新しい SSO アカウントに Admin 役割を割り当てます。 - - メール + パスワードアカウントの場合は、`https://console.clickhouse.cloud/?with=email`を使用してください。 - - ソーシャルログインの場合は、適切なボタンをクリックしてください (**Continue with Google** または **Continue with Microsoft**) - - 5. 元の認証方法でログアウトし、https://console.clickhouse.cloud または上記の「SAML 統合を設定する」で設定した直接リンクを介して再度ログインします。 - - 6. 組織の SAML を強制するために、非 SAML ユーザーを削除します。今後、ユーザーはアイデンティティプロバイダーを介して割り当てられます。 - -
- -### Okta SAML の設定 {#configure-okta-saml} - -各 ClickHouse 組織に対して、Okta で 2 つのアプリ統合を設定します:1 つは SAML アプリ、もう 1 つは直接リンクを保持するブックマークです。 - -
- 1. アクセス管理用のグループを作成する - - 1. Okta インスタンスに **Administrator** としてログインします。 - - 2. 左側の **Groups** を選択します。 - - 3. **Add group** をクリックします。 - - 4. グループの名前と説明を入力します。このグループは、SAML アプリと関連するブックマークアプリの間でユーザーを一貫性を持たせるために使用されます。 - - 5. **Save** をクリックします。 - - 6. 作成したグループの名前をクリックします。 - - 7. **Assign people** をクリックして、この ClickHouse 組織にアクセスを希望するユーザーを割り当てます。 - -
- -
- 2. ユーザーがシームレスにログインできるようにブックマークアプリを作成する - - 1. 左側の **Applications** を選択し、次に **Applications** のサブヘッダーを選択します。 - - 2. **Browse App Catalog** をクリックします。 - - 3. **Bookmark App** を検索して選択します。 - - 4. **Add integration** をクリックします。 - - 5. アプリ用のラベルを選択します。 - - 6. URL を `https://console.clickhouse.cloud/?connection={organizationid}` として入力します。 - - 7. **Assignments** タブに移動し、上記で作成したグループを追加します。 - -
- -
- 3. 接続を有効にするための SAML アプリを作成する - - 1. 左側の **Applications** を選択し、次に **Applications** のサブヘッダーを選択します。 - - 2. **Create App Integration** をクリックします。 - - 3. SAML 2.0 を選択して、次へ進みます。 - - 4. アプリケーションの名前を入力し、**Do not display application icon to users** の横のボックスにチェックを入れ、次へ進みます。 - - 5. SAML 設定画面に以下の値で入力します。 - - | フィールド | 値 | - |--------------------------------|-------| - | シングルサインオン URL | `https://auth.clickhouse.cloud/login/callback?connection={organizationid}` | - | Audience URI (SP Entity ID) | `urn:auth0:ch-production:{organizationid}` | - | デフォルト RelayState | 空白のまま | - | Name ID フォーマット | 未指定 | - | アプリケーションユーザー名 | メール | - | アプリケーションユーザー名の更新 | 作成および更新 | - - 7. 以下の属性ステートメントを入力します。 - - | 名前 | 名前フォーマット | 値 | - |---------|---------------|------------| - | email | 基本 | user.email | - - 9. **Next** をクリックします。 - - 10. フィードバック画面で要求された情報を入力し、**Finish** をクリックします。 - - 11. **Assignments** タブに移動し、上記で作成したグループを追加します。 - - 12. 新しいアプリの **Sign On** タブで、**View SAML setup instructions** ボタンをクリックします。 - - Okta SAML 設定手順 - - 13. これら 3 つのアイテムを収集し、上記のサポートケースを提出してプロセスを完了します。 - - アイデンティティプロバイダーのシングルサインオン URL - - アイデンティティプロバイダーの発行者 - - X.509 証明書 - -
- -### Google SAML の設定 {#configure-google-saml} - -各組織に対して Google で 1 つの SAML アプリを設定し、マルチオーガニゼーション SSO を利用する場合はユーザーに直接リンク (`https://console.clickhouse.cloud/?connection={organizationId}`) をブックマークして提供する必要があります。 - -
- Google Web App を作成する - - 1. Google 管理コンソール (admin.google.com) にアクセスします。 - - Google SAML アプリ - - 2. 左側の **Apps** をクリックし、次に **Web and mobile apps** をクリックします。 - - 3. 上部メニューから **Add app** をクリックし、次に **Add custom SAML app** を選択します。 - - 4. アプリの名前を入力し、**Continue** をクリックします。 - - 5. これら 2 つのアイテムを収集し、上記のサポートケースを提出して情報を私たちに送信してください。注意:このデータをコピーする前にセットアップを完了した場合は、アプリのホーム画面から **DOWNLOAD METADATA** をクリックして X.509 証明書を取得してください。 - - SSO URL - - X.509 証明書 - - 7. 以下に ACS URL と Entity ID を入力します。 - - | フィールド | 値 | - |-----------|-------| - | ACS URL | `https://auth.clickhouse.cloud/login/callback?connection={organizationid}` | - | Entity ID | `urn:auth0:ch-production:{organizationid}` | - - 8. **Signed response** のボックスにチェックを入れます。 - - 9. Name ID フォーマットに **EMAIL** を選択し、Name ID は **Basic Information > Primary email.** のままにします。 - - 10. **Continue** をクリックします。 - - 11. 以下の属性マッピングを入力します。 - - | フィールド | 値 | - |-------------------|---------------| - | 基本情報 | プライマリメール | - | アプリ属性 | email | - - 13. **Finish** をクリックします。 - - 14. アプリを有効にするには、**OFF** をすべてのユーザーに対して変更し、設定を **ON** に変更します。アクセスは、画面の左側にあるオプションを選択することで、グループまたは組織単位に制限することもできます。 - -
- -### Azure (Microsoft) SAML の設定 {#configure-azure-microsoft-saml} - -Azure (Microsoft) SAML は Azure Active Directory (AD) または Microsoft Entra としても知られています。 - -
- Azure エンタープライズ アプリケーションを作成する - - 各組織に対して、別のサインオン URL を持つ 1 つのアプリケーション統合を設定します。 - - 1. Microsoft Entra 管理センターにログインします。 - - 2. 左側の **Applications > Enterprise** アプリケーションに移動します。 - - 3. 上部メニューにある **New application** をクリックします。 - - 4. 上部メニューにある **Create your own application** をクリックします。 - - 5. 名前を入力し、**Integrate any other application you don't find in the gallery (Non-gallery)** を選択してから、**Create** をクリックします。 - - Azure Non-Gallery アプリ - - 6. 左側の **Users and groups** をクリックし、ユーザーを割り当てます。 - - 7. 左側の **Single sign-on** をクリックします。 - - 8. **SAML** をクリックします。 - - 9. 以下の設定を使用して Basic SAML Configuration 画面を埋めます。 - - | フィールド | 値 | - |---------------------------|-------| - | Identifier (Entity ID) | `urn:auth0:ch-production:{organizationid}` | - | Reply URL (Assertion Consumer Service URL) | `https://auth.clickhouse.cloud/login/callback?connection={organizationid}` | - | Sign on URL | `https://console.clickhouse.cloud/?connection={organizationid}` | - | Relay State | 空白 | - | Logout URL | 空白 | - - 11. Attributes & Claims の下で以下を追加 (A) または更新 (U) します。 - - | クレーム名 | フォーマット | ソース属性 | - |--------------------------------|---------------|------------------| - | (U) ユニーク ユーザー識別子 (Name ID) | メールアドレス | user.mail | - | (A) email | 基本 | user.mail | - | (U) /identity/claims/name | 除外 | user.mail | - - 属性とクレーム - - 12. これら 2 つのアイテムを収集し、上記のサポートケースを提出してプロセスを完了します: - - ログイン URL - - 証明書 (Base64) - -
- -### Duo SAML の設定 {#configure-duo-saml} - -
- Duo 用の一般的な SAML サービスプロバイダーを作成する - - 1. [Duo Single Sign-On for Generic SAML Service Providers](https://duo.com/docs/sso-generic) の手順に従ってください。 - - 2. 次のブリッジ属性マッピングを使用します: - - | ブリッジ属性 | ClickHouse 属性 | - |:-------------------|:-----------------------| - | メールアドレス | email | - - 3. Duo のクラウドアプリケーションを更新するには、以下の値を使用します: - - | フィールド | 値 | - |:----------|:-------------------------------------------| - | Entity ID | `urn:auth0:ch-production:{organizationid}` | - | Assertion Consumer Service (ACS) URL | `https://auth.clickhouse.cloud/login/callback?connection={organizationid}` | - | サービスプロバイダーのログイン URL | `https://console.clickhouse.cloud/?connection={organizationid}` | - - 4. これら 2 つのアイテムを収集し、上記のサポートケースを提出してプロセスを完了します: - - シングルサインオン URL - - 証明書 - -
- -## 仕組み {#how-it-works} - -### サービスプロバイダーが開始する SSO {#service-provider-initiated-sso} - -私たちはサービスプロバイダーが開始する SSO のみを利用しています。これは、ユーザーが `https://console.clickhouse.cloud` にアクセスし、認証のために IdP にリダイレクトするためにメールアドレスを入力することを意味します。IdP で既に認証されたユーザーは、ログインページでメールアドレスを入力せずに直接リンクを使用して組織に自動ログインできます。 - -### ユーザー役割の割り当て {#assigning-user-roles} - -ユーザーは、IdP アプリケーションに割り当てて最初にログインした後、ClickHouse Cloud コンソールに表示されます。少なくとも 1 人の SSO ユーザーが組織内で Admin 役割を割り当てられている必要があります。ソーシャルログインまたは `https://console.clickhouse.cloud/?with=email` を使用して、元の認証方法でログインし、SSO 役割を更新します。 - -### 非 SSO ユーザーの削除 {#removing-non-sso-users} - -SSO ユーザーを設定し、少なくとも 1 人に Admin 役割を割り当てると、Admin は他の方法(例:ソーシャル認証またはユーザー ID + パスワード)を使用してユーザーを削除できます。Google 認証は、SSO がセットアップされた後も機能し続けます。ユーザー ID + パスワードのユーザーは、メールドメインに基づいて自動的に SSO にリダイレクトされますが、`https://console.clickhouse.cloud/?with=email`を使用しない場合は例外です。 - -### ユーザーの管理 {#managing-users} - -ClickHouse Cloud は現在、SSO のために SAML を実装しています。ユーザーを管理するための SCIM はまだ実装されていません。これは、SSO ユーザーが ClickHouse Cloud 組織にアクセスするために IdP 内のアプリケーションに割り当てられなければならないことを意味します。ユーザーが ClickHouse Cloud にログインするには、1 回はログインする必要があります。IdP からユーザーが削除されると、ユーザーは SSO を使って ClickHouse Cloud にログインできなくなります。しかし、SSO ユーザーは管理者が手動でユーザーを削除するまで、組織内には表示され続けます。 - -### マルチオーガニゼーション SSO {#multi-org-sso} - -ClickHouse Cloud は、各組織に対して別の接続を提供することでマルチオーガニゼーション SSO をサポートしています。各組織にログインするには、直接リンク (`https://console.clickhouse.cloud/?connection={organizationid}`) を使用します。別の組織にログインする前には、1 つの組織からログアウトすることを確認してください。 - -## 追加情報 {#additional-information} - -認証に関しては、セキュリティが最も重要な優先事項です。このため、SSO を実装する際にいくつかの決定を下しましたので、知っておいていただく必要があります。 - -- **サービスプロバイダーが開始する認証フローのみを処理します。** ユーザーは `https://console.clickhouse.cloud` に移動し、アイデンティティプロバイダーにリダイレクトされるためにメールアドレスを入力する必要があります。URL を覚えておく必要がないように、ブックマークアプリケーションまたはショートカットを追加する手順が提供されています。 - -- **IdP 経由でアプリに割り当てられたすべてのユーザーは、同じメールドメインを持っている必要があります。** ベンダー、契約者、またはコンサルタントが ClickHouse アカウントにアクセスする場合、従業員と同じドメイン (例:user@domain.com) のメールアドレスを持っている必要があります。 - -- **SSO アカウントと非 SSO アカウントを自動的にリンクすることはありません。** 同じメールアドレスを使用している場合でも、ClickHouse ユーザーリストにユーザーの複数のアカウントが表示される可能性があります。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/security/saml-sso-setup.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/security/saml-sso-setup.md.hash deleted file mode 100644 index 3fd822612d4..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/security/saml-sso-setup.md.hash +++ /dev/null @@ -1 +0,0 @@ -1ecb081c60dbbb1e diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/security/setting-ip-filters.md b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/security/setting-ip-filters.md deleted file mode 100644 index d6eea83e1dc..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/security/setting-ip-filters.md +++ /dev/null @@ -1,85 +0,0 @@ ---- -sidebar_label: 'Setting IP Filters' -slug: '/cloud/security/setting-ip-filters' -title: 'Setting IP Filters' -description: 'This page explains how to set IP filters in ClickHouse Cloud to control - access to ClickHouse services.' 
---- - -import Image from '@theme/IdealImage'; -import ip_filtering_after_provisioning from '@site/static/images/cloud/security/ip-filtering-after-provisioning.png'; -import ip_filter_add_single_ip from '@site/static/images/cloud/security/ip-filter-add-single-ip.png'; - -## IPフィルターを設定する {#setting-ip-filters} - -IPアクセスリストは、どのソースアドレスがあなたのClickHouseサービスに接続できるかを指定することによって、ClickHouseサービスへのトラフィックをフィルタリングします。リストは各サービスのために設定可能です。リストはサービスの展開時に設定することも、その後に設定することもできます。プロビジョニング中にIPアクセスリストを設定しない場合や、初期リストを変更したい場合は、サービスを選択し、次に**セキュリティ**タブを選択することで変更を行うことができます。 - -:::important -ClickHouse CloudサービスのためにIPアクセスリストを作成しないと、そのサービスにはトラフィックが許可されません。 -::: - -## 準備 {#prepare} -作業を始める前に、アクセスリストに追加するべきIPアドレスまたは範囲を収集してください。リモート作業者、オンコールの場所、VPNなどを考慮に入れてください。IPアクセスリストのユーザーインターフェースでは、個別のアドレスとCIDR表記を受け付けます。 - -クラスレス・インタードメイン・ルーティング(CIDR)表記を利用すると、従来のクラスA、B、C(8、16、または24)サブネットマスクサイズよりも小さなIPアドレス範囲を指定できます。 [ARIN](https://account.arin.net/public/cidrCalculator)などのいくつかの組織はCIDR計算機を提供していますので、必要な場合は利用してください。また、CIDR表記に関する詳細については、[クラスレス・インタードメイン・ルーティング(CIDR)](https://www.rfc-editor.org/rfc/rfc4632.html) RFCをご覧ください。 - -## IPアクセスリストの作成または変更 {#create-or-modify-an-ip-access-list} - -ClickHouse Cloudサービスのリストからサービスを選択し、次に**設定**を選択します。**セキュリティ**セクションの下に、IPアクセスリストがあります。「*このサービスに接続できます* **(どこからでも | x 特定の場所から)**」というテキストのハイパーリンクをクリックします。 - -構成するためのオプションが表示されるサイドバーが表示されます: - -- サービスへのすべての場所からの着信トラフィックを許可する -- 特定の場所からのサービスへのアクセスを許可する -- サービスへのすべてのアクセスを拒否する - -このスクリーンショットは、"NY Office range"として説明されたIPアドレスの範囲からのトラフィックを許可するアクセスリストを示しています: - -ClickHouse Cloudの既存のアクセスリスト - -### 可能なアクション {#possible-actions} - -1. 追加のエントリを追加するには、**+ 新しいIPを追加**を使用します。 - - この例では、`London server`の説明を持つ単一のIPアドレスを追加します: - -ClickHouse Cloudのアクセスリストに単一のIPを追加 - -1. 既存のエントリを削除します。 - - クロス(x)をクリックすると、エントリが削除されます。 - -1. 既存のエントリを編集します。 - - エントリを直接変更します。 - -1. **どこからでも**アクセスを許可するに切り替えます。 - - これは推奨されませんが、許可されています。ClickHouseの上に構築されたアプリケーションを公開し、バックエンドのClickHouse Cloudサービスへのアクセスを制限することをお勧めします。 - -変更を適用するには、**保存**をクリックする必要があります。 - -## 検証 {#verification} - -フィルタを作成したら、範囲内からの接続を確認し、許可されていない範囲からの接続が拒否されていることを確認します。`curl`コマンドを利用して確認できます: -```bash title="許可リスト外からの拒否された試行" -curl https://.clickhouse.cloud:8443 -``` -```response -curl: (35) error:02FFF036:system library:func(4095):Connection reset by peer -``` -または -```response -curl: (35) LibreSSL SSL_connect: SSL_ERROR_SYSCALL in connection to HOSTNAME.clickhouse.cloud:8443 -``` - -```bash title="許可リスト内からの許可された試行" -curl https://.clickhouse.cloud:8443 -``` -```response -Ok.
-``` - -## 制限事項 {#limitations} - -- 現在、IPアクセスリストはIPv4のみをサポートしています。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/security/setting-ip-filters.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/security/setting-ip-filters.md.hash deleted file mode 100644 index 37b2eebc271..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/security/setting-ip-filters.md.hash +++ /dev/null @@ -1 +0,0 @@ -040bfba2cae20130 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/security/shared-responsibility-model.md b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/security/shared-responsibility-model.md deleted file mode 100644 index 64b179c8d60..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/security/shared-responsibility-model.md +++ /dev/null @@ -1,110 +0,0 @@ ---- -sidebar_label: 'Shared Responsibility Model' -slug: '/cloud/security/shared-responsibility-model' -title: 'セキュリティ共有責任モデル' -description: 'Learn more about the security model of ClickHouse Cloud' ---- - - - -## サービスタイプ {#service-types} - -ClickHouse Cloud は、Basic、Scale、および Enterprise の 3 つのサービスを提供しています。詳細については、[サービスの種類](/cloud/manage/cloud-tiers) ページをご覧ください。 - -## クラウドアーキテクチャ {#cloud-architecture} - -クラウドアーキテクチャは、コントロールプレーンとデータプレーンで構成されています。コントロールプレーンは、組織の作成、コントロールプレーン内のユーザー管理、サービス管理、API キー管理、請求に関連する責任を負います。データプレーンは、オーケストレーションや管理のためのツールを運用し、顧客サービスをホストします。詳細については、[ClickHouse Cloud アーキテクチャ](/cloud/reference/architecture) 図をご覧ください。 - -## BYOC アーキテクチャ {#byoc-architecture} - -Bring Your Own Cloud (BYOC) は、顧客が自分のクラウドアカウントでデータプレーンを運用できるようにします。詳細については、[BYOC (Bring Your Own Cloud)](/cloud/reference/byoc) ページをご覧ください。 - -## ClickHouse Cloud 共有責任モデル {#clickhouse-cloud-shared-responsibility-model} - -以下のモデルは、一般的に ClickHouse の責任を示し、ClickHouse Cloud および ClickHouse BYOC の顧客がそれぞれ対処すべき責任を示しています。PCI 共有責任モデルの詳細については、[Trust Center](https://trust.clickhouse.com) にある概要コピーをダウンロードしてください。 - -| コントロール | ClickHouse | クラウド顧客 | BYOC 顧客 | -|------------------------------------------------------------------------|--------------------|-------------------|---------------------| -| 環境の分離を維持 | :white_check_mark: | | :white_check_mark: | -| ネットワーク設定を管理 | :white_check_mark: | :white_check_mark:| :white_check_mark: | -| ClickHouse システムへのアクセスを安全に管理 | :white_check_mark: | | | -| コントロールプレーンおよびデータベース内の組織ユーザーを安全に管理 | | :white_check_mark:| :white_check_mark: | -| ユーザー管理および監査 | :white_check_mark: | :white_check_mark:| :white_check_mark: | -| データの転送時および保管時の暗号化 | :white_check_mark: | | | -| 顧客が管理する暗号化キーを安全に扱う | | :white_check_mark:| :white_check_mark: | -| 冗長インフラを提供 | :white_check_mark: | | :white_check_mark: | -| データのバックアップ | :white_check_mark: | :white_check_mark:| :white_check_mark: | -| バックアップ復旧能力を検証 | :white_check_mark: | :white_check_mark:| :white_check_mark: | -| データ保持設定を実施 | | :white_check_mark:| :white_check_mark: | -| セキュリティ構成管理 | :white_check_mark: | | :white_check_mark: | -| ソフトウェアとインフラの脆弱性修正 | :white_check_mark: | | | -| ペネトレーションテストを実施 | :white_check_mark: | | | -| 脅威検出および対応 | :white_check_mark: | | :white_check_mark: | -| セキュリティインシデント対応 | :white_check_mark: | | :white_check_mark: | - -## ClickHouse Cloud 設定可能なセキュリティ機能 {#clickhouse-cloud-configurable-security-features} - -
- ネットワーク接続 - - | 設定 | ステータス | クラウド | サービスレベル | - |--------------------------------------------------------------------------------------------------|-----------|---------------------|--------------------| - | [IP フィルター](/cloud/security/setting-ip-filters) でサービスへの接続を制限 | 利用可能 | AWS, GCP, Azure | すべて | - | [プライベートリンク](/cloud/security/private-link-overview) でサービスに安全に接続 | 利用可能 | AWS, GCP, Azure | Scale または Enterprise | - -
-
- アクセス管理 - - | 設定 | ステータス | クラウド | サービスレベル | - |--------------------------------------------------------------------------------------------------|-----------|---------------------|----------------------| - | [標準のロールベースのアクセス](/cloud/security/cloud-access-management) でコントロールプレーン | 利用可能 | AWS, GCP, Azure | すべて | - | [多要素認証 (MFA)](/cloud/security/cloud-authentication#multi-factor-authentication) 利用可能 | 利用可能 | AWS, GCP, Azure | すべて | - | コントロールプレーンへの [SAML シングルサインオン](/cloud/security/saml-setup) 利用可能 | プレビュー | AWS, GCP, Azure | Enterprise | - | データベース内の詳細な [ロールベースアクセス制御](/cloud/security/cloud-access-management/overview#database-permissions) | 利用可能 | AWS, GCP, Azure | すべて | - -
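The granular database-level role-based access control listed above is configured with ordinary SQL statements. A minimal sketch follows; the role, user, and table names are illustrative assumptions, not part of the original document.

```sql
-- Minimal RBAC sketch: names are illustrative.
CREATE ROLE read_only_analyst;

-- Grant read access to a single (assumed) table.
GRANT SELECT ON default.posts TO read_only_analyst;

-- Create a user and attach the role.
CREATE USER analyst_user IDENTIFIED WITH sha256_password BY 'use-a-strong-password';
GRANT read_only_analyst TO analyst_user;
```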
-
- データセキュリティ - - | 設定 | ステータス | クラウド | サービスレベル | - |--------------------------------------------------------------------------------------------------|-----------|---------------------|----------------------| - | [クラウドプロバイダーとリージョン](/cloud/reference/supported-regions) の選択 | 利用可能 | AWS, GCP, Azure | すべて | - | 限定された [毎日の無料バックアップ](/cloud/manage/backups/overview#default-backup-policy) | 利用可能 | AWS, GCP, Azure | すべて | - | 利用可能な [カスタムバックアップ構成](/cloud/manage/backups/overview#configurable-backups) | 利用可能 | GCP, AWS, Azure | Scale または Enterprise | - | [顧客管理の暗号化キー (CMEK)](/cloud/security/cmek) で透過的なデータ暗号化 | 利用可能 | AWS, GCP | Enterprise | - | [フィールドレベルの暗号化](/sql-reference/functions/encryption-functions) と手動キー管理 | 利用可能 | GCP, AWS, Azure | すべて | - -
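As a rough illustration of the field-level encryption option with manual key management listed above, the sketch below uses the `encrypt`/`decrypt` functions; the 32-byte key and 16-byte IV are placeholder strings for demonstration only, not values from the original document.

```sql
-- Placeholder key material: 32-byte key for AES-256, 16-byte IV for CBC mode.
WITH
    'keykeykeykeykeykeykeykeykeykey12' AS key,
    'iviviviviviviv16' AS iv
SELECT
    encrypt('aes-256-cbc', 'sensitive value', key, iv) AS ciphertext,
    decrypt('aes-256-cbc', ciphertext, key, iv) AS plaintext;
```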
-
- データ保持 - - | 設定 | ステータス | クラウド | サービスレベル | - |--------------------------------------------------------------------------------------------------|-----------|---------------------|----------------------| - | [有効期限 (TTL)](/sql-reference/statements/alter/ttl) 設定で保持を管理 | 利用可能 | AWS, GCP, Azure | すべて | - | [ALTER TABLE DELETE](/sql-reference/statements/alter/delete) 重い削除アクション用 | 利用可能 | AWS, GCP, Azure | すべて | - | [ライトウェイト DELETE](/sql-reference/statements/delete) 測定された削除活動用 | 利用可能 | AWS, GCP, Azure | すべて | - -
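The retention controls listed above map to ordinary DDL and DML. A minimal sketch, assuming a hypothetical `retention_demo` table:

```sql
-- Hypothetical table used only to illustrate the retention controls above.
CREATE TABLE retention_demo
(
    `event_time` DateTime,
    `user_id` UInt64,
    `payload` String
)
ENGINE = MergeTree
ORDER BY (user_id, event_time)
TTL event_time + INTERVAL 90 DAY;   -- rows expire 90 days after event_time

-- Lightweight delete for measured, targeted removal.
DELETE FROM retention_demo WHERE user_id = 42;

-- Mutation-based delete for heavyweight bulk removal.
ALTER TABLE retention_demo DELETE WHERE event_time < now() - INTERVAL 1 YEAR;
```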
-
- 監査とログ - - | 設定 | ステータス | クラウド | サービスレベル | - |--------------------------------------------------------------------------------------------------|-----------|---------------------|----------------------| - | [監査ログ](/cloud/security/audit-logging) コントロールプレーン活動用 | 利用可能 | AWS, GCP, Azure | すべて | - | [セッションログ](/operations/system-tables/session_log) データベース活動用 | 利用可能 | AWS, GCP, Azure | すべて | - | [クエリログ](/operations/system-tables/query_log) データベース活動用 | 利用可能 | AWS, GCP, Azure | すべて | - -
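For the database-side logs listed above, activity can be inspected directly from the system tables. A sketch querying `system.query_log` (the user name is an assumption):

```sql
-- Recent completed queries for one user, newest first.
SELECT
    event_time,
    user,
    query
FROM system.query_log
WHERE type = 'QueryFinish'
  AND user = 'analyst_user'
  AND event_date = today()
ORDER BY event_time DESC
LIMIT 20;
```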
- -## ClickHouse Cloud コンプライアンス {#clickhouse-cloud-compliance} - - | フレームワーク | ステータス | クラウド | サービスレベル | - |--------------------------------------------------------------------------------------------------|-----------|---------------------|----------------------| - | ISO 27001 コンプライアンス | 利用可能 | AWS, GCP, Azure | すべて | - | SOC 2 Type II コンプライアンス | 利用可能 | AWS, GCP, Azure | すべて | - | GDPR および CCPA コンプライアンス | 利用可能 | AWS, GCP, Azure | すべて | - | HIPAA コンプライアンス | 利用可能 | AWS, GCP | Enterprise | - | PCI コンプライアンス | 利用可能 | AWS | Enterprise | - - サポートされているコンプライアンスフレームワークの詳細については、[セキュリティとコンプライアンス](/cloud/security/security-and-compliance) ページをご覧ください。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/security/shared-responsibility-model.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/security/shared-responsibility-model.md.hash deleted file mode 100644 index ed391022165..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/security/shared-responsibility-model.md.hash +++ /dev/null @@ -1 +0,0 @@ -e5dce4c2f9cd4b61 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/support.md b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/support.md deleted file mode 100644 index 277f86c7c5c..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/support.md +++ /dev/null @@ -1,11 +0,0 @@ ---- -sidebar_label: 'クラウドサポート' -title: 'クラウドサポート' -slug: '/cloud/support' -description: 'クラウドサポートについて学ぶ' -hide_title: true ---- - -import Content from '@site/i18n/jp/docusaurus-plugin-content-docs/current/about-us/support.md'; - - diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/support.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/support.md.hash deleted file mode 100644 index 2f250785b47..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/support.md.hash +++ /dev/null @@ -1 +0,0 @@ -92402ef81da4e7c5 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/concepts/glossary.md b/i18n/jp/docusaurus-plugin-content-docs/current/concepts/glossary.md deleted file mode 100644 index 1190a335108..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/concepts/glossary.md +++ /dev/null @@ -1,39 +0,0 @@ ---- -sidebar_label: '用語集' -description: 'このページには、ClickHouseに関する一般的に使用される用語やフレーズのリストが含まれています。' -title: '用語集' -slug: '/concepts/glossary' ---- - - - - -# 用語集 - -## Atomicity {#atomicity} - -Atomicityは、トランザクション(データベース操作の一連のシリーズ)が単一の不可分な単位として扱われることを保証します。つまり、トランザクション内の全ての操作が実行されるか、何も実行されないかのどちらかです。原子的なトランザクションの例は、一つの銀行口座から別の銀行口座にお金を移動することです。移動のどちらかのステップが失敗すると、トランザクションも失敗し、お金は最初の口座に残ります。Atomicityは、いかなるお金も失われたり作成されたりしないことを保証します。 - -## Cluster {#cluster} - -データの保存と処理を一緒に行うノード(サーバー)の集合。 - -## CMEK {#cmek} - -顧客が管理する暗号化キー(CMEK)は、顧客がキー管理サービス(KMS)キーを使用してClickHouseのディスクデータキーを暗号化し、静止データを保護することを可能にします。 - -## Dictionary {#dictionary} - -Dictionaryは、さまざまな種類の参照リストに便利なキーと値のペアのマッピングです。これは、クエリ内でDictionaryを効率的に使用することを可能にする強力な機能であり、参照テーブルとの`JOIN`を使用するよりも効率的であることがよくあります。 - -## Parts {#parts} - -テーブルのデータの一部を保存するディスク上の物理ファイルです。これは、パーティションとは異なり、パーティションキーを使用して作成されたテーブルデータの論理的な分割です。 - -## Replica {#replica} - -ClickHouseデータベースに保存されているデータのコピー。冗長性と信頼性のために、同じデータのレプリカを任意の数だけ持つことができます。レプリカは、ClickHouseが異なるサーバー間でデータの複数のコピーを同期させることを可能にするReplicatedMergeTreeテーブルエンジンとともに使用されます。 - -## Shard {#shard} - -データのサブセット。ClickHouseは、お客様のデータに対して常に少なくとも1つのシャードを持ちます。データを複数のサーバーに分割しない場合、データは1つのシャードに保存されます。データを複数のサーバーにシャードすることは、単一のサーバーの容量を超えた場合に負荷を分散するために使用されます。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/concepts/glossary.md.hash 
b/i18n/jp/docusaurus-plugin-content-docs/current/concepts/glossary.md.hash deleted file mode 100644 index ed775b1fd6b..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/concepts/glossary.md.hash +++ /dev/null @@ -1 +0,0 @@ -da2b7d0613356ba7 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/concepts/index.md b/i18n/jp/docusaurus-plugin-content-docs/current/concepts/index.md deleted file mode 100644 index 8c860b15b79..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/concepts/index.md +++ /dev/null @@ -1,19 +0,0 @@ ---- -title: '概念' -slug: '/concepts' -description: '概念のランディングページ' -pagination_next: null -pagination_prev: null ---- - - - -このドキュメントのセクションでは、ClickHouseを非常に高速かつ効率的にしている概念について掘り下げていきます。 - -| ページ | 説明 | -|-------------------------------------------------------------------|---------------------------------------------------------------------------------------------| -| [ClickHouseがこれほど速い理由は?](./why-clickhouse-is-so-fast.md) | ClickHouseがこれほど速い理由を学びましょう。 | -| [OLAPとは?](./olap.md) | オンライン分析処理(OLAP)について学びましょう。 | -| [ClickHouseのユニークな点は?](../about-us/distinctive-features.md) | ClickHouseのユニークさについて学びましょう。 | -| [用語集](./glossary.md) | このページには、ドキュメント全体で一般的に出会う用語の用語集が含まれています。 | -| [FAQ](../faq/index.md) | ClickHouseに関してよく寄せられる質問の集約です。 | diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/concepts/index.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/concepts/index.md.hash deleted file mode 100644 index 36e98137932..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/concepts/index.md.hash +++ /dev/null @@ -1 +0,0 @@ -092ce0dbd32a76e2 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/concepts/olap.md b/i18n/jp/docusaurus-plugin-content-docs/current/concepts/olap.md deleted file mode 100644 index be4464bb3ab..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/concepts/olap.md +++ /dev/null @@ -1,41 +0,0 @@ ---- -sidebar_position: 2 -sidebar_label: 'OLAPとは何ですか?' -description: 'OLAPとは、Online Analytical Processing の略で、技術的およびビジネスの観点から見ることができる広範な用語です。' -title: 'OLAPとは何ですか?' -slug: '/concepts/olap' ---- - - - - -# OLAPとは? 
- -[OLAP](https://en.wikipedia.org/wiki/Online_analytical_processing)はオンライン分析処理の略です。これは技術的およびビジネスの2つの観点から見ることができる広範な用語です。最も高いレベルでは、これらの言葉を逆に読むことができます: - -**処理** いくつかのソースデータが処理されます… - -**分析的** …これらを分析的なレポートや洞察を生み出すために… - -**オンライン** …リアルタイムで。 - -## ビジネスの観点から見たOLAP {#olap-from-the-business-perspective} - -近年、ビジネス関係者はデータの価値に気付き始めました。盲目的に意思決定をする企業は、競争についていけずに失敗することが多いです。成功した企業のデータ主導のアプローチは、ビジネス決定に役立つ可能性のあるすべてのデータを収集することを強制し、タイムリーにこのデータを分析するためのメカニズムを必要とします。ここでOLAPデータベース管理システム(DBMS)が登場します。 - -ビジネス的に言えば、OLAPは企業が継続的に業務活動を計画、分析、報告することを可能にし、効率を最大化し、経費を削減し、最終的には市場シェアを獲得することを目指します。これは、社内システムで行うか、SaaSプロバイダ(ウェブ/モバイル分析サービス、CRMサービスなど)にアウトソースするかのいずれかです。OLAPは多くのBIアプリケーション(ビジネスインテリジェンス)の背後にある技術です。 - -ClickHouseは、ドメイン特有のデータを分析するためのこれらのSaaSソリューションのバックエンドとして頻繁に使用されるOLAPデータベース管理システムです。しかし、いくつかの企業は未だに第三者プロバイダとのデータ共有に消極的であり、したがって社内データウェアハウスのシナリオも実行可能です。 - -## 技術的観点から見たOLAP {#olap-from-the-technical-perspective} - -すべてのデータベース管理システムは、OLAP(オンライン **分析的** 処理)とOLTP(オンライン **トランザクション** 処理)の2つのグループに分類することができます。前者は、大量の履歴データに基づいてレポートを構築することに焦点を当てていますが、それをあまり頻繁には行いません。後者は通常、トランザクションの連続ストリームを処理し、データの現在の状態を常に変更します。 - -実際には、OLAPとOLTPは二項対立としては見られず、むしろスペクトルのように捉えられています。ほとんどの実際のシステムは通常、それらの一方に焦点を当てますが、もう一方のワークロードが必要な場合には何らかのソリューションやワークアラウンドを提供します。この状況はしばしば企業が統合された複数のストレージシステムを運用することを強いられます。これ自体はそれほど大きな問題ではありませんが、システムが多くなるとメンテナンスコストが増加します。そのため、近年のトレンドはHTAP(**ハイブリッドトランザクショナル/分析処理**)に向かっており、両方の種類のワークロードが単一のデータベース管理システムによって同等にうまく処理されることを目指しています。 - -DBMSが純粋なOLAPまたは純粋なOLTPとして始まった場合でも、競争に追いつくためにHTAPの方向に移行せざるを得ません。ClickHouseも例外ではありません。初めて設計されたのは[できる限り高速なOLAPシステム](/concepts/why-clickhouse-is-so-fast)であり、現在でも完全なトランザクションサポートはありませんが、一貫した読み書きやデータの更新/削除のための変更といったいくつかの機能が追加されています。 - -OLAPシステムとOLTPシステムの間の基本的なトレードオフは次の通りです: - -- 分析レポートを効率的に構築するには、カラムを別々に読み取ることが重要であり、そのためほとんどのOLAPデータベースは[列指向](https://clickhouse.com/engineering-resources/what-is-columnar-database)です。 -- 一方で、カラムを別々に保存すると、行の追加やインプレースの変更など、行に対する操作のコストがカラムの数に比例して増加します(システムが万が一に備えてイベントのすべての詳細を収集しようとする場合、大きな数になることがあります)。したがって、ほとんどのOLTPシステムはデータを行単位で整理して保存します。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/concepts/olap.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/concepts/olap.md.hash deleted file mode 100644 index 53c81401607..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/concepts/olap.md.hash +++ /dev/null @@ -1 +0,0 @@ -4d55e9d013ce211e diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/concepts/why-clickhouse-is-so-fast.md b/i18n/jp/docusaurus-plugin-content-docs/current/concepts/why-clickhouse-is-so-fast.md deleted file mode 100644 index 52ff14927fa..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/concepts/why-clickhouse-is-so-fast.md +++ /dev/null @@ -1,147 +0,0 @@ ---- -sidebar_position: 1 -sidebar_label: 'ClickHouse はなぜ高速なのか?' -description: 'それは速さを目指して設計されました。クエリの実行パフォーマンスは常に開発プロセスの中で最優先事項でしたが、使いやすさ、拡張性、セキュリティなどの他の重要な特性も考慮され、ClickHouse - が実際のプロダクションシステムになることができました。' -title: 'ClickHouse はなぜ高速なのか?' -slug: '/concepts/why-clickhouse-is-so-fast' ---- - - - - -# Why is ClickHouse so fast? 
{#why-clickhouse-is-so-fast} - -多くの他の要因が、データベースのパフォーマンスに寄与していますが、[そのデータの向き](/intro#row-oriented-vs-column-oriented-storage) もその一つです。次に、ClickHouseが特に他の列指向データベースと比較した場合に非常に速い理由について詳しく説明します。 - -アーキテクチャの観点から、データベースは(少なくとも)ストレージ層とクエリ処理層で構成されています。ストレージ層はテーブルデータの保存、読み込み、管理を担当し、クエリ処理層はユーザークエリを実行します。他のデータベースと比較して、ClickHouseは両方の層で革新を提供しており、非常に速い挿入とSELECTクエリを可能にしています。 - -## Storage Layer: Concurrent inserts are isolated from each other {#storage-layer-concurrent-inserts-are-isolated-from-each-other} - - - -ClickHouseでは、各テーブルは複数の「テーブルパーツ」で構成されています。ユーザーがデータをテーブルに挿入するたびに(INSERT文)、[パート](/parts) が作成されます。クエリは常に、クエリが開始する時点で存在するすべてのテーブルパーツに対して実行されます。 - -あまり多くのパーツが蓄積しないように、ClickHouseはバックグラウンドで[マージ](/merges) 操作を実行し、複数の小さなパーツを単一の大きなパーツに継続的に結合します。 - -このアプローチにはいくつかの利点があります。すべてのデータ処理を[バックグラウンドパートマージにオフロード](/concepts/why-clickhouse-is-so-fast#storage-layer-merge-time-computation) できるため、データの書き込みが軽量で非常に効率的になります。個々のインサートは、「ローカル」なものであり、グローバル、すなわちテーブルごとのデータ構造を更新する必要がありません。その結果、複数の同時挿入は相互同期や既存のテーブルデータとの同期を必要とせず、挿入はほぼディスクI/Oの速度で実行できます。 - - VLDB論文の包括的なパフォーマンス最適化セクション。 - -🤿 これは、私たちのVLDB 2024論文のウェブ版の[ディスク上フォーマット](/academic_overview#3-1-on-disk-format)セクションで詳しく述べています。 - -## Storage Layer: Concurrent inserts and selects are isolated {#storage-layer-concurrent-inserts-and-selects-are-isolated} - - - -挿入はSELECTクエリから完全に隔離されており、挿入されたデータパーツのマージは、同時クエリに影響を与えることなくバックグラウンドで行われます。 - -🤿 これは、私たちのVLDB 2024論文のウェブ版の[ストレージ層](/academic_overview#3-storage-layer)セクションで詳しく述べています。 - -## Storage Layer: Merge-time computation {#storage-layer-merge-time-computation} - - - -ClickHouseは、他のデータベースとは異なり、すべての追加データ変換をバックグラウンドプロセスである[マージ](/merges)中に行うことで、データの書き込みを軽量で効率的に保ちます。これには以下の例が含まれます: - -- **Replacing merges** は、入力パーツ内の行の最も最近のバージョンのみを保持し、他の行バージョンは破棄します。Replacing mergesは、マージ時のクリーニング操作と考えることができます。 - -- **Aggregating merges** は、入力部分の中間集計状態を新しい集計状態に結合します。これは理解するのが難しいように見えますが、実際には単に増分集計を実装しています。 - -- **TTL (time-to-live) merges** は、特定の時間ベースのルールに基づいて行を圧縮、移動、または削除します。 - -これらの変換の目的は、ユーザークエリが実行される時間からマージ時間へ作業(計算)を移すことです。これは次の2つの理由で重要です: - -一方では、ユーザークエリが「変換された」データ、例えば事前集約されたデータを利用できる場合、クエリが大幅に速くなる可能性があります。時には1000倍以上です。 - -他方では、マージのランタイムの大部分が入力パーツの読み込みと出力パーツの保存に消費されます。マージ中のデータ変換のための追加の努力は、通常、マージのランタイムにあまり影響しません。これらすべてのマジックは完全に透明であり、クエリの結果に影響を与えることはありません(性能を除いて)。 - -🤿 これは、私たちのVLDB 2024論文のウェブ版の[マージ時間データ変換](/academic_overview#3-3-merge-time-data-transformation)セクションで詳しく述べています。 - -## Storage Layer: Data pruning {#storage-layer-data-pruning} - - - -実際には、多くのクエリが反復的であり、すなわち、変わらないか、わずかに変更して(例えば、異なるパラメータ値で)定期的に実行されます。同じまたは類似のクエリを何度も実行することで、インデックスを追加したり、頻繁なクエリがより速くアクセスできるようにデータを再整理したりできます。このアプローチは「データプルーニング」としても知られ、ClickHouseは以下の3つの技術を提供します: - -1. [主キーインデックス](/guides/best-practices/sparse-primary-indexes#clickhouse-index-design) は、テーブルデータのソート順を定義します。適切に選択された主キーは、上記のクエリのWHERE文のようなフィルタを、フルカラムスキャンの代わりに高速なバイナリサーチを使用して評価できます。より技術的な用語で言えば、スキャンのランタイムはデータサイズに対して線形ではなく対数になります。 - -2. [テーブルプロジェクション](/sql-reference/statements/alter/projection) は、異なる主キーでソートされた同じデータを保存するテーブルの内部バージョンとしての代替です。プロジェクションは、頻繁なフィルタ条件が1つ以上ある場合に便利です。 - -3. 
[スキッピングインデックス](/optimize/skipping-indexes) は、カラム内に追加のデータ統計を埋め込むもので、例えば最小および最大のカラム値、一意な値のセットなどがあります。スキッピングインデックスは主キーおよびテーブルプロジェクションとは直交しており、カラム内のデータ分布によっては、フィルタの評価を大幅に高速化できます。 - -これら3つの技術の目的は、フルカラムリード中にできるだけ多くの行をスキップすることであり、データを読み込む最も速い方法は、データをまったく読み込まないことです。 - -🤿 これは、私たちのVLDB 2024論文のウェブ版の[データプルーニング](/academic_overview#3-2-data-pruning)セクションで詳しく述べています。 - -## Storage Layer: Data compression {#storage-layer-data-compression} - - - -また、ClickHouseのストレージ層は、異なるコーデックを使用して生のテーブルデータを追加的に(かつオプションで)圧縮します。 - -列ストアは、そのタイプとデータ分布が同じ値が一緒に配置されるため、このような圧縮に特に適しています。 - -ユーザーは、[指定](https://clickhouse.com/blog/optimize-clickhouse-codecs-compression-schema) することができ、カラムはさまざまな一般的な圧縮アルゴリズム(例:ZSTD)や、浮動小数点値用のGorillaやFPC、整数値用のDeltaやGCD、さらにはAESの暗号化コーデックを使用して圧縮されます。 - -データ圧縮は、データベーステーブルのストレージサイズを減少させるだけでなく、多くの場合、ローカルディスクやネットワークI/Oのスループットが低いため、クエリのパフォーマンスも向上させます。 - -🤿 これは、私たちのVLDB 2024論文のウェブ版の[ディスク上フォーマット](/academic_overview#3-1-on-disk-format)セクションで詳しく述べています。 - -## State-of-the-art query processing layer {#state-of-the-art-query-processing-layer} - - - -最後に、ClickHouseはベクトル化されたクエリ処理層を使用しており、クエリの実行を可能な限り並列化して、すべてのリソースを最大の速度と効率のために利用しています。 - -「ベクトル化」とは、クエリプランオペレーターが単一の行ではなく、バッチで中間結果行を渡すことを意味します。これにより、CPUキャッシュの利用が改善され、オペレーターは数値を同時に処理するためにSIMD命令を適用できます。実際、多くのオペレーターは、各SIMD命令セット世代ごとに1つのバージョンを持っています。ClickHouseは、実行されているハードウェアの能力に基づいて、最も最近で最速のバージョンを自動的に選択します。 - -現代のシステムには数十のCPUコアがあります。すべてのコアを利用するために、ClickHouseはクエリプランを複数のレーンに展開します。通常、1つのコアにつき1つのレーンです。各レーンはテーブルデータの不重複範囲を処理します。こうすることで、データベースのパフォーマンスは利用可能なコアの数に「垂直」にスケールします。 - -もし単一ノードがテーブルデータを保持するには小さすぎる場合、さらにノードを追加してクラスターを形成できます。テーブルは分割(「シャード」)でき、ノード間で分散されます。ClickHouseはテーブルデータを保存するすべてのノードでクエリを実行し、利用可能なノードの数に「水平」にスケールします。 - -🤿 これは、私たちのVLDB 2024論文のウェブ版の[クエリ処理層](/academic_overview#4-query-processing-layer)セクションで詳しく述べています。 - -## Meticulous attention to detail {#meticulous-attention-to-detail} - - - -> **"ClickHouseは異常なシステムです - あなたたちは20種類のハッシュテーブルを持っています。あなたたちはほとんどのシステムが持つことのない、すべての素晴らしいものを持っています** **...** **ClickHouseの素晴らしいパフォーマンスは、すべてのこれらの専門的なコンポーネントによるものです"** [Andy Pavlo, Database Professor at CMU](https://www.youtube.com/watch?v=Vy2t_wZx4Is&t=3579s) - -ClickHouseを[特徴付ける](https://www.youtube.com/watch?v=CAS2otEoerM)のは、低レベルの最適化に対する綿密な注意です。単に動作するデータベースを構築することは一つのことですが、多様なクエリタイプ、データ構造、分布、およびインデックス構成にわたって速度を提供するようにエンジニアリングすることこそが、「[異常なシステム](https://youtu.be/Vy2t_wZx4Is?si=K7MyzsBBxgmGcuGU&t=3579)」の芸術が輝くところです。 - -**ハッシュテーブル。** ハッシュテーブルを例に取ってみましょう。ハッシュテーブルは、ジョインや集約で使用される中心的なデータ構造です。プログラマーとして、次のような設計決定を考慮する必要があります: - -* 選択するハッシュ関数、 -* 衝突解決: [オープンアドレッシング](https://en.wikipedia.org/wiki/Open_addressing) または [チェイニング](https://en.wikipedia.org/wiki/Hash_table#Separate_chaining)、 -* メモリレイアウト:キーと値のための1つの配列または別々の配列? -* フィルファクター:いつ、どのようにサイズを変更すべきか?サイズ変更中に値をどのように移動させるか? -* 削除:ハッシュテーブルはエントリを排除することを許可すべきか? - -サードパーティライブラリによって提供された標準的なハッシュテーブルは機能的には動作しますが、高速ではありません。優れたパフォーマンスを発揮するには、綿密なベンチマークテストと実験が必要です。 - -[ClickHouseのハッシュテーブルの実装](https://clickhouse.com/blog/hash-tables-in-clickhouse-and-zero-cost-abstractions) は、クエリとデータの特性に基づいて、 **30以上のあらかじめコンパイルされたハッシュテーブルのバリアント** の中から1つを選択します。 - -**アルゴリズム。** アルゴリズムも同様です。たとえば、ソートに関して考慮すべきことは: - -* 何をソートするのか:数値、タプル、文字列、または構造体? -* データはRAMに存在するか? -* ソートは安定している必要があるか? -* すべてのデータをソートする必要があるのか、それとも部分的なソートで十分か? 
- -データ特性に依存するアルゴリズムは、一般的なアルゴリズムよりも優れたパフォーマンスを発揮することがよくあります。データ特性が事前に分からない場合、システムはさまざまな実装を試して、その時点で最も効果的なものを選択できます。例として、[ClickHouseにおけるLZ4デコンプレッションの実装についての論文](https://habr.com/en/company/yandex/blog/457612/)を参照してください。 - -🤿 これは、私たちのVLDB 2024論文のウェブ版の[包括的なパフォーマンス最適化](/academic_overview#4-4-holistic-performance-optimization)セクションで詳しく述べています。 - -## VLDB 2024 paper {#vldb-2024-paper} - -2024年8月、私たちは初めての研究論文がVLDBに受理され、公開されました。 -VLDBは非常に大規模なデータベースに関する国際会議であり、データ管理の分野でリーディングカンファレンスの一つと広く見なされています。 -数百件の投稿の中から、VLDBは一般的に約20%の受理率を持っています。 - -論文の[PDF](https://www.vldb.org/pvldb/vol17/p3731-schulze.pdf)や、ClickHouseの最も興味深いアーキテクチャやシステム設計コンポーネントを簡潔に説明する[ウェブ版](/academic_overview)を読むことができます。 - -私たちのCTOでありClickHouseの創設者であるAlexey Milovidovが論文を発表しました(スライドは[こちら](https://raw.githubusercontent.com/ClickHouse/clickhouse-presentations/master/2024-vldb/VLDB_2024_presentation.pdf))その後、Q&Aが行われました(すぐに時間切れになりました!)。 -録画されたプレゼンテーションはこちらで確認できます: - - diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/concepts/why-clickhouse-is-so-fast.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/concepts/why-clickhouse-is-so-fast.md.hash deleted file mode 100644 index a1f1bbd2712..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/concepts/why-clickhouse-is-so-fast.md.hash +++ /dev/null @@ -1 +0,0 @@ -4283c6ca5e499dbd diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/data-compression/compression-in-clickhouse.md b/i18n/jp/docusaurus-plugin-content-docs/current/data-compression/compression-in-clickhouse.md deleted file mode 100644 index cafb2b0af80..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/data-compression/compression-in-clickhouse.md +++ /dev/null @@ -1,445 +0,0 @@ ---- -slug: '/data-compression/compression-in-clickhouse' -title: 'ClickHouseにおける圧縮' -description: 'ClickHouseの圧縮アルゴリズムの選択' -keywords: -- 'compression' -- 'codec' -- 'encoding' ---- - - - -One of the secrets to ClickHouse query performance is compression. - -Less data on disk means less I/O and faster queries and inserts. The overhead of any compression algorithm with respect to CPU is in most cases outweighed by the reduction in I/O. Improving the compression of the data should therefore be the first focus when working on ensuring ClickHouse queries are fast. - -> For why ClickHouse compresses data so well, we recommended [this article](https://clickhouse.com/blog/optimize-clickhouse-codecs-compression-schema). In summary, as a column-oriented database, values will be written in column order. If these values are sorted, the same values will be adjacent to each other. Compression algorithms exploit contiguous patterns of data. On top of this, ClickHouse has codecs and granular data types which allow users to tune the compression techniques further. - -Compression in ClickHouse will be impacted by 3 principal factors: -- The ordering key -- The data types -- Which codecs are used - -All of these are configured through the schema. - -## Choose the right data type to optimize compression {#choose-the-right-data-type-to-optimize-compression} - -Let's use the Stack Overflow dataset as an example. Let's compare compression statistics for the following schemas for the `posts` table: - -- `posts` - A non type optimized schema with no ordering key. -- `posts_v3` - A type optimized schema with the appropriate type and bit size for each column with ordering key `(PostTypeId, toDate(CreationDate), CommentCount)`. - -Using the following queries, we can measure the current compressed and uncompressed size of each column. 
Let's examine the size of the initial optimized schema `posts` with no ordering key. - -```sql -SELECT name, - formatReadableSize(sum(data_compressed_bytes)) AS compressed_size, - formatReadableSize(sum(data_uncompressed_bytes)) AS uncompressed_size, - round(sum(data_uncompressed_bytes) / sum(data_compressed_bytes), 2) AS ratio -FROM system.columns -WHERE table = 'posts' -GROUP BY name - -┌─name──────────────────┬─compressed_size─┬─uncompressed_size─┬───ratio────┐ -│ Body │ 46.14 GiB │ 127.31 GiB │ 2.76 │ -│ Title │ 1.20 GiB │ 2.63 GiB │ 2.19 │ -│ Score │ 84.77 MiB │ 736.45 MiB │ 8.69 │ -│ Tags │ 475.56 MiB │ 1.40 GiB │ 3.02 │ -│ ParentId │ 210.91 MiB │ 696.20 MiB │ 3.3 │ -│ Id │ 111.17 MiB │ 736.45 MiB │ 6.62 │ -│ AcceptedAnswerId │ 81.55 MiB │ 736.45 MiB │ 9.03 │ -│ ClosedDate │ 13.99 MiB │ 517.82 MiB │ 37.02 │ -│ LastActivityDate │ 489.84 MiB │ 964.64 MiB │ 1.97 │ -│ CommentCount │ 37.62 MiB │ 565.30 MiB │ 15.03 │ -│ OwnerUserId │ 368.98 MiB │ 736.45 MiB │ 2 │ -│ AnswerCount │ 21.82 MiB │ 622.35 MiB │ 28.53 │ -│ FavoriteCount │ 280.95 KiB │ 508.40 MiB │ 1853.02 │ -│ ViewCount │ 95.77 MiB │ 736.45 MiB │ 7.69 │ -│ LastEditorUserId │ 179.47 MiB │ 736.45 MiB │ 4.1 │ -│ ContentLicense │ 5.45 MiB │ 847.92 MiB │ 155.5 │ -│ OwnerDisplayName │ 14.30 MiB │ 142.58 MiB │ 9.97 │ -│ PostTypeId │ 20.93 MiB │ 565.30 MiB │ 27 │ -│ CreationDate │ 314.17 MiB │ 964.64 MiB │ 3.07 │ -│ LastEditDate │ 346.32 MiB │ 964.64 MiB │ 2.79 │ -│ LastEditorDisplayName │ 5.46 MiB │ 124.25 MiB │ 22.75 │ -│ CommunityOwnedDate │ 2.21 MiB │ 509.60 MiB │ 230.94 │ -└───────────────────────┴─────────────────┴───────────────────┴────────────┘ -``` - -We show both a compressed and uncompressed size here. Both are important. The compressed size equates to what we will need to read off disk - something we want to minimize for query performance (and storage cost). This data will need to be decompressed prior to reading. The size of this uncompressed size will be dependent on the data type used in this case. Minimizing this size will reduce memory overhead of queries and the amount of data which has to be processed by the query, improving utilization of caches and ultimately query times. - -> The above query relies on the table `columns` in the system database. This database is managed by ClickHouse and is a treasure trove of useful information, from query performance metrics to background cluster logs. We recommend ["System Tables and a Window into the Internals of ClickHouse"](https://clickhouse.com/blog/clickhouse-debugging-issues-with-system-tables) and accompanying articles[[1]](https://clickhouse.com/blog/monitoring-troubleshooting-insert-queries-clickhouse)[[2]](https://clickhouse.com/blog/monitoring-troubleshooting-select-queries-clickhouse) for the curious reader. - -To summarize the total size of the table, we can simplify the above query: - -```sql -SELECT formatReadableSize(sum(data_compressed_bytes)) AS compressed_size, - formatReadableSize(sum(data_uncompressed_bytes)) AS uncompressed_size, - round(sum(data_uncompressed_bytes) / sum(data_compressed_bytes), 2) AS ratio -FROM system.columns -WHERE table = 'posts' - -┌─compressed_size─┬─uncompressed_size─┬─ratio─┐ -│ 50.16 GiB │ 143.47 GiB │ 2.86 │ -└─────────────────┴───────────────────┴───────┘ -``` - -Repeating this query for the `posts_v3`, the table with an optimized type and ordering key, we can see a significant reduction in uncompressed and compressed sizes. 
- -```sql -SELECT - formatReadableSize(sum(data_compressed_bytes)) AS compressed_size, - formatReadableSize(sum(data_uncompressed_bytes)) AS uncompressed_size, - round(sum(data_uncompressed_bytes) / sum(data_compressed_bytes), 2) AS ratio -FROM system.columns -WHERE `table` = 'posts_v3' - -┌─compressed_size─┬─uncompressed_size─┬─ratio─┐ -│ 25.15 GiB │ 68.87 GiB │ 2.74 │ -└─────────────────┴───────────────────┴───────┘ -``` - -The full column breakdown shows considerable savings for the `Body`, `Title`, `Tags` and `CreationDate` columns achieved by ordering the data prior to compression and using the appropriate types. - -```sql -SELECT - name, - formatReadableSize(sum(data_compressed_bytes)) AS compressed_size, - formatReadableSize(sum(data_uncompressed_bytes)) AS uncompressed_size, - round(sum(data_uncompressed_bytes) / sum(data_compressed_bytes), 2) AS ratio -FROM system.columns -WHERE `table` = 'posts_v3' -GROUP BY name - -┌─name──────────────────┬─compressed_size─┬─uncompressed_size─┬───ratio─┐ -│ Body │ 23.10 GiB │ 63.63 GiB │ 2.75 │ -│ Title │ 614.65 MiB │ 1.28 GiB │ 2.14 │ -│ Score │ 40.28 MiB │ 227.38 MiB │ 5.65 │ -│ Tags │ 234.05 MiB │ 688.49 MiB │ 2.94 │ -│ ParentId │ 107.78 MiB │ 321.33 MiB │ 2.98 │ -│ Id │ 159.70 MiB │ 227.38 MiB │ 1.42 │ -│ AcceptedAnswerId │ 40.34 MiB │ 227.38 MiB │ 5.64 │ -│ ClosedDate │ 5.93 MiB │ 9.49 MiB │ 1.6 │ -│ LastActivityDate │ 246.55 MiB │ 454.76 MiB │ 1.84 │ -│ CommentCount │ 635.78 KiB │ 56.84 MiB │ 91.55 │ -│ OwnerUserId │ 183.86 MiB │ 227.38 MiB │ 1.24 │ -│ AnswerCount │ 9.67 MiB │ 113.69 MiB │ 11.76 │ -│ FavoriteCount │ 19.77 KiB │ 147.32 KiB │ 7.45 │ -│ ViewCount │ 45.04 MiB │ 227.38 MiB │ 5.05 │ -│ LastEditorUserId │ 86.25 MiB │ 227.38 MiB │ 2.64 │ -│ ContentLicense │ 2.17 MiB │ 57.10 MiB │ 26.37 │ -│ OwnerDisplayName │ 5.95 MiB │ 16.19 MiB │ 2.72 │ -│ PostTypeId │ 39.49 KiB │ 56.84 MiB │ 1474.01 │ -│ CreationDate │ 181.23 MiB │ 454.76 MiB │ 2.51 │ -│ LastEditDate │ 134.07 MiB │ 454.76 MiB │ 3.39 │ -│ LastEditorDisplayName │ 2.15 MiB │ 6.25 MiB │ 2.91 │ -│ CommunityOwnedDate │ 824.60 KiB │ 1.34 MiB │ 1.66 │ -└───────────────────────┴─────────────────┴───────────────────┴─────────┘ -``` - -## Choosing the right column compression codec {#choosing-the-right-column-compression-codec} - -With column compression codecs, we can change the algorithm (and its settings) used to encode and compress each column. - -Encodings and compression work slightly differently with the same objective: to reduce our data size. Encodings apply a mapping to our data, transforming the values based on a function by exploiting properties of the data type. Conversely, compression uses a generic algorithm to compress data at a byte level. - -Typically, encodings are applied first before compression is used. Since different encodings and compression algorithms are effective on different value distributions, we must understand our data. - -ClickHouse supports a large number of codecs and compression algorithms. The following are some recommendations in order of importance: - -Recommendation | Reasoning ---- | --- -**`ZSTD` all the way** | `ZSTD` compression offers the best rates of compression. `ZSTD(1)` should be the default for most common types. Higher rates of compression can be tried by modifying the numeric value. We rarely see sufficient benefits on values higher than 3 for the increased cost of compression (slower insertion). 
-**`Delta` for date and integer sequences** | `Delta`-based codecs work well whenever you have monotonic sequences or small deltas in consecutive values. More specifically, the Delta codec works well, provided the derivatives yield small numbers. If not, `DoubleDelta` is worth trying (this typically adds little if the first-level derivative from `Delta` is already very small). Sequences where the monotonic increment is uniform, will compress even better e.g. DateTime fields. -**`Delta` improves `ZSTD`** | `ZSTD` is an effective codec on delta data - conversely, delta encoding can improve `ZSTD` compression. In the presence of `ZSTD`, other codecs rarely offer further improvement. -**`LZ4` over `ZSTD` if possible** | if you get comparable compression between `LZ4` and `ZSTD`, favor the former since it offers faster decompression and needs less CPU. However, `ZSTD` will outperform `LZ4` by a significant margin in most cases. Some of these codecs may work faster in combination with `LZ4` while providing similar compression compared to `ZSTD` without a codec. This will be data specific, however, and requires testing. -**`T64` for sparse or small ranges** | `T64` can be effective on sparse data or when the range in a block is small. Avoid `T64` for random numbers. -**`Gorilla` and `T64` for unknown patterns?** | If the data has an unknown pattern, it may be worth trying `Gorilla` and `T64`. -**`Gorilla` for gauge data** | `Gorilla` can be effective on floating point data, specifically that which represents gauge readings, i.e. random spikes. - -See [here](/sql-reference/statements/create/table#column_compression_codec) for further options. - -Below we specify the `Delta` codec for the `Id`, `ViewCount` and `AnswerCount`, hypothesizing these will be linearly correlated with the ordering key and thus should benefit from Delta encoding. 
- -```sql -CREATE TABLE posts_v4 -( - `Id` Int32 CODEC(Delta, ZSTD), - `PostTypeId` Enum('Question' = 1, 'Answer' = 2, 'Wiki' = 3, 'TagWikiExcerpt' = 4, 'TagWiki' = 5, 'ModeratorNomination' = 6, 'WikiPlaceholder' = 7, 'PrivilegeWiki' = 8), - `AcceptedAnswerId` UInt32, - `CreationDate` DateTime64(3, 'UTC'), - `Score` Int32, - `ViewCount` UInt32 CODEC(Delta, ZSTD), - `Body` String, - `OwnerUserId` Int32, - `OwnerDisplayName` String, - `LastEditorUserId` Int32, - `LastEditorDisplayName` String, - `LastEditDate` DateTime64(3, 'UTC'), - `LastActivityDate` DateTime64(3, 'UTC'), - `Title` String, - `Tags` String, - `AnswerCount` UInt16 CODEC(Delta, ZSTD), - `CommentCount` UInt8, - `FavoriteCount` UInt8, - `ContentLicense` LowCardinality(String), - `ParentId` String, - `CommunityOwnedDate` DateTime64(3, 'UTC'), - `ClosedDate` DateTime64(3, 'UTC') -) -ENGINE = MergeTree -ORDER BY (PostTypeId, toDate(CreationDate), CommentCount) -``` - -The compression improvements for these columns is shown below: - -```sql -SELECT - `table`, - name, - formatReadableSize(sum(data_compressed_bytes)) AS compressed_size, - formatReadableSize(sum(data_uncompressed_bytes)) AS uncompressed_size, - round(sum(data_uncompressed_bytes) / sum(data_compressed_bytes), 2) AS ratio -FROM system.columns -WHERE (name IN ('Id', 'ViewCount', 'AnswerCount')) AND (`table` IN ('posts_v3', 'posts_v4')) -GROUP BY - `table`, - name -ORDER BY - name ASC, - `table` ASC - -┌─table────┬─name────────┬─compressed_size─┬─uncompressed_size─┬─ratio─┐ -│ posts_v3 │ AnswerCount │ 9.67 MiB │ 113.69 MiB │ 11.76 │ -│ posts_v4 │ AnswerCount │ 10.39 MiB │ 111.31 MiB │ 10.71 │ -│ posts_v3 │ Id │ 159.70 MiB │ 227.38 MiB │ 1.42 │ -│ posts_v4 │ Id │ 64.91 MiB │ 222.63 MiB │ 3.43 │ -│ posts_v3 │ ViewCount │ 45.04 MiB │ 227.38 MiB │ 5.05 │ -│ posts_v4 │ ViewCount │ 52.72 MiB │ 222.63 MiB │ 4.22 │ -└──────────┴─────────────┴─────────────────┴───────────────────┴───────┘ - -6 rows in set. Elapsed: 0.008 sec -``` - -### Compression in ClickHouse Cloud {#compression-in-clickhouse-cloud} - -In ClickHouse Cloud, we utilize the `ZSTD` compression algorithm (with a default value of 1) by default. While compression speeds can vary for this algorithm, depending on the compression level (higher = slower), it has the advantage of being consistently fast on decompression (around 20% variance) and also benefiting from the ability to be parallelized. Our historical tests also suggest that this algorithm is often sufficiently effective and can even outperform `LZ4` combined with a codec. It is effective on most data types and information distributions, and is thus a sensible general-purpose default and why our initial earlier compression is already excellent even without optimization. 
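If the default cloud codec is not ideal for a particular column, it can be overridden per column and the result checked from `system.columns`. A sketch reusing the `posts_v4` table defined above; note that the new codec applies to parts written or merged after the change.

```sql
-- Raise the ZSTD level on the largest column; affects newly written/merged parts.
ALTER TABLE posts_v4 MODIFY COLUMN Body String CODEC(ZSTD(3));

-- Confirm the codec now attached to the column.
SELECT name, compression_codec
FROM system.columns
WHERE table = 'posts_v4' AND name = 'Body';
```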
- ---- - -ClickHouseのクエリパフォーマンスの秘密の一つは圧縮です。 - -ディスク上のデータが少ないほど、I/Oが少なくなり、クエリや挿入が速くなります。ほとんどの場合、CPUに関するいかなる圧縮アルゴリズムのオーバーヘッドも、I/Oの削減によって打ち消されます。したがって、ClickHouseのクエリを高速に保つために取り組む際には、データの圧縮を改善することがまず最初の焦点となるべきです。 - -> ClickHouseがデータを非常によく圧縮する理由については、[こちらの記事](https://clickhouse.com/blog/optimize-clickhouse-codecs-compression-schema)をお勧めします。要約すると、列指向データベースとして、値は列の順序で書き込まれます。これらの値がソートされている場合、同じ値が隣接します。圧縮アルゴリズムは、連続的なデータパターンを利用します。さらに、ClickHouseには、ユーザーが圧縮技術をさらに調整できるコーデックと細分化されたデータ型があります。 - -ClickHouseの圧縮は次の3つの主要な要因に影響を受けます: -- 順序キー -- データ型 -- 使用されるコーデック - -これらすべては、スキーマを通じて構成されます。 - -## 圧縮を最適化するために適切なデータ型を選ぶ {#choose-the-right-data-type-to-optimize-compression} - -Stack Overflowのデータセットを例として使用しましょう。`posts`テーブルの次のスキーマの圧縮統計を比較してみましょう: - -- `posts` - 順序キーがない非型最適化スキーマ。 -- `posts_v3` - 各カラムに対して適切な型およびビットサイズを持ち、順序キー`(PostTypeId, toDate(CreationDate), CommentCount)`を持つ型最適化スキーマ。 - -次のクエリを使用して、各カラムの現在の圧縮されたサイズと圧縮されていないサイズを測定できます。順序キーがない最初の最適化スキーマ`posts`のサイズを確認しましょう。 - -```sql -SELECT name, - formatReadableSize(sum(data_compressed_bytes)) AS compressed_size, - formatReadableSize(sum(data_uncompressed_bytes)) AS uncompressed_size, - round(sum(data_uncompressed_bytes) / sum(data_compressed_bytes), 2) AS ratio -FROM system.columns -WHERE table = 'posts' -GROUP BY name - -┌─name──────────────────┬─compressed_size─┬─uncompressed_size─┬───ratio────┐ -│ Body │ 46.14 GiB │ 127.31 GiB │ 2.76 │ -│ Title │ 1.20 GiB │ 2.63 GiB │ 2.19 │ -│ Score │ 84.77 MiB │ 736.45 MiB │ 8.69 │ -│ Tags │ 475.56 MiB │ 1.40 GiB │ 3.02 │ -│ ParentId │ 210.91 MiB │ 696.20 MiB │ 3.3 │ -│ Id │ 111.17 MiB │ 736.45 MiB │ 6.62 │ -│ AcceptedAnswerId │ 81.55 MiB │ 736.45 MiB │ 9.03 │ -│ ClosedDate │ 13.99 MiB │ 517.82 MiB │ 37.02 │ -│ LastActivityDate │ 489.84 MiB │ 964.64 MiB │ 1.97 │ -│ CommentCount │ 37.62 MiB │ 565.30 MiB │ 15.03 │ -│ OwnerUserId │ 368.98 MiB │ 736.45 MiB │ 2 │ -│ AnswerCount │ 21.82 MiB │ 622.35 MiB │ 28.53 │ -│ FavoriteCount │ 280.95 KiB │ 508.40 MiB │ 1853.02 │ -│ ViewCount │ 95.77 MiB │ 736.45 MiB │ 7.69 │ -│ LastEditorUserId │ 179.47 MiB │ 736.45 MiB │ 4.1 │ -│ ContentLicense │ 5.45 MiB │ 847.92 MiB │ 155.5 │ -│ OwnerDisplayName │ 14.30 MiB │ 142.58 MiB │ 9.97 │ -│ PostTypeId │ 20.93 MiB │ 565.30 MiB │ 27 │ -│ CreationDate │ 314.17 MiB │ 964.64 MiB │ 3.07 │ -│ LastEditDate │ 346.32 MiB │ 964.64 MiB │ 2.79 │ -│ LastEditorDisplayName │ 5.46 MiB │ 124.25 MiB │ 22.75 │ -│ CommunityOwnedDate │ 2.21 MiB │ 509.60 MiB │ 230.94 │ -└───────────────────────┴─────────────────┴───────────────────┴────────────┘ -``` - -ここでは、圧縮されたサイズと圧縮されていないサイズの両方を示しています。両方共に重要です。圧縮サイズは、ディスクから読み取る必要があるサイズを表し、クエリパフォーマンス(およびストレージコスト)のために最小化したいものです。このデータは、読み取る前に解凍する必要があります。この圧縮されていないサイズは、使用されるデータ型に依存します。このサイズを最小化すると、クエリのメモリオーバーヘッドと、クエリによって処理される必要があるデータ量が減少し、キャッシュの利用が改善され、最終的にクエリの時間が短縮されます。 - -> 上記のクエリは、システムデータベース内の`columns`テーブルに依存しています。このデータベースはClickHouseによって管理されており、クエリパフォーマンスメトリックからバックグラウンドクラスターのログまで、有用な情報の宝庫です。興味がある読者には、["システムテーブルとClickHouse内部へのウィンドウ"](https://clickhouse.com/blog/clickhouse-debugging-issues-with-system-tables)とそれに伴う記事[[1]](https://clickhouse.com/blog/monitoring-troubleshooting-insert-queries-clickhouse)[[2]](https://clickhouse.com/blog/monitoring-troubleshooting-select-queries-clickhouse)をお勧めします。 - -テーブルの総サイズを要約するために、上記のクエリを簡素化できます: - -```sql -SELECT formatReadableSize(sum(data_compressed_bytes)) AS compressed_size, - formatReadableSize(sum(data_uncompressed_bytes)) AS uncompressed_size, - round(sum(data_uncompressed_bytes) / sum(data_compressed_bytes), 2) AS ratio -FROM system.columns -WHERE table = 'posts' - 
-┌─compressed_size─┬─uncompressed_size─┬─ratio─┐ -│ 50.16 GiB │ 143.47 GiB │ 2.86 │ -└─────────────────┴───────────────────┴───────┘ -``` - -このクエリを`posts_v3`、すなわち最適化された型と順序キーを持つテーブルに対して繰り返すと、圧縮されていないサイズと圧縮サイズが大幅に減少していることがわかります。 - -```sql -SELECT - formatReadableSize(sum(data_compressed_bytes)) AS compressed_size, - formatReadableSize(sum(data_uncompressed_bytes)) AS uncompressed_size, - round(sum(data_uncompressed_bytes) / sum(data_compressed_bytes), 2) AS ratio -FROM system.columns -WHERE `table` = 'posts_v3' - -┌─compressed_size─┬─uncompressed_size─┬─ratio─┐ -│ 25.15 GiB │ 68.87 GiB │ 2.74 │ -└─────────────────┴───────────────────┴───────┘ -``` - -完全なカラムの内訳は、圧縮前にデータを順序付けし、適切な型を使用することで達成された`Body`、`Title`、`Tags`、`CreationDate`カラムの大幅な節約を示しています。 - -```sql -SELECT - name, - formatReadableSize(sum(data_compressed_bytes)) AS compressed_size, - formatReadableSize(sum(data_uncompressed_bytes)) AS uncompressed_size, - round(sum(data_uncompressed_bytes) / sum(data_compressed_bytes), 2) AS ratio -FROM system.columns -WHERE `table` = 'posts_v3' -GROUP BY name - -┌─name──────────────────┬─compressed_size─┬─uncompressed_size─┬───ratio─┐ -│ Body │ 23.10 GiB │ 63.63 GiB │ 2.75 │ -│ Title │ 614.65 MiB │ 1.28 GiB │ 2.14 │ -│ Score │ 40.28 MiB │ 227.38 MiB │ 5.65 │ -│ Tags │ 234.05 MiB │ 688.49 MiB │ 2.94 │ -│ ParentId │ 107.78 MiB │ 321.33 MiB │ 2.98 │ -│ Id │ 159.70 MiB │ 227.38 MiB │ 1.42 │ -│ AcceptedAnswerId │ 40.34 MiB │ 227.38 MiB │ 5.64 │ -│ ClosedDate │ 5.93 MiB │ 9.49 MiB │ 1.6 │ -│ LastActivityDate │ 246.55 MiB │ 454.76 MiB │ 1.84 │ -│ CommentCount │ 635.78 KiB │ 56.84 MiB │ 91.55 │ -│ OwnerUserId │ 183.86 MiB │ 227.38 MiB │ 1.24 │ -│ AnswerCount │ 9.67 MiB │ 113.69 MiB │ 11.76 │ -│ FavoriteCount │ 19.77 KiB │ 147.32 KiB │ 7.45 │ -│ ViewCount │ 45.04 MiB │ 227.38 MiB │ 5.05 │ -│ LastEditorUserId │ 86.25 MiB │ 227.38 MiB │ 2.64 │ -│ ContentLicense │ 2.17 MiB │ 57.10 MiB │ 26.37 │ -│ OwnerDisplayName │ 5.95 MiB │ 16.19 MiB │ 2.72 │ -│ PostTypeId │ 39.49 KiB │ 56.84 MiB │ 1474.01 │ -│ CreationDate │ 181.23 MiB │ 454.76 MiB │ 2.51 │ -│ LastEditDate │ 134.07 MiB │ 454.76 MiB │ 3.39 │ -│ LastEditorDisplayName │ 2.15 MiB │ 6.25 MiB │ 2.91 │ -│ CommunityOwnedDate │ 824.60 KiB │ 1.34 MiB │ 1.66 │ -└───────────────────────┴─────────────────┴───────────────────┴─────────┘ -``` - -## 適切なカラム圧縮コーデックの選定 {#choosing-the-right-column-compression-codec} - -カラム圧縮コーデックを使用すると、各カラムのエンコードおよび圧縮に使用されるアルゴリズム(およびその設定)を変更できます。 - -エンコーディングと圧縮は、同じ目的のためにわずかに異なる方法で機能します:データサイズを削減することです。エンコーディングは、データ型の特性を利用して、関数に基づいて値を変換するマッピングをデータに適用します。対照的に、圧縮はバイトレベルでデータを圧縮するための一般的なアルゴリズムを使用します。 - -通常、エンコーディングは最初に適用され、その後に圧縮が使用されます。異なるエンコーディングおよび圧縮アルゴリズムは、異なる値分布に対して効果的であるため、データを理解する必要があります。 - -ClickHouseは多数のコーデックおよび圧縮アルゴリズムをサポートしています。以下はいくつかの推奨事項です。 - -Recommendation | Reasoning ---- | --- -**`ZSTD`を選ぶべき** | `ZSTD`圧縮は最良の圧縮率を提供します。最も一般的な型の場合、`ZSTD(1)`をデフォルトにするべきです。数値を変更して、より高い圧縮率を試してみることができます。圧縮コストが高く(挿入が遅くなる)、3を超える値での十分な利益は見られないことがほとんどです。 -**日付と整数のシーケンス用の`Delta`** | モノトニックシーケンスまたは連続値の小さなデルタがある場合、`Delta`-ベースのコーデックは効果的です。具体的には、デルタコーデックは導関数が小さい場合に適しています。そうでない場合、`DoubleDelta`を試してみる価値があります(これは通常、第1レベルの導関数が非常に小さい場合にはあまり影響しません)。モノトニック増加が均一であるシーケンスは、さらに圧縮されます(例:DateTimeフィールド)。 -**`Delta`は`ZSTD`を改善する** | `ZSTD`はデルタデータに対して効果的なコーデックです。逆に、デルタエンコーディングは`ZSTD`の圧縮を改善することができます。`ZSTD`が存在する場合、他のコーデックはほとんどさらなる改善を提供しません。 -**`LZ4`が利用可能な場合は`ZSTD`を選ぶ** | `LZ4`と`ZSTD`の間で同等の圧縮が得られる場合は、前者を選択してください。デコンプレッションが速く、CPUの使用が少なくて済むからです。ただし、通常のケースでは、`ZSTD`は`LZ4`を大きく上回ります。これらのコーデックの一部は、コーデックなしで`ZSTD`に対して同様の圧縮を提供しつつ`LZ4`と組み合わせてより高速に動作する可能性があります。ただし、これはデータ特有であり、テストが必要です。 
-**スパースまたは小範囲用の`T64`** | `T64`はスパースデータやブロック内の範囲が小さい場合に効果的です。ランダム番号には`T64`を避けてください。 -**未知のパターン用の`Gorilla`および`T64`** | データに未知のパターンがある場合は、`Gorilla`および`T64`を試してみる価値があります。 -**ゲージデータ用の`Gorilla`** | `Gorilla`は浮動小数点データ、特にゲージ読み取りを示すデータ、すなわちランダムなスパイクに対して効果的です。 - -さらなるオプションについては[こちら](/sql-reference/statements/create/table#column_compression_codec)をご覧ください。 - -以下に、`Id`、`ViewCount`、および`AnswerCount`のために`Delta`コーデックを指定し、これらが順序キーと線形相関していると仮定し、したがってデルタエンコーディングの恩恵を受けるべきです。 - -```sql -CREATE TABLE posts_v4 -( - `Id` Int32 CODEC(Delta, ZSTD), - `PostTypeId` Enum('Question' = 1, 'Answer' = 2, 'Wiki' = 3, 'TagWikiExcerpt' = 4, 'TagWiki' = 5, 'ModeratorNomination' = 6, 'WikiPlaceholder' = 7, 'PrivilegeWiki' = 8), - `AcceptedAnswerId` UInt32, - `CreationDate` DateTime64(3, 'UTC'), - `Score` Int32, - `ViewCount` UInt32 CODEC(Delta, ZSTD), - `Body` String, - `OwnerUserId` Int32, - `OwnerDisplayName` String, - `LastEditorUserId` Int32, - `LastEditorDisplayName` String, - `LastEditDate` DateTime64(3, 'UTC'), - `LastActivityDate` DateTime64(3, 'UTC'), - `Title` String, - `Tags` String, - `AnswerCount` UInt16 CODEC(Delta, ZSTD), - `CommentCount` UInt8, - `FavoriteCount` UInt8, - `ContentLicense` LowCardinality(String), - `ParentId` String, - `CommunityOwnedDate` DateTime64(3, 'UTC'), - `ClosedDate` DateTime64(3, 'UTC') -) -ENGINE = MergeTree -ORDER BY (PostTypeId, toDate(CreationDate), CommentCount) -``` - -これらのカラムに対する圧縮改善は以下の通りです: - -```sql -SELECT - `table`, - name, - formatReadableSize(sum(data_compressed_bytes)) AS compressed_size, - formatReadableSize(sum(data_uncompressed_bytes)) AS uncompressed_size, - round(sum(data_uncompressed_bytes) / sum(data_compressed_bytes), 2) AS ratio -FROM system.columns -WHERE (name IN ('Id', 'ViewCount', 'AnswerCount')) AND (`table` IN ('posts_v3', 'posts_v4')) -GROUP BY - `table`, - name -ORDER BY - name ASC, - `table` ASC - -┌─table────┬─name────────┬─compressed_size─┬─uncompressed_size─┬─ratio─┐ -│ posts_v3 │ AnswerCount │ 9.67 MiB │ 113.69 MiB │ 11.76 │ -│ posts_v4 │ AnswerCount │ 10.39 MiB │ 111.31 MiB │ 10.71 │ -│ posts_v3 │ Id │ 159.70 MiB │ 227.38 MiB │ 1.42 │ -│ posts_v4 │ Id │ 64.91 MiB │ 222.63 MiB │ 3.43 │ -│ posts_v3 │ ViewCount │ 45.04 MiB │ 227.38 MiB │ 5.05 │ -│ posts_v4 │ ViewCount │ 52.72 MiB │ 222.63 MiB │ 4.22 │ -└──────────┴─────────────┴─────────────────┴───────────────────┴───────┘ - -6 rows in set. 
Elapsed: 0.008 sec -``` - -### ClickHouse Cloudでの圧縮 {#compression-in-clickhouse-cloud} - -ClickHouse Cloudでは、デフォルトで`ZSTD`圧縮アルゴリズム(デフォルト値1)を使用しています。このアルゴリズムの圧縮速度は、圧縮レベルによって変動します(高いほど遅くなります)が、デコンプレッション時に一貫して速いという利点があります(約20%の変動)し、並列化可能という利点もあります。当社の過去のテストでも、このアルゴリズムが非常に効果的であることが示されており、実際にはコーデックと組み合わせた`LZ4`を上回ることさえあります。これはほとんどのデータ型および情報分布に対して効果的であるため、合理的な汎用デフォルトであり、最初の圧縮が最適化なしでも優れている理由です。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/data-compression/compression-in-clickhouse.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/data-compression/compression-in-clickhouse.md.hash deleted file mode 100644 index 0996551b337..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/data-compression/compression-in-clickhouse.md.hash +++ /dev/null @@ -1 +0,0 @@ -040d4b2d7ff26e4b diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/data-compression/compression-modes.md b/i18n/jp/docusaurus-plugin-content-docs/current/data-compression/compression-modes.md deleted file mode 100644 index c88d6644751..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/data-compression/compression-modes.md +++ /dev/null @@ -1,58 +0,0 @@ ---- -slug: '/data-compression/compression-modes' -sidebar_position: 6 -title: '圧縮モード' -description: 'ClickHouseのカラム圧縮モード' -keywords: -- 'compression' -- 'codec' -- 'encoding' -- 'modes' ---- - -import CompressionBlock from '@site/static/images/data-compression/ch_compression_block.png'; -import Image from '@theme/IdealImage'; - - -# 圧縮モード - -ClickHouseプロトコルは、**データブロック**の圧縮をチェックサムと共にサポートしています。モードを選択する際に不明な場合は、`LZ4`を使用してください。 - -:::tip -利用可能な[カラム圧縮コーデック](/sql-reference/statements/create/table#column_compression_codec)について詳しく学び、テーブルを作成する際やその後に指定してください。 -::: - -## モード {#modes} - -| 値 | 名前 | 説明 | -|-------|--------------------|-------------------------------------------| -| `0x02` | [なし](#none-mode) | 圧縮なし、チェックサムのみ | -| `0x82` | LZ4 | 非常に高速で、良好な圧縮 | -| `0x90` | ZSTD | Zstandard、高速で、最適な圧縮 | - -LZ4とZSTDは同じ著者によって作成されていますが、異なるトレードオフがあります。 [Facebookのベンチマーク](https://facebook.github.io/zstd/#benchmarks)から: - -| 名前 | 比率 | エンコーディング | デコーディング | -|-------------------|-------|----------|-----------| -| **zstd** 1.4.5 -1 | 2.8 | 500 MB/s | 1660 MB/s | -| **lz4** 1.9.2 | 2.1 | 740 MB/s | 4530 MB/s | - -## ブロック {#block} - -| フィールド | 型 | 説明 | -|-----------------|---------|-------------------------------------------| -| checksum | uint128 | [ハッシュ](../native-protocol/hash.md) (ヘッダー + 圧縮データ) | -| raw_size | uint32 | ヘッダーなしの生データサイズ | -| data_size | uint32 | 非圧縮データサイズ | -| mode | byte | 圧縮モード | -| compressed_data | binary | 圧縮データのブロック | - -ClickHouse圧縮ブロック構造を示す図 - -ヘッダーは(raw_size + data_size + mode)であり、生サイズはlen(header + compressed_data)から構成されています。 - -チェックサムは`hash(header + compressed_data)`であり、[ClickHouse CityHash](../native-protocol/hash.md)を使用しています。 - -## なしモード {#none-mode} - -*なし*モードが使用されている場合、`compressed_data`はオリジナルデータと等しくなります。圧縮なしのモードは、チェックサムを使用して追加のデータ整合性を確保するために有用です。なぜなら、ハッシュingのオーバーヘッドは無視できるからです。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/data-compression/compression-modes.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/data-compression/compression-modes.md.hash deleted file mode 100644 index a716bb8c6cc..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/data-compression/compression-modes.md.hash +++ /dev/null @@ -1 +0,0 @@ -80d6f3c06d227b7d diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/data-modeling/backfilling.md b/i18n/jp/docusaurus-plugin-content-docs/current/data-modeling/backfilling.md deleted file 
mode 100644 index feca1eced21..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/data-modeling/backfilling.md +++ /dev/null @@ -1,621 +0,0 @@ ---- -slug: '/data-modeling/backfilling' -title: 'バックフィーリングデータ' -description: 'ClickHouse で大規模なデータセットをバックフィルする方法' -keywords: -- 'materialized views' -- 'backfilling' -- 'inserting data' -- 'resilient data load' ---- - -import nullTableMV from '@site/static/images/data-modeling/null_table_mv.png'; -import Image from '@theme/IdealImage'; - - - -# データのバックフィル - -ClickHouseに新しく触れているユーザーや、既存のデプロイメントを担当しているユーザーは、必然的に歴史的データでテーブルをバックフィルする必要があります。場合によっては、比較的シンプルですが、物理的なビューをポピュレートする必要がある場合は、より複雑になることがあります。このガイドでは、ユーザーが自分のユースケースに適用できるこのタスクのためのいくつかのプロセスをドキュメントしています。 - -:::note -このガイドは、ユーザーが[増分物理ビュー](/materialized-view/incremental-materialized-view)や、s3やgcsなどのテーブル関数を使用した[データのロード](/integrations/s3)の概念に既に慣れていることを前提としています。また、ユーザーが[オブジェクトストレージからの挿入パフォーマンスの最適化](/integrations/s3/performance)に関するガイドを読むことをお勧めしており、そのアドバイスはこのガイド全体の挿入に適用できます。 -::: -## 例データセット {#example-dataset} - -このガイドでは、PyPIデータセットを使用します。このデータセットの各行は、`pip`などのツールを使用したPythonパッケージのダウンロードを表します。 - -例えば、サブセットは単一の日 - `2024-12-17`をカバーしており、このデータは`https://datasets-documentation.s3.eu-west-3.amazonaws.com/pypi/2024-12-17/`で公開されています。ユーザーは以下のようにクエリを実行できます: - -```sql -SELECT count() -FROM s3('https://datasets-documentation.s3.eu-west-3.amazonaws.com/pypi/2024-12-17/*.parquet') - -┌────count()─┐ -│ 2039988137 │ -- 20.4億 -└────────────┘ - -1行のセット。経過時間: 32.726秒。処理された行数: 20.4億行、170.05 KB (6200万行/s., 5.20 KB/s.) -ピークメモリ使用量: 239.50 MiB. -``` - -このバケットのフルデータセットには、320 GBを超えるパーケットファイルが含まれています。以下の例では、意図的にグロブパターンを使用してサブセットをターゲットにします。 - -ユーザーは、例えばKafkaやオブジェクトストレージからこのデータのストリームを消費していると仮定します。この日以降のデータに対して。データのスキーマは以下に示されています: - -```sql -DESCRIBE TABLE s3('https://datasets-documentation.s3.eu-west-3.amazonaws.com/pypi/2024-12-17/*.parquet') -FORMAT PrettyCompactNoEscapesMonoBlock -SETTINGS describe_compact_output = 1 - -┌─name───────────────┬─type────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┐ -│ timestamp │ Nullable(DateTime64(6)) │ -│ country_code │ Nullable(String) │ -│ url │ Nullable(String) │ -│ project │ Nullable(String) │ -│ file │ Tuple(filename Nullable(String), project Nullable(String), version Nullable(String), type Nullable(String)) │ -│ installer │ Tuple(name Nullable(String), version Nullable(String)) │ -│ python │ Nullable(String) │ -│ implementation │ Tuple(name Nullable(String), version Nullable(String)) │ -│ distro │ Tuple(name Nullable(String), version Nullable(String), id Nullable(String), libc Tuple(lib Nullable(String), version Nullable(String))) │ -│ system │ Tuple(name Nullable(String), release Nullable(String)) │ -│ cpu │ Nullable(String) │ -│ openssl_version │ Nullable(String) │ -│ setuptools_version │ Nullable(String) │ -│ rustc_version │ Nullable(String) │ -│ tls_protocol │ Nullable(String) │ -│ tls_cipher │ Nullable(String) │ -└────────────────────┴─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┘ -``` - -:::note -フルPyPIデータセットには、1兆行を超えるデータが含まれており、我々のパブリックデモ環境[clickpy.clickhouse.com](https://clickpy.clickhouse.com)で入手可能です。このデータセットの詳細や、デモで物理ビューを利用してパフォーマンスを向上させる方法、データが毎日ポピュレートされる方法については、[こちら](https://github.com/ClickHouse/clickpy)をご覧ください。 -::: -## バックフィリングシナリオ {#backfilling-scenarios} - 
-バックフィリングは、特定の時点からデータストリームが消費されるときに一般的に必要です。このデータは、[増分物理ビュー](/materialized-view/incremental-materialized-view)でClickHouseテーブルに挿入され、挿入されたブロックにトリガされます。これらのビューは、挿入の前にデータを変換したり、集計を計算してターゲットテーブルに結果を送信したりします。 - -我々は以下のシナリオをカバーすることを試みます: - -1. **既存のデータ取り込みによるバックフィリング** - 新しいデータがロードされており、歴史的データがバックフィルされる必要があります。この歴史的データは特定されています。 -2. **既存のテーブルに物理ビジュアルを追加** - 歴史的データがポピュレートされ、データが既にストリーミングされている設定に新しい物理ビューを追加する必要があります。 - -データはオブジェクトストレージからバックフィルされると仮定します。すべての場合で、データの挿入を中断しないようにすることを目指しています。 - -オブジェクトストレージから歴史的データをバックフィルすることをお勧めします。データは可能な限りパーケットにエクスポートされ、最適な読み取り性能と圧縮(ネットワーク転送の削減)のために。通常、約150MBのファイルサイズが好まれますが、ClickHouseは[70以上のファイルフォーマット](/interfaces/formats)をサポートしており、すべてのサイズのファイルを処理できます。 -## 重複テーブルとビューの使用 {#using-duplicate-tables-and-views} - -すべてのシナリオにおいて、我々は「重複テーブルとビュー」の概念に依存しています。これらのテーブルとビューは、ライブストリーミングデータに使用されるもののコピーを表し、バックフィルを孤立して実行できるようにし、失敗が発生した場合に復旧のための簡単な手段を提供します。例えば、以下のようなメインの`pypi` テーブルと、Pythonプロジェクトごとのダウンロード数を計算する物理ビューがあります: - -```sql -CREATE TABLE pypi -( - `timestamp` DateTime, - `country_code` LowCardinality(String), - `project` String, - `type` LowCardinality(String), - `installer` LowCardinality(String), - `python_minor` LowCardinality(String), - `system` LowCardinality(String), - `on` String -) -ENGINE = MergeTree -ORDER BY (project, timestamp) - -CREATE TABLE pypi_downloads -( - `project` String, - `count` Int64 -) -ENGINE = SummingMergeTree -ORDER BY project - -CREATE MATERIALIZED VIEW pypi_downloads_mv TO pypi_downloads -AS SELECT - project, - count() AS count -FROM pypi -GROUP BY project -``` - -メインテーブルと関連するビューをデータのサブセットを使用してポピュレートします: - -```sql -INSERT INTO pypi SELECT * -FROM s3('https://datasets-documentation.s3.eu-west-3.amazonaws.com/pypi/2024-12-17/1734393600-000000000{000..100}.parquet') - -0行のセット。経過時間: 15.702秒。処理された行数: 4123万行、3.94 GB (263万行/s., 251.01 MB/s.) -ピークメモリ使用量: 977.49 MiB. - -SELECT count() FROM pypi - -┌──count()─┐ -│ 20612750 │ -- 2061万 -└──────────┘ - -1行のセット。経過時間: 0.004秒。 - -SELECT sum(count) -FROM pypi_downloads - - -┌─sum(count)─┐ -│ 20612750 │ -- 2061万 -└────────────┘ - -1行のセット。経過時間: 0.006秒。処理された行数: 96150行、769.23 KB (1653万行/s., 132.26 MB/s.) -ピークメモリ使用量: 682.38 KiB. -``` - -他のサブセット `{101..200}` をロードしたいと仮定します。`pypi` に直接挿入できるかもしれませんが、重複テーブルを作成することでこのバックフィルを孤立して実行できます。 - -バックフィルが失敗した場合、メインテーブルには影響を与えず、単純に[truncate](/managing-data/truncate)して重複テーブルを再実行できます。 - -これらのビューの新しいコピーを作成するには、`CREATE TABLE AS`句を使ってサフィックス`_v2`を用います: - -```sql -CREATE TABLE pypi_v2 AS pypi - -CREATE TABLE pypi_downloads_v2 AS pypi_downloads - -CREATE MATERIALIZED VIEW pypi_downloads_mv_v2 TO pypi_downloads_v2 -AS SELECT - project, - count() AS count -FROM pypi_v2 -GROUP BY project -``` - -おおよそ同じサイズの2番目のサブセットでこれをポピュレートし、成功裏にロードされたことを確認します。 - -```sql -INSERT INTO pypi_v2 SELECT * -FROM s3('https://datasets-documentation.s3.eu-west-3.amazonaws.com/pypi/2024-12-17/1734393600-000000000{101..200}.parquet') - -0行のセット。経過時間: 17.545秒。処理された行数: 4080万行、3.90 GB (233万行/s., 222.29 MB/s.) -ピークメモリ使用量: 991.50 MiB. - -SELECT count() -FROM pypi_v2 - -┌──count()─┐ -│ 20400020 │ -- 2040万 -└──────────┘ - -1行のセット。経過時間: 0.004秒。 - -SELECT sum(count) -FROM pypi_downloads_v2 - -┌─sum(count)─┐ -│ 20400020 │ -- 2040万 -└────────────┘ - -1行のセット。経過時間: 0.006秒。処理された行数: 95490行、763.90 KB (1481万行/s., 118.45 MB/s.) -ピークメモリ使用量: 688.77 KiB. 
-``` - -2度目のロード中に失敗が発生した場合、単純に[truncate](/managing-data/truncate)して`pypi_v2`と`pypi_downloads_v2`を再度ロードすることができます。 - -データのロードが完了したら、[`ALTER TABLE MOVE PARTITION`](/sql-reference/statements/alter/partition#move-partition-to-table)句を使用して、重複テーブルからメインテーブルにデータを移動できます。 - -```sql -ALTER TABLE pypi_v2 MOVE PARTITION () TO pypi - -0行のセット。経過時間: 1.401秒。 - -ALTER TABLE pypi_downloads_v2 MOVE PARTITION () TO pypi_downloads - -0行のセット。経過時間: 0.389秒。 -``` - -:::note パーティション名 -上記の`MOVE PARTITION`呼び出しは、パーティション名`()`を使用しています。これは、このテーブルの単一パーティションを表します(パーティションはありません)。パーティションされたテーブルの場合、ユーザーは各パーティションごとに複数の`MOVE PARTITION`呼び出しを行う必要があります。現在のパーティション名は、[`system.parts`](/operations/system-tables/parts)テーブルから調べることができます。例:`SELECT DISTINCT partition FROM system.parts WHERE (table = 'pypi_v2')`. -::: - -これで`pypi` と `pypi_downloads`が完全なデータを含んでいることを確認できます。`pypi_downloads_v2` と `pypi_v2`は安全に削除できます。 - -```sql -SELECT count() -FROM pypi - -┌──count()─┐ -│ 41012770 │ -- 4101万 -└──────────┘ - -1行のセット。経過時間: 0.003秒。 - -SELECT sum(count) -FROM pypi_downloads - -┌─sum(count)─┐ -│ 41012770 │ -- 4101万 -└────────────┘ - -1行のセット。経過時間: 0.007秒。処理された行数: 191.64千行、1.53 MB (2734万行/s., 218.74 MB/s.) - -SELECT count() -FROM pypi_v2 -``` - -重要なのは、`MOVE PARTITION`操作は軽量で(ハードリンクを利用)、原子的であり、すなわち中間状態なしに失敗するか成功するかのいずれかです。 - -このプロセスは、以下のバックフィリングシナリオで広く利用されます。 - -このプロセスでは、ユーザーが各挿入操作のサイズを選択する必要があることに注意してください。 - -より大きな挿入、すなわちより多くの行は、必要な`MOVE PARTITION`操作を減らすことを意味します。しかし、これはネットワークの中断による挿入失敗時のコストとバランスを取る必要があります。ユーザーは、リスクを低減するためにファイルをバッチ処理することを補完できます。これは、リストされる範囲のクエリ(例:`WHERE timestamp BETWEEN 2024-12-17 09:00:00 AND 2024-12-17 10:00:00`)やグロブパターンを使用して行うことができます。例えば、 - -```sql -INSERT INTO pypi_v2 SELECT * -FROM s3('https://datasets-documentation.s3.eu-west-3.amazonaws.com/pypi/2024-12-17/1734393600-000000000{101..200}.parquet') -INSERT INTO pypi_v2 SELECT * -FROM s3('https://datasets-documentation.s3.eu-west-3.amazonaws.com/pypi/2024-12-17/1734393600-000000000{201..300}.parquet') -INSERT INTO pypi_v2 SELECT * -FROM s3('https://datasets-documentation.s3.eu-west-3.amazonaws.com/pypi/2024-12-17/1734393600-000000000{301..400}.parquet') ---すべてのファイルがロードされるまで続く OR MOVE PARTITION 呼び出しが実行される -``` - -:::note -ClickPipesは、オブジェクトストレージからデータをロードする際にこのアプローチを使用し、ターゲットテーブルとその物理ビューの重複を自動的に作成し、ユーザーに上記のステップを実行する必要を避けます。異なるサブセットを処理する複数のワーカースレッドを使用することで、データを迅速にロードし、正確に一度だけのセマンティクスを実現します。興味のある方は、[このブログ](https://clickhouse.com/blog/supercharge-your-clickhouse-data-loads-part3)で詳細をご覧ください。 -::: -## シナリオ 1: 既存のデータ取り込みによるバックフィリング {#scenario-1-backfilling-data-with-existing-data-ingestion} - -このシナリオでは、バックフィルするデータが孤立したバケットに存在せず、フィルタリングが必要であると仮定します。データは既に挿入されており、タイムスタンプや単調増加列を特定でき、そこから歴史的データをバックフィルする必要があります。 - -このプロセスは以下のステップに従います: - -1. チェックポイントを特定する - タイムスタンプまたは歴史的データを復元する必要がある列の値。 -2. メインテーブルと物理ビューのターゲットテーブルの重複を作成する。 -3. ステップ(2)で作成したターゲットテーブルを指す物理ビューのコピーを作成する。 -4. ステップ(2)で作成した重複メインテーブルに挿入する。 -5. 重複テーブルから元のバージョンにすべてのパーティションを移動し、重複テーブルを削除する。 - -例えば、PyPIデータで必要なデータがロードされていると仮定します。最小タイムスタンプを特定できるため、我々の「チェックポイント」がわかります。 - -```sql -SELECT min(timestamp) -FROM pypi - -┌──────min(timestamp)─┐ -│ 2024-12-17 09:00:00 │ -└─────────────────────┘ - -1行のセット。経過時間: 0.163秒。処理された行数: 13.4億行、5.37 GB (8.24億行/s., 32.96 GB/s.) -ピークメモリ使用量: 227.84 MiB. 
-``` - -上記から、`2024-12-17 09:00:00`より前のデータをロードする必要があることがわかります。先ほどのプロセスを用いて、重複テーブルとビューを作成し、タイムスタンプにフィルタをかけたサブセットをロードします。 - -```sql -CREATE TABLE pypi_v2 AS pypi - -CREATE TABLE pypi_downloads_v2 AS pypi_downloads - -CREATE MATERIALIZED VIEW pypi_downloads_mv_v2 TO pypi_downloads_v2 -AS SELECT project, count() AS count -FROM pypi_v2 -GROUP BY project - -INSERT INTO pypi_v2 SELECT * -FROM s3('https://datasets-documentation.s3.eu-west-3.amazonaws.com/pypi/2024-12-17/1734393600-*.parquet') -WHERE timestamp < '2024-12-17 09:00:00' - -0行のセット。経過時間: 500.152秒。処理された行数: 27.4億行、364.40 GB (5.47万行/s., 728.59 MB/s.) -``` -:::note -パーケットのタイムスタンプ列をフィルタリングすることは非常に効率的です。ClickHouseは、ロードするフルデータ範囲を特定するためにタイムスタンプ列だけを読み取ります。パーケットインデックス(例えばmin-max)もClickHouseクエリエンジンによって利用できます。 -::: - -この挿入が完了したら、関連するパーティションを移動できます。 - -```sql -ALTER TABLE pypi_v2 MOVE PARTITION () TO pypi - -ALTER TABLE pypi_downloads_v2 MOVE PARTITION () TO pypi_downloads -``` - -もし歴史的データが孤立したバケットであれば、上記の時間フィルタは必要ありません。時間または単調増加列が利用できない場合は、歴史的データを分離します。 - -:::note ClickHouse CloudでClickPipesを使うだけ -ClickHouse Cloudのユーザーは、データが自分のバケットに孤立させられる場合、歴史的バックアップを復元するためにClickPipesを使用するべきです(この場合フィルタは必要ありません)。複数のワーカーを用いたロードを並列化し、これによってロード時間を短縮し、ClickPipesは上記のプロセスを自動化します - メインテーブルと物理ビューの両方の重複テーブルを作成します。 -::: -## シナリオ 2: 既存のテーブルに物理ビューを追加 {#scenario-2-adding-materialized-views-to-existing-tables} - -新しい物理ビューを追加する必要がある設定には、かなりのデータがポピュレートされ、データが挿入されることは珍しくありません。時刻または単調増加列が利用できると、ストリーム内のポイントを特定するのに役立ち、データ取り込みの中断を避けることができます。以下の例では、両方のケースが想定されており、データ取り込みを中断を避けるアプローチを優先します。 - -:::note POPULATEを避ける -小さなデータセットで取り込みが一時停止されている場合を除いて、物理ビューのバックフィルに[`POPULATE`](/sql-reference/statements/create/view#materialized-view)コマンドの使用は推奨されません。このオペレーターは、ポピュレートハッシュが完了した後にソーステーブルに挿入された行を見逃す可能性があります。さらに、このポピュレートはすべてのデータに対して実行され、大規模なデータセットでの中断やメモリの制限に対して脆弱です。 -::: -### タイムスタンプまたは単調増加列が利用できる場合 {#timestamp-or-monotonically-increasing-column-available} - -この場合、我々は新しい物理ビューに、未来の任意のデータよりも大きい行のみに制限するフィルタを含めることをお勧めします。この物理ビューは、その後、メインテーブルの歴史的データを使用してこの日からバックフィルされることになります。バックフィルのアプローチは、データサイズと関連クエリの複雑さに依存します。 - -最も単純なアプローチは、次のステップを含みます: - -1. 任意の時間の近い未来よりも大きい行のみを考慮するフィルタを用いて物理ビューを作成します。 -2. `INSERT INTO SELECT`クエリを実行し、物理ビューのターゲットテーブルに挿入し、集約クエリでソーステーブルから読み取ります。 - -これは追加のサブデータにターゲットを定めるためにステップ(2)で強化することができ、または失敗後の復旧を容易にするために物理ビューのための重複したターゲットテーブルを使用することができます(挿入が完了した後に元のテーブルにパーティションをアタッチ)。 - -以下は、毎時最も人気のあるプロジェクトを計算する物理ビューです。 - -```sql -CREATE TABLE pypi_downloads_per_day -( - `hour` DateTime, - `project` String, - `count` Int64 -) -ENGINE = SummingMergeTree -ORDER BY (project, hour) - - -CREATE MATERIALIZED VIEW pypi_downloads_per_day_mv TO pypi_downloads_per_day -AS SELECT - toStartOfHour(timestamp) as hour, - project, - count() AS count -FROM pypi -GROUP BY - hour, - project -``` - -ターゲットテーブルを追加できますが、物理ビューを追加する前に、その`SELECT`節を変更して、任意の近い未来の時間よりも大きい行のみを考慮するようにします。この場合、`2024-12-17 09:00:00`を近くの時間と仮定します。 - -```sql -CREATE MATERIALIZED VIEW pypi_downloads_per_day_mv TO pypi_downloads_per_day -AS SELECT - toStartOfHour(timestamp) as hour, - project, count() AS count -FROM pypi WHERE timestamp >= '2024-12-17 09:00:00' -GROUP BY hour, project -``` - -このビューが追加されたら、上述の日付より前のこのビューのすべてのデータをバックフィルすることができます。 - -最も簡単な方法は、フィルタを追加したメインテーブルから物理ビューのクエリを実行し、`INSERT INTO SELECT`を介してビューのターゲットテーブルに結果を挿入することです。例えば、上記のビューにおいて: - -```sql -INSERT INTO pypi_downloads_per_day SELECT - toStartOfHour(timestamp) AS hour, - project, - count() AS count -FROM pypi -WHERE timestamp < '2024-12-17 09:00:00' -GROUP BY - hour, - project - -Ok. - -0行のセット。経過時間: 2.830秒。処理された行数: 798.89百万行、17.40 GB (282.28万行/s., 6.15 GB/s.) -ピークメモリ使用量: 543.71 MiB. 
-``` - -:::note -上記の例では、ターゲットテーブルは[SummingMergeTree](/engines/table-engines/mergetree-family/summingmergetree)です。この場合、元の集約クエリを単純に使用できます。より複雑なユースケースでは、[AggregatingMergeTree](/engines/table-engines/mergetree-family/aggregatingmergetree)を利用し、集計には`-State`関数を使用します。これについての例は[こちら](/integrations/s3/performance#be-aware-of-merges)で見ることができます。 -::: - -我々の場合、これは比較的軽量な集約で、3秒以内で完了し、600MiB未満のメモリを使用します。より複雑または長時間実行される集約の場合、ユーザーは、このプロセスをより堅牢にするために従来の重複テーブルアプローチを使用し、シャドウターゲットテーブル(例:`pypi_downloads_per_day_v2`)を作成し、このテーブルに挿入し、その結果のパーティションを`pypi_downloads_per_day`にアタッチすることができます。 - -物理ビューのクエリは、より複雑であることが多く(さもなければユーザーはビューを使用しないでしょう!)、リソースを消費することがあります。まれなケースでは、クエリのリソースがサーバーの限界を超えることもあります。これがClickHouseの物理ビューの利点の一つを示しています。それは、インクリメンタルであり、全データセットを一度に処理しないということです! - -この場合、ユーザーは以下の選択肢があります: - -1. クエリを変更してレンジをバックフィルします。例:`WHERE timestamp BETWEEN 2024-12-17 08:00:00 AND 2024-12-17 09:00:00`、`WHERE timestamp BETWEEN 2024-12-17 07:00:00 AND 2024-12-17 08:00:00`など。 -2. [Nullテーブルエンジン](/engines/table-engines/special/null)を使用して物理ビューを埋めます。これは、物理ビューの通常のインクリメンタルな生成を再現し、データブロック(設定可能なサイズ)を繰り返しクエリ実行します。 - -(1)は、最も簡単なアプローチであり、しばしば十分です。簡潔さのために例を含めません。 - -以下で(2)をさらに探ります。 -#### Nullテーブルエンジンを使用して物理ビューを埋める {#using-a-null-table-engine-for-filling-materialized-views} - -[Nullテーブルエンジン](/engines/table-engines/special/null)は、データを永続化しないストレージエンジンを提供します(テーブルエンジンの世界での`/dev/null`だと思ってください)。これは矛盾しているように思えますが、物理ビューはこのテーブルエンジンに挿入されたデータに対しても実行されます。これにより、元のデータを永続化せずに物理ビューを構築でき、I/Oや関連するストレージを回避できます。 - -重要なのは、テーブルエンジンに接続された物理ビューは、挿入時にデータのブロックに対しても実行され、それらの結果をターゲットテーブルに送信します。これらのブロックは設定可能なサイズです。より大きなブロックは、より効率的(そして迅速に処理される)ですが、リソース(主にメモリ)をより消費します。このテーブルエンジンの使用により、物理ビューをインクリメンタルに構築、すなわち1ブロックずつ処理できます。全集計をメモリ内に保持する必要がありません。 - -ClickHouseにおける非正規化 - -
- -以下の例を考えてみましょう: - -```sql -CREATE TABLE pypi_v2 -( - `timestamp` DateTime, - `project` String -) -ENGINE = Null - -CREATE MATERIALIZED VIEW pypi_downloads_per_day_mv_v2 TO pypi_downloads_per_day -AS SELECT - toStartOfHour(timestamp) as hour, - project, - count() AS count -FROM pypi_v2 -GROUP BY - hour, - project -``` - -ここでは、物理ビューを構築するために、行を受け取るためのNullテーブル`pypi_v2`を作成します。必要なカラムだけをスキーマに制限していることに注意してください。我々の物理ビューは、このテーブルに挿入された行に対して集約を実行し(1ブロックずつ)、結果をターゲットテーブル`pypi_downloads_per_day`に送信します。 - -:::note -ここでターゲットテーブルとして`pypi_downloads_per_day`を使用しました。追加の堅牢性のために、ユーザーは重複テーブル`pypi_downloads_per_day_v2`を作成し、物理ビューのターゲットテーブルとしてこのテーブルを使用することができます。挿入が完了した後に、`pypi_downloads_per_day_v2`のパーティションを`pypi_downloads_per_day`に移動できます。これにより、挿入がメモリの問題やサーバーの中断によって失敗した場合の復旧が可能になります。つまり、`pypi_downloads_per_day_v2`をトランケートし、設定を調整して再試行すればいいのです。 -::: - -この物理ビューを埋めるために、次のように`pypi`から`pypi_v2`にバックフィルする関連データを挿入します。 - -```sql -INSERT INTO pypi_v2 SELECT timestamp, project FROM pypi WHERE timestamp < '2024-12-17 09:00:00' - -0行のセット。経過時間: 27.325秒。処理された行数: 15億行、33.48 GB (54.73万行/s., 1.23 GB/s.) -ピークメモリ使用量: 639.47 MiB. -``` - -ここでのメモリ使用量は`639.47 MiB`です。 -##### パフォーマンスとリソースの調整 {#tuning-performance--resources} - -上記のシナリオでのパフォーマンスとリソースの使用は、いくつかの要因によって決まります。調整を試みる前に、読者には[読むためのスレッドの使用](/integrations/s3/performance#using-threads-for-reads)セクションで詳細にドキュメントされた挿入メカニクスを理解することをお勧めします。まとめると: - -- **読み取りの並列性** - 読み取るために使用されるスレッドの数。[`max_threads`](/operations/settings/settings#max_threads)を通じて制御されます。ClickHouse Cloudでは、これはインスタンスサイズによって決定され、デフォルトでvCPUの数になります。この値を増やすことで、メモリ使用量は増加しますが、読み取りパフォーマンスが向上する可能性があります。 -- **挿入の並列性** - 挿入するために使用される挿入スレッドの数。これは[`max_insert_threads`](/operations/settings/settings#max_insert_threads)を通じて制御されます。ClickHouse Cloudでは、これはインスタンスサイズ(2〜4の間)によって決定され、OSSでは1に設定されます。この値を増やすことで、メモリ使用量は増加しますが、パフォーマンスが向上する可能性があります。 -- **挿入ブロックサイズ** - データはループで処理され、データが取得され、解析され、メモリ内の挿入ブロックに作成されます。これらのブロックは、[パーティショニングキー](/engines/table-engines/mergetree-family/custom-partitioning-key)に基づいています。これらのブロックはソートされ、最適化され、圧縮され、新しい[data parts](/parts)としてストレージに書き込まれます。挿入ブロックのサイズは、[`min_insert_block_size_rows`](/operations/settings/settings#min_insert_block_size_rows)と[`min_insert_block_size_bytes`](/operations/settings/settings#min_insert_block_size_bytes)(非圧縮)によって制御され、メモリ使用量とディスクI/Oに影響を与えます。大きなブロックはメモリをより多く使用しますが、部品を減らし、I/Oやバックグラウンドのマージを削減します。これらの設定は最小スレッショルドを表し(どちらかが最初に到達するとフラッシュがトリガされます)。 -- **物理ビューのブロックサイズ** - メイン挿入の上記のメカニクスに加えて、物理ビューに挿入される前に、ブロックもより効率的に処理されるように圧縮されます。これらのブロックのサイズは、[`min_insert_block_size_bytes_for_materialized_views`](/operations/settings/settings#min_insert_block_size_bytes_for_materialized_views)と[`min_insert_block_size_rows_for_materialized_views`](/operations/settings/settings#min_insert_block_size_rows_for_materialized_views)によって決定されます。大きなブロックは、より大きなメモリ使用量の犠牲に、効率的な処理を可能にします。デフォルトでは、これらの設定はソーステーブル設定[`min_insert_block_size_rows`](/operations/settings/settings#min_insert_block_size_rows)および[`min_insert_block_size_bytes`](/operations/settings/settings#min_insert_block_size_bytes)の値に戻ります。 - -パフォーマンスを向上させるために、ユーザーは[挿入のためのスレッドとブロックサイズの調整](/integrations/s3/performance#tuning-threads-and-block-size-for-inserts)セクションで示されたガイドラインに従うことができます。ほとんどの場合、パフォーマンスを改善するために`min_insert_block_size_bytes_for_materialized_views`や`min_insert_block_size_rows_for_materialized_views`を変更する必要はありません。これらを変更する場合は、`min_insert_block_size_rows`や`min_insert_block_size_bytes`と同様のベストプラクティスを使用してください。 - -メモリを最小限に抑えるために、ユーザーはこれらの設定で実験することを望むかもしれません。これにより、間違いなくパフォーマンスが低下します。先ほどのクエリを使用して、以下の例を示します。 - -`max_insert_threads`を1に下げることで、メモリオーバーヘッドを削減します。 - -```sql -INSERT INTO 
pypi_v2 -SELECT - timestamp, - project -FROM pypi -WHERE timestamp < '2024-12-17 09:00:00' -SETTINGS max_insert_threads = 1 - -0行のセット。経過時間: 27.752秒。処理された行数: 15億行、33.48 GB (53.89万行/s., 1.21 GB/s.) -ピークメモリ使用量: 506.78 MiB. -``` - -さらに、`max_threads`設定を1に下げることでメモリをさらに減らすことができます。 - -```sql -INSERT INTO pypi_v2 -SELECT timestamp, project -FROM pypi -WHERE timestamp < '2024-12-17 09:00:00' -SETTINGS max_insert_threads = 1, max_threads = 1 - -Ok. - -0行のセット。経過時間: 43.907秒。処理された行数: 15億行、33.48 GB (34.06万行/s., 762.54 MB/s.) -ピークメモリ使用量: 272.53 MiB. -``` - -最後に、`min_insert_block_size_rows`を0に設定してブロックサイズの判断要因として無効にし、`min_insert_block_size_bytes`を10485760(10MiB)に設定することで、メモリをさらに減らすことができます。 - -```sql -INSERT INTO pypi_v2 -SELECT - timestamp, - project -FROM pypi -WHERE timestamp < '2024-12-17 09:00:00' -SETTINGS max_insert_threads = 1, max_threads = 1, min_insert_block_size_rows = 0, min_insert_block_size_bytes = 10485760 - -0行のセット。経過時間: 43.293秒。処理された行数: 15億行、33.48 GB (34.54万行/s., 773.36 MB/s.) -ピークメモリ使用量: 218.64 MiB. -``` - -最後に、ブロックサイズを低くすると部品が増え、マージ圧力が増加することに注意してください。これらの設定は慎重に変更する必要があります[こちら](/integrations/s3/performance#be-aware-of-merges)で議論されています。 -``` -### タイムスタンプまたは単調増加カラムなし {#no-timestamp-or-monotonically-increasing-column} - -上記のプロセスは、ユーザーがタイムスタンプまたは単調増加カラムを持っていることに依存しています。場合によっては、これが単純に利用できないことがあります。この場合、ユーザーに取り込みを一時停止する必要がありますが、以前に説明したステップの多くを利用する以下のプロセスをお勧めします。 - -1. メインテーブルへの挿入を一時停止します。 -2. `CREATE AS`構文を使用して、メインターゲットテーブルの複製を作成します。 -3. [`ALTER TABLE ATTACH`](/sql-reference/statements/alter/partition#attach-partitionpart)を使用して、元のターゲットテーブルから複製にパーティションを添付します。 **注:** この添付操作は以前の移動とは異なります。ハードリンクを利用するため、元のテーブルのデータは保持されます。 -4. 新しいマテリアライズドビューを作成します。 -5. 挿入を再開します。 **注:** 挿入はターゲットテーブルのみを更新し、複製は元のデータのみを参照します。 -6. マテリアライズドビューをバックフィルします。タイムスタンプのあるデータに対して上記で使用したのと同じプロセスを適用し、ソースとして複製テーブルを使用します。 - -以下の例では、PyPIと以前の新しいマテリアライズドビュー `pypi_downloads_per_day` を使用します(タイムスタンプが使用できないと仮定します): - -```sql -SELECT count() FROM pypi - -┌────count()─┐ -│ 2039988137 │ -- 20.4 億 -└────────────┘ - -1 行がセットに含まれています。経過時間: 0.003 秒。 - --- (1) 挿入を一時停止 --- (2) 目標テーブルの複製を作成 - -CREATE TABLE pypi_v2 AS pypi - -SELECT count() FROM pypi_v2 - -┌────count()─┐ -│ 2039988137 │ -- 20.4 億 -└────────────┘ - -1 行がセットに含まれています。経過時間: 0.004 秒。 - --- (3) 元のターゲットテーブルから複製にパーティションを添付します。 - -ALTER TABLE pypi_v2 - (ATTACH PARTITION tuple() FROM pypi) - --- (4) 新しいマテリアライズドビューを作成します。 - -CREATE TABLE pypi_downloads_per_day -( - `hour` DateTime, - `project` String, - `count` Int64 -) -ENGINE = SummingMergeTree -ORDER BY (project, hour) - - -CREATE MATERIALIZED VIEW pypi_downloads_per_day_mv TO pypi_downloads_per_day -AS SELECT - toStartOfHour(timestamp) as hour, - project, - count() AS count -FROM pypi -GROUP BY - hour, - project - --- (4) 挿入を再開します。ここで、単一の行を挿入してレプリケートします。 - -INSERT INTO pypi SELECT * -FROM pypi -LIMIT 1 - -SELECT count() FROM pypi - -┌────count()─┐ -│ 2039988138 │ -- 20.4 億 -└────────────┘ - -1 行がセットに含まれています。経過時間: 0.003 秒。 - --- pypi_v2が以前と同じ行数を含んでいることに注意してください。 - -SELECT count() FROM pypi_v2 -┌────count()─┐ -│ 2039988137 │ -- 20.4 億 -└────────────┘ - --- (5) バックフィルをバックアップ pypi_v2 を使用して行います。 - -INSERT INTO pypi_downloads_per_day SELECT - toStartOfHour(timestamp) as hour, - project, - count() AS count -FROM pypi_v2 -GROUP BY - hour, - project - -0 行がセットに含まれています。経過時間: 3.719 秒。処理された行数: 20.4 億、データサイズ: 47.15 GB (548.57 百万行/秒、12.68 GB/秒)。 - -DROP TABLE pypi_v2; -``` - -次の最後から2番目のステップでは、前年に説明されたシンプルな `INSERT INTO SELECT` アプローチを使用して `pypi_downloads_per_day` にバックフィルを行います。これは、前述のNullテーブルアプローチを使用して強化することもでき、より強靭性のために複製テーブルをオプションで使用できます。 - 
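The enhancement mentioned in the previous paragraph (combining the Null-engine staging approach with a duplicate target table) can be sketched as follows. This is an illustrative sketch only, intended to run in place of the plain `INSERT INTO SELECT` above and before `pypi_v2` is dropped; the names `pypi_backfill_null` and `pypi_backfill_mv` are hypothetical, and the `()` partition assumes the unpartitioned target tables used throughout this guide:

```sql
-- Sketch: backfill pypi_downloads_per_day through a Null staging table, writing into
-- a duplicate target so a failed run can simply be truncated and retried.
CREATE TABLE pypi_downloads_per_day_v2 AS pypi_downloads_per_day;

CREATE TABLE pypi_backfill_null
(
    `timestamp` DateTime,
    `project` String
)
ENGINE = Null;

CREATE MATERIALIZED VIEW pypi_backfill_mv TO pypi_downloads_per_day_v2
AS SELECT
    toStartOfHour(timestamp) AS hour,
    project,
    count() AS count
FROM pypi_backfill_null
GROUP BY hour, project;

-- Stream the historical rows through the Null table; the view aggregates block by block.
INSERT INTO pypi_backfill_null SELECT timestamp, project FROM pypi_v2;

-- On success, move the results into the live target and drop the helpers.
ALTER TABLE pypi_downloads_per_day_v2 MOVE PARTITION () TO pypi_downloads_per_day;
DROP TABLE pypi_backfill_mv;
DROP TABLE pypi_backfill_null;
DROP TABLE pypi_downloads_per_day_v2;
```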
-この操作には挿入を一時停止する必要がありますが、中間操作は通常迅速に完了でき、データの中断を最小限に抑えます。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/data-modeling/backfilling.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/data-modeling/backfilling.md.hash deleted file mode 100644 index 38fe08d3514..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/data-modeling/backfilling.md.hash +++ /dev/null @@ -1 +0,0 @@ -2948e8a5e5955808 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/data-modeling/denormalization.md b/i18n/jp/docusaurus-plugin-content-docs/current/data-modeling/denormalization.md deleted file mode 100644 index 769fa1538e3..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/data-modeling/denormalization.md +++ /dev/null @@ -1,377 +0,0 @@ ---- -slug: '/data-modeling/denormalization' -title: 'データの非正規化' -description: 'クエリパフォーマンスを向上させるための非正規化の使用方法' -keywords: -- 'data denormalization' -- 'denormalize' -- 'query optimization' ---- - -import denormalizationDiagram from '@site/static/images/data-modeling/denormalization-diagram.png'; -import denormalizationSchema from '@site/static/images/data-modeling/denormalization-schema.png'; -import Image from '@theme/IdealImage'; - - -# データの非正規化 - -データの非正規化は、ClickHouseでフラットなテーブルを使用して、結合を避けることでクエリのレイテンシを最小限に抑えるのを助けるための手法です。 - -## 正規化されたスキーマと非正規化されたスキーマの比較 {#comparing-normalized-vs-denormalized-schemas} - -データの非正規化には、特定のクエリパターンに対するデータベースのパフォーマンスを最適化するために、意図的に正規化プロセスを逆にすることが含まれます。正規化されたデータベースでは、冗長性を最小限に抑え、データの整合性を確保するために、データが複数の関連テーブルに分割されます。非正規化は、結合を行い、データを重複させ、計算されたフィールドを単一のテーブルまたは少数のテーブルに組み込むことによって冗長性を再導入し、クエリ時から挿入時に結合を移動させることを効果的に行います。 - -このプロセスは、クエリ時の複雑な結合の必要性を減少させ、読み取り操作を大幅に高速化することができ、重い読み取り要件と複雑なクエリを持つアプリケーションに最適です。しかし、重複したデータに対する変更はすべてのインスタンスにわたって伝播させる必要があるため、書き込み操作やメンテナンスの複雑さが増す可能性があります。 - -ClickHouseにおける非正規化 - -
- -NoSQLソリューションによって普及した一般的な手法は、`JOIN`のサポートがない場合にデータを非正規化することであり、すべての統計情報または関連行を親行のカラムおよびネストされたオブジェクトとして格納します。たとえば、ブログのスキーマの例では、すべての`Comments`をそれぞれの投稿のオブジェクトの`Array`として保存できます。 - -## 非正規化を使用すべき時 {#when-to-use-denormalization} - -一般的には、以下の場合に非正規化を推奨します: - -- あまり頻繁に変更されないテーブル、または分析クエリにデータが利用可能になるまでの遅延が許容できる場合に非正規化します。つまり、データはバッチで完全に再ロードできます。 -- 多対多のリレーションシップについての非正規化を避けます。これは、単一のソース行が変更された場合に、多くの行を更新する必要が生じる可能性があります。 -- 高いカーディナリティのリレーションシップの非正規化を避けます。もしテーブルの各行が他のテーブルに数千の関連エントリを持つ場合、これらは`Array`として表現する必要があります。一般的に、1000以上のタプルを持つ配列はお勧めしません。 -- ネストされたオブジェクトとしてすべてのカラムを非正規化するのではなく、マテリアライズドビューを使用して統計情報を非正規化することを検討してください(下記参照)。 - -すべての情報が非正規化される必要はありません - 頻繁にアクセスされる必要のある重要な情報だけです。 - -非正規化作業は、ClickHouseまたは上流で行うことができます。たとえば、Apache Flinkを使用する場合です。 - -## 頻繁に更新されるデータに対する非正規化を避ける {#avoid-denormalization-on-frequently-updated-data} - -ClickHouseにおいて、非正規化はクエリパフォーマンスを最適化するためのいくつかのオプションの1つですが、注意して使用する必要があります。データが頻繁に更新され、ほぼリアルタイムで更新される必要がある場合、このアプローチは避けるべきです。主テーブルが主に追加専用であるか、定期的にバッチとして再ロードできる場合(例:日次)、この方法を使用してください。 - -このアプローチには1つの主要な課題があります - 書き込みパフォーマンスとデータの更新です。より具体的には、非正規化はデータ結合の責任をクエリ時から取り込み時にシフトさせます。これによってクエリパフォーマンスが大幅に向上する一方で、取り込みが複雑になり、データパイプラインがその構成に使用された行の1つが変更された場合にClickHouseに行を再挿入する必要があります。これは、1つのソース行の変更がClickHouse内の多くの行を更新する必要があることを意味する可能性があります。複雑なスキーマでは、行が複雑な結合から組み立てられているため、結合のネストされたコンポーネント内での行の変更は、数百万行の更新を必要とする可能性があります。 - -これをリアルタイムで達成するのはしばしば非現実的であり、二つの課題によって大幅な技術的な工夫が必要です: - -1. テーブル行が変更されたときに正しい結合文をトリガーすること。理想的には、結合のすべてのオブジェクトが更新されることを引き起こさないようにすべきであり、影響を受けたものだけが更新されるようにします。正しい行に効率的にフィルタリングするためには、外部のツールやエンジニアリングが必要です。 -1. ClickHouse内の行の更新は慎重に管理する必要があり、追加の複雑さを導入します。 - -
- -したがって、すべての非正規化オブジェクトを定期的に再ロードするバッチ更新プロセスが一般的です。 - -## 非正規化の実用的なケース {#practical-cases-for-denormalization} - -では、非正規化が意味をなすいくつかの実用的な例と、他のものでより望ましいアプローチがある場合を考えてみましょう。 - -`Posts`テーブルが既に`AnswerCount`や`CommentCount`などの統計で非正規化されていると仮定します。この場合、ソースデータはこの形式で提供されます。実際には、情報が頻繁に変更される可能性が高いため、非正規化されている内容を実際には正規化したいかもしれません。これらのカラムの多くは、例えば`PostId`カラムと`Comments`テーブルを通じて投稿のコメントを利用可能です。例の目的のために、投稿はバッチプロセスで再ロードされると仮定します。 - -また、我々は他のテーブルを`Posts`に非正規化することのみを検討します。`Posts`は分析のための主要なテーブルと考えています。逆方向での非正規化も一部のクエリにとって適切であり、上記の考慮事項が適用されます。 - -*以下の各例について、両方のテーブルを結合して利用するクエリが存在するものと仮定します。* - -### 投稿と投票 {#posts-and-votes} - -投稿への投票は、別々のテーブルとして表現されます。これに対する最適化されたスキーマは以下に示されています。また、データをロードするための挿入コマンドも示します: - -```sql -CREATE TABLE votes -( - `Id` UInt32, - `PostId` Int32, - `VoteTypeId` UInt8, - `CreationDate` DateTime64(3, 'UTC'), - `UserId` Int32, - `BountyAmount` UInt8 -) -ENGINE = MergeTree -ORDER BY (VoteTypeId, CreationDate, PostId) - -INSERT INTO votes SELECT * FROM s3('https://datasets-documentation.s3.eu-west-3.amazonaws.com/stackoverflow/parquet/votes/*.parquet') - -0 rows in set. Elapsed: 26.272 sec. Processed 238.98 million rows, 2.13 GB (9.10 million rows/s., 80.97 MB/s.) -``` - -一見したところ、これらは投稿テーブルの非正規化の候補かもしれません。このアプローチにはいくつかの課題があります。 - -投稿には頻繁に投票が付けられます。時間とともに投稿ごとの投票数は減少するかもしれませんが、次のクエリでは、30,000件の投稿に対して約40,000件/時間の投票があることを示しています。 - -```sql -SELECT round(avg(c)) AS avg_votes_per_hr, round(avg(posts)) AS avg_posts_per_hr -FROM -( - SELECT - toStartOfHour(CreationDate) AS hr, - count() AS c, - uniq(PostId) AS posts - FROM votes - GROUP BY hr -) - -┌─avg_votes_per_hr─┬─avg_posts_per_hr─┐ -│ 41759 │ 33322 │ -└──────────────────┴──────────────────┘ -``` - -もし遅延が許容できるのであれば、バッチ処理で対応することもできますが、すべての投稿を定期的に再ロードしない限り、更新を処理する必要があります(これは望ましいとは限りません)。 - -さらに厄介なのは、一部の投稿には非常に多くの投票がつくことです: - -```sql -SELECT PostId, concat('https://stackoverflow.com/questions/', PostId) AS url, count() AS c -FROM votes -GROUP BY PostId -ORDER BY c DESC -LIMIT 5 - -┌───PostId─┬─url──────────────────────────────────────────┬─────c─┐ -│ 11227902 │ https://stackoverflow.com/questions/11227902 │ 35123 │ -│ 927386 │ https://stackoverflow.com/questions/927386 │ 29090 │ -│ 11227809 │ https://stackoverflow.com/questions/11227809 │ 27475 │ -│ 927358 │ https://stackoverflow.com/questions/927358 │ 26409 │ -│ 2003515 │ https://stackoverflow.com/questions/2003515 │ 25899 │ -└──────────┴──────────────────────────────────────────────┴───────┘ -``` - -ここでの主な観察点は、各投稿の集計投票統計が、ほとんどの分析にとって十分であるということです - すべての投票情報を非正規化する必要はありません。例えば、現在の`Score`カラムはそのような統計を表しています。理想的には、クエリ時に単純なルックアップでこれらの統計を取得できるようにしたいものです([dictionaries](/dictionary)を参照)。 - -### ユーザーとバッジ {#users-and-badges} - -次に、`Users`と`Badges`を考えてみましょう: - -ユーザーとバッジのスキーマ - -

-以下のコマンドでデータを最初に挿入します: -

- -```sql -CREATE TABLE users -( - `Id` Int32, - `Reputation` LowCardinality(String), - `CreationDate` DateTime64(3, 'UTC') CODEC(Delta(8), ZSTD(1)), - `DisplayName` String, - `LastAccessDate` DateTime64(3, 'UTC'), - `AboutMe` String, - `Views` UInt32, - `UpVotes` UInt32, - `DownVotes` UInt32, - `WebsiteUrl` String, - `Location` LowCardinality(String), - `AccountId` Int32 -) -ENGINE = MergeTree -ORDER BY (Id, CreationDate) -``` - -```sql -CREATE TABLE badges -( - `Id` UInt32, - `UserId` Int32, - `Name` LowCardinality(String), - `Date` DateTime64(3, 'UTC'), - `Class` Enum8('Gold' = 1, 'Silver' = 2, 'Bronze' = 3), - `TagBased` Bool -) -ENGINE = MergeTree -ORDER BY UserId - -INSERT INTO users SELECT * FROM s3('https://datasets-documentation.s3.eu-west-3.amazonaws.com/stackoverflow/parquet/users.parquet') - -0 rows in set. Elapsed: 26.229 sec. Processed 22.48 million rows, 1.36 GB (857.21 thousand rows/s., 51.99 MB/s.) - -INSERT INTO badges SELECT * FROM s3('https://datasets-documentation.s3.eu-west-3.amazonaws.com/stackoverflow/parquet/badges.parquet') - -0 rows in set. Elapsed: 18.126 sec. Processed 51.29 million rows, 797.05 MB (2.83 million rows/s., 43.97 MB/s.) -``` - -ユーザーは頻繁にバッジを取得するかもしれませんが、これは毎日以上に更新する必要があるデータセットではないでしょう。バッジとユーザーの関係は一対多です。バッジをユーザーにタプルのリストとして単純に非正規化することができるかもしれませんが、最も多くのバッジを持つユーザーを示すクイックチェックではこれは理想的ではないことが示唆されます: - -```sql -SELECT UserId, count() AS c FROM badges GROUP BY UserId ORDER BY c DESC LIMIT 5 - -┌─UserId─┬─────c─┐ -│ 22656 │ 19334 │ -│ 6309 │ 10516 │ -│ 100297 │ 7848 │ -│ 157882 │ 7574 │ -│ 29407 │ 6512 │ -└────────┴───────┘ -``` - -19,000件のオブジェクトを単一の行に非正規化するのは現実的ではない可能性があります。この関係は別のテーブルとして残すか、統計情報を追加するのが最良かもしれません。 - -> 我々はバッジからユーザーへの統計(例:バッジの数)を非正規化したいと考えることがあります。このデータセットに対して挿入時にディクショナリを使用する際の例とします。 - -### 投稿と投稿リンク {#posts-and-postlinks} - -`PostLinks`は、ユーザーが関連または重複していると考える`Posts`を接続します。以下のクエリは、スキーマとロードコマンドを示します: - -```sql -CREATE TABLE postlinks -( - `Id` UInt64, - `CreationDate` DateTime64(3, 'UTC'), - `PostId` Int32, - `RelatedPostId` Int32, - `LinkTypeId` Enum('Linked' = 1, 'Duplicate' = 3) -) -ENGINE = MergeTree -ORDER BY (PostId, RelatedPostId) - -INSERT INTO postlinks SELECT * FROM s3('https://datasets-documentation.s3.eu-west-3.amazonaws.com/stackoverflow/parquet/postlinks.parquet') - -0 rows in set. Elapsed: 4.726 sec. Processed 6.55 million rows, 129.70 MB (1.39 million rows/s., 27.44 MB/s.) -``` - -非正規化を妨げる過剰なリンクが存在しないことを確認できます: - -```sql -SELECT PostId, count() AS c -FROM postlinks -GROUP BY PostId -ORDER BY c DESC LIMIT 5 - -┌───PostId─┬───c─┐ -│ 22937618 │ 125 │ -│ 9549780 │ 120 │ -│ 3737139 │ 109 │ -│ 18050071 │ 103 │ -│ 25889234 │ 82 │ -└──────────┴─────┘ -``` - -同様に、これらのリンクはあまり頻繁に発生しないイベントです: - -```sql -SELECT - round(avg(c)) AS avg_votes_per_hr, - round(avg(posts)) AS avg_posts_per_hr -FROM -( - SELECT - toStartOfHour(CreationDate) AS hr, - count() AS c, - uniq(PostId) AS posts - FROM postlinks - GROUP BY hr -) - -┌─avg_votes_per_hr─┬─avg_posts_per_hr─┐ -│ 54 │ 44 │ -└──────────────────┴──────────────────┘ -``` - -これを非正規化の例として使用します。 - -### 簡単な統計の例 {#simple-statistic-example} - -ほとんどの場合、非正規化は親行に単一のカラムまたは統計を追加することを必要とします。たとえば、単に投稿の複製された投稿の数で投稿を強化したいだけかもしれません。そうすれば、カラムを追加する必要があります。 - -```sql -CREATE TABLE posts_with_duplicate_count -( - `Id` Int32 CODEC(Delta(4), ZSTD(1)), - ... 
-他のカラム - `DuplicatePosts` UInt16 -) ENGINE = MergeTree -ORDER BY (PostTypeId, toDate(CreationDate), CommentCount) -``` - -このテーブルをポピュレートするために、`INSERT INTO SELECT`を利用して、私たちの複製統計を投稿に結合します。 - -```sql -INSERT INTO posts_with_duplicate_count SELECT - posts.*, - DuplicatePosts -FROM posts AS posts -LEFT JOIN -( - SELECT PostId, countIf(LinkTypeId = 'Duplicate') AS DuplicatePosts - FROM postlinks - GROUP BY PostId -) AS postlinks ON posts.Id = postlinks.PostId -``` - -### 一対多のリレーションシップのための複雑な型を活用する {#exploiting-complex-types-for-one-to-many-relationships} - -非正規化を行うためには、複雑な型を利用することが必要です。一対一のリレーションシップが非正規化される場合、カラム数が少ない場合、ユーザーはこれを単純に元の型で行として追加することができます。しかし、これは一般的に大きなオブジェクトには望ましくないことであり、一対多のリレーションシップには適用できません。 - -複雑なオブジェクトや一対多のリレーションシップの場合、ユーザーは以下を使用できます: - -- 名前付きタプル - 一連のカラムとして関連構造を表すことができます。 -- Array(Tuple)またはNested - 名前付きタプルの配列もしくは、各エントリがオブジェクトを表すNested。適用可能な一対多のリレーションシップ。 - -例として、`PostLinks`を`Posts`に非正規化する方法を示します。 - -各投稿は、前に示した`PostLinks`スキーマのように、他の投稿へのいくつかのリンクを含む可能性があります。ネストされたタイプとして、リンクされた投稿と重複投稿を次のように表現できます: - -```sql -SET flatten_nested=0 -CREATE TABLE posts_with_links -( - `Id` Int32 CODEC(Delta(4), ZSTD(1)), - ... -他のカラム - `LinkedPosts` Nested(CreationDate DateTime64(3, 'UTC'), PostId Int32), - `DuplicatePosts` Nested(CreationDate DateTime64(3, 'UTC'), PostId Int32), -) ENGINE = MergeTree -ORDER BY (PostTypeId, toDate(CreationDate), CommentCount) -``` - -> `flatten_nested=0`を使用していることに注意してください。ネストされたデータのフラット化は無効にすることをお勧めします。 - -この非正規化を`INSERT INTO SELECT`を使用して、`OUTER JOIN`クエリを使って実行できます: - -```sql -INSERT INTO posts_with_links -SELECT - posts.*, - arrayMap(p -> (p.1, p.2), arrayFilter(p -> p.3 = 'Linked' AND p.2 != 0, Related)) AS LinkedPosts, - arrayMap(p -> (p.1, p.2), arrayFilter(p -> p.3 = 'Duplicate' AND p.2 != 0, Related)) AS DuplicatePosts -FROM posts -LEFT JOIN ( - SELECT - PostId, - groupArray((CreationDate, RelatedPostId, LinkTypeId)) AS Related - FROM postlinks - GROUP BY PostId -) AS postlinks ON posts.Id = postlinks.PostId - -0 rows in set. Elapsed: 155.372 sec. Processed 66.37 million rows, 76.33 GB (427.18 thousand rows/s., 491.25 MB/s.) -Peak memory usage: 6.98 GiB. 
-``` - -> ここにタイミングを注目してください。約2分で6600万行を非正規化することに成功しました。後で見るように、これはスケジュールすることができる操作です。 - -`groupArray`関数を使用して、`PostLinks`を各`PostId`ごとに配列にまとめたことに注意してください。これは、その後、2つのサブリストにフィルタリングされます:`LinkedPosts`と`DuplicatePosts`は、外部結合からの空の結果を除外します。 - -新しい非正規化された構造を確認するために、いくつかの行を選択できます: - -```sql -SELECT LinkedPosts, DuplicatePosts -FROM posts_with_links -WHERE (length(LinkedPosts) > 2) AND (length(DuplicatePosts) > 0) -LIMIT 1 -FORMAT Vertical - -Row 1: -────── -LinkedPosts: [('2017-04-11 11:53:09.583',3404508),('2017-04-11 11:49:07.680',3922739),('2017-04-11 11:48:33.353',33058004)] -DuplicatePosts: [('2017-04-11 12:18:37.260',3922739),('2017-04-11 12:18:37.260',33058004)] -``` - -## 非正規化の調整とスケジューリング {#orchestrating-and-scheduling-denormalization} - -### バッチ {#batch} - -非正規化を利用するためには、変換プロセスを行い、調整する必要があります。 - -以上で示したように、ClickHouseを使用して`INSERT INTO SELECT`を通じてデータがロードされた後にこの変換を実行することができます。これは定期的なバッチ変換に適しています。 - -ユーザーは、周期的なバッチロードプロセスが許容される場合、ClickHouse内でこれを調整するためのいくつかのオプションを持っています: - -- **[更新可能なマテリアライズドビュー](/materialized-view/refreshable-materialized-view)** - 更新可能なマテリアライズドビューを使用して、定期的にクエリをスケジュールし、結果をターゲットテーブルに送信できます。クエリが実行されると、ビューはターゲットテーブルを原子的に更新します。これはこの作業をスケジュールするためのClickHouseネイティブな手段を提供します。 -- **外部ツール** - [dbt](https://www.getdbt.com/)や[Airflow](https://airflow.apache.org/)などのツールを利用して、変換を定期的にスケジュールします。[dbtのClickHouse統合](/integrations/dbt)は、ターゲットテーブルの新しいバージョンが作成され、クエリを受け取るバージョンと原子的に交換されることを保証します([EXCHANGE](/sql-reference/statements/exchange)コマンドを介して)。 - -### ストリーミング {#streaming} - -ユーザーは、代わりにClickHouseの外部で挿入前に、[Apache Flink](https://flink.apache.org/)のようなストリーミング技術を使用してこれを行いたいかもしれません。あるいは、データが挿入される際にこのプロセスを実行するために、インクリメンタルな[マテリアライズドビュー](/guides/developer/cascading-materialized-views)を使用することもできます。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/data-modeling/denormalization.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/data-modeling/denormalization.md.hash deleted file mode 100644 index 40f92e8c310..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/data-modeling/denormalization.md.hash +++ /dev/null @@ -1 +0,0 @@ -a4794c14228d845a diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/data-modeling/index.md b/i18n/jp/docusaurus-plugin-content-docs/current/data-modeling/index.md deleted file mode 100644 index 1235e57232a..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/data-modeling/index.md +++ /dev/null @@ -1,28 +0,0 @@ ---- -slug: '/data-modeling/overview' -title: 'データモデリングの概要' -description: 'データモデル作成の概要' -keywords: -- 'data modelling' -- 'schema design' -- 'dictionary' -- 'materialized view' -- 'data compression' -- 'denormalizing data' ---- - - - - -# データモデリング - -このセクションでは、ClickHouseにおけるデータモデリングについて説明し、以下のトピックを含みます。 - -| ページ | 説明 | -|---------------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| [スキーマ設計](/data-modeling/schema-design) | クエリ、データの更新、レイテンシ、ボリュームなどの要因を考慮し、最適なパフォーマンスのためのClickHouseスキーマ設計について説明します。 | -| [辞書](/dictionary) | クエリのパフォーマンスを向上させ、データを豊かにするために辞書を定義し、使用する方法についての説明です。 | -| [マテリアライズドビュウ](/materialized-views) | ClickHouseにおけるマテリアライズドビュウとリフレッシャブルマテリアライズドビュウに関する情報です。 | -| [プロジェクション](/data-modeling/projections) | ClickHouseにおけるプロジェクションに関する情報です。 | -| [データ圧縮](/data-compression/compression-in-clickhouse) | ClickHouseにおける様々な圧縮モードと、特定のデータタイプやワークロードに対して適切な圧縮方法を選択することでデータストレージとクエリパフォーマンスを最適化する方法について説明します。 | -| 
[データの非正規化](/data-modeling/denormalization) | 関連データを単一のテーブルに格納することでクエリパフォーマンスを向上させることを目的とした、ClickHouseで使用される非正規化アプローチについて説明します。 | diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/data-modeling/index.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/data-modeling/index.md.hash deleted file mode 100644 index 557418b4a08..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/data-modeling/index.md.hash +++ /dev/null @@ -1 +0,0 @@ -ad46a332f2389db4 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/data-modeling/projections.md b/i18n/jp/docusaurus-plugin-content-docs/current/data-modeling/projections.md deleted file mode 100644 index 5a056e34774..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/data-modeling/projections.md +++ /dev/null @@ -1,445 +0,0 @@ ---- -slug: '/data-modeling/projections' -title: 'Projections' -description: 'Projectionsとは、クエリのパフォーマンスを向上させるためにどのように使用できるか、およびMaterialized Viewsとの違いについて説明するページです。' -keywords: -- 'projection' -- 'projections' -- 'query optimization' ---- - -import projections_1 from '@site/static/images/data-modeling/projections_1.png'; -import projections_2 from '@site/static/images/data-modeling/projections_2.png'; -import Image from '@theme/IdealImage'; - -# プロジェクション - -## はじめに {#introduction} - -ClickHouseは、大量のデータに対する分析クエリをリアルタイムで高速化するさまざまなメカニズムを提供しています。そのようなメカニズムの1つが、_プロジェクション_を使用することです。プロジェクションは、関心のある属性によってデータの並べ替えを行うことでクエリを最適化します。これには次のようなものが含まれます: - -1. 完全な並べ替え -2. 元のテーブルのサブセットで別の順序 -3. 事前に計算された集約(Materialized Viewに似ています)が、集約に沿った順序を持ちます。 - -## プロジェクションはどのように機能しますか? {#how-do-projections-work} - -実際には、プロジェクションは元のテーブルに対する追加の隠れたテーブルと考えることができます。プロジェクションは異なる行の順序を持つことができるため、元のテーブルとは異なる主キーを持ち、自動的に増分的に集約値を事前計算することができます。その結果、プロジェクションを利用することでクエリの実行を高速化するための2つの「調整ノブ」が提供されます: - -- **主インデックスの適切な使用** -- **集約の事前計算** - -プロジェクションは、複数の行の順序を持ち、挿入時に集約を事前計算できる[Materialized Views](/materialized-views)と、ある意味似ています。プロジェクションは自動的に更新され、元のテーブルと同期されます。一方、Materialized Viewsは明示的に更新されます。クエリが元のテーブルをターゲットにすると、ClickHouseは自動的に主キーをサンプリングし、同じ正しい結果を生成できるテーブルを選択しますが、読み取るデータの量が最も少ないものを選びます。以下の図に示すように: - -ClickHouseのプロジェクション - -## プロジェクションを使用するタイミングは? 
{#when-to-use-projections} - -プロジェクションは、自動的にデータが挿入されるため、新しいユーザーにとって魅力的な機能です。さらに、クエリは単一のテーブルに送信され、可能な限りプロジェクションを利用して応答時間を短縮できます。 - -これは、ユーザーが適切な最適化されたターゲットテーブルを選択する必要があるMaterialized Viewsとは対照的です。この場合、フィルターに応じてクエリを再構築する必要があります。これにより、ユーザーアプリケーションへの重要性が増し、クライアントサイドの複雑性が増加します。 - -これらの利点にもかかわらず、プロジェクションにはいくつかの固有の制限があり、ユーザーはこれを認識し、したがって慎重に展開すべきです。 - -- プロジェクションは、ソーステーブルと(隠れた)ターゲットテーブルに対して異なるTTLを使用することを許可しませんが、Materialized Viewsは異なるTTLを許可します。 -- プロジェクションは現在、(隠れた)ターゲットテーブルに対して `optimize_read_in_order` をサポートしていません。 -- プロジェクションを持つテーブルに対しては、軽量更新と削除がサポートされていません。 -- Materialized Viewsはチェーン化できます:1つのMaterialized Viewのターゲットテーブルは、別のMaterialized Viewのソーステーブルになり得ますが、これはプロジェクションでは不可能です。 -- プロジェクションは結合をサポートしていませんが、Materialized Viewsはサポートしています。 -- プロジェクションはフィルター(`WHERE`句)をサポートしていませんが、Materialized Viewsはサポートしています。 - -プロジェクションを使用することをお勧めするのは次のような場合です: - -- データの完全な再構成が必要な場合。プロジェクションの式は理論上 `GROUP BY` を使用できますが、集約を維持するにはMaterialized Viewsがより効果的です。クエリオプティマイザーは、単純な並べ替えを使用するプロジェクションを利用する可能性が高いです。つまり、`SELECT * ORDER BY x` のようになります。この式で、ストレージフットプリントを減らすために列のサブセットを選択できます。 -- ユーザーがストレージフットプリントの増加とデータの二重書き込みのオーバーヘッドに対して快適である場合。挿入速度に対する影響をテストし、[ストレージオーバーヘッドを評価する](/data-compression/compression-in-clickhouse)。 - -## 例 {#examples} - -### 主キーに含まれていないカラムでのフィルタリング {#filtering-without-using-primary-keys} - -この例では、テーブルにプロジェクションを追加する方法を示します。また、主キーに含まれていないカラムでフィルターを行うクエリを高速化するためにプロジェクションを使用できる方法も見ていきます。 - -この例では、`pickup_datetime` の順序で整理された New York Taxi Data データセットを使用します。このデータセットは [sql.clickhouse.com](https://sql.clickhouse.com/) で利用可能です。 - -では、$200以上チップを渡した乗客の全旅行IDを見つける簡単なクエリを書いてみましょう: - -```sql runnable -SELECT - tip_amount, - trip_id, - dateDiff('minutes', pickup_datetime, dropoff_datetime) AS trip_duration_min -FROM nyc_taxi.trips WHERE tip_amount > 200 AND trip_duration_min > 0 -ORDER BY tip_amount, trip_id ASC -``` - -`ORDER BY` に含まれていない `tip_amount` でフィルタリングしているため、ClickHouseは全行スキャンを行う必要があったことに注意してください。このクエリを高速化しましょう。 - -元のテーブルと結果を保持するために、新しいテーブルを作成し、`INSERT INTO SELECT` を使用してデータをコピーします: - -```sql -CREATE TABLE nyc_taxi.trips_with_projection AS nyc_taxi.trips; -INSERT INTO nyc_taxi.trips_with_projection SELECT * FROM nyc_taxi.trips; -``` - -プロジェクションを追加するには、`ALTER TABLE` ステートメントと `ADD PROJECTION` ステートメントを使用します: - -```sql -ALTER TABLE nyc_taxi.trips_with_projection -ADD PROJECTION prj_tip_amount -( - SELECT * - ORDER BY tip_amount, dateDiff('minutes', pickup_datetime, dropoff_datetime) -) -``` - -プロジェクションを追加した後、`MATERIALIZE PROJECTION` ステートメントを使用して、指定されたクエリに従って物理的にデータが順序づけられて書き直される必要があります: - -```sql -ALTER TABLE nyc.trips_with_projection MATERIALIZE PROJECTION prj_tip_amount -``` - -プロジェクションを追加したので、クエリを再度実行しましょう: - -```sql runnable -SELECT - tip_amount, - trip_id, - dateDiff('minutes', pickup_datetime, dropoff_datetime) AS trip_duration_min -FROM nyc_taxi.trips_with_projection WHERE tip_amount > 200 AND trip_duration_min > 0 -ORDER BY tip_amount, trip_id ASC -``` - -クエリ時間を大幅に短縮でき、スキャンする行数が少なくて済んだことに気づくでしょう。 - -上記のクエリが実際に作成したプロジェクションを使用したことを確認するために、`system.query_log` テーブルをクエリします: - -```sql -SELECT query, projections -FROM system.query_log -WHERE query_id='' -``` - -```response - ┌─query─────────────────────────────────────────────────────────────────────────┬─projections──────────────────────┐ - │ SELECT ↴│ ['default.trips.prj_tip_amount'] │ - │↳ tip_amount, ↴│ │ - │↳ trip_id, ↴│ │ - │↳ dateDiff('minutes', pickup_datetime, dropoff_datetime) AS trip_duration_min↴│ │ - │↳FROM trips WHERE tip_amount > 200 AND trip_duration_min > 0 │ │ - 
└───────────────────────────────────────────────────────────────────────────────┴──────────────────────────────────┘ -``` - -### UKの支払額クエリを高速化するためのプロジェクションの使用 {#using-projections-to-speed-up-UK-price-paid} - -プロジェクションがクエリパフォーマンスを高速化するためにどのように使用できるかを示すために、実際のデータセットを使用した例を見てみましょう。この例では、私たちの[UK Property Price Paid](https://clickhouse.com/docs/getting-started/example-datasets/uk-price-paid) チュートリアルのテーブルを使用します。これは3003万行のデータセットです。このデータセットは、私たちの[sql.clickhouse.com](https://sql.clickhouse.com/?query_id=6IDMHK3OMR1C97J6M9EUQS)環境内でも利用できます。 - -テーブルが作成され、データが挿入される方法を確認したい場合は、["UK不動産価格データセット"](/getting-started/example-datasets/uk-price-paid) ページを参照してください。 - -このデータセットに対して2つの簡単なクエリを実行できます。最初のクエリはロンドン内の支払いが最も高い郡をリストし、2番目は郡の平均価格を計算します: - -```sql runnable -SELECT - county, - price -FROM uk.uk_price_paid -WHERE town = 'LONDON' -ORDER BY price DESC -LIMIT 3 -``` - -```sql runnable -SELECT - county, - avg(price) -FROM uk.uk_price_paid -GROUP BY county -ORDER BY avg(price) DESC -LIMIT 3 -``` - -注意してください。両方のクエリの結果、30.03百万行全体のフルテーブルスキャンが発生しました。これは、テーブルを作成したときに `town` および `price` が `ORDER BY` ステートメントに含まれていなかったためです: - -```sql -CREATE TABLE uk.uk_price_paid -( - ... -) -ENGINE = MergeTree ---highlight-next-line -ORDER BY (postcode1, postcode2, addr1, addr2); -``` - -プロジェクションを使用してこのクエリを高速化できるか見てみましょう。 - -元のテーブルと結果を保持するために、新しいテーブルを作成し、`INSERT INTO SELECT` を使用してデータをコピーします: - -```sql -CREATE TABLE uk.uk_price_paid_with_projections AS uk_price_paid; -INSERT INTO uk.uk_price_paid_with_projections SELECT * FROM uk.uk_price_paid; -``` - -`prj_obj_town_price`というプロジェクションを作成し、町と価格で並べ替えた主キーを持つ追加の(隠れた)テーブルを生成します。これにより、特定の町での支払額が最も高い郡をリスト化するクエリを最適化します: - -```sql -ALTER TABLE uk.uk_price_paid_with_projections - (ADD PROJECTION prj_obj_town_price - ( - SELECT * - ORDER BY - town, - price - )) -``` - -```sql -ALTER TABLE uk.uk_price_paid_with_projections - (MATERIALIZE PROJECTION prj_obj_town_price) -SETTINGS mutations_sync = 1 -``` - -[`mutations_sync`](/operations/settings/settings#mutations_sync) 設定は、同期実行を強制するために使用されます。 - -`prj_gby_county`という別のプロジェクションを作成し、既存の130のイギリスの郡のaverage(price)集約値を段階的に事前計算する追加の(隠れた)テーブルを構築します: - -```sql -ALTER TABLE uk.uk_price_paid_with_projections - (ADD PROJECTION prj_gby_county - ( - SELECT - county, - avg(price) - GROUP BY county - )) -``` -```sql -ALTER TABLE uk.uk_price_paid_with_projections - (MATERIALIZE PROJECTION prj_gby_county) -SETTINGS mutations_sync = 1 -``` - -:::note -プロジェクションに `GROUP BY` 句が使用されている場合(上記の `prj_gby_county` プロジェクションのように)、その隠れたテーブルの基になるストレージエンジンは `AggregatingMergeTree` となり、すべての集約関数が `AggregateFunction` に変換されます。これは、適切な増分データ集約を保証します。 -::: - -下の図は、主テーブル `uk_price_paid_with_projections` とその2つのプロジェクションの可視化です: - -主テーブルuk_price_paid_with_projectionsとその2つのプロジェクションの可視化 - -ロンドンの支払いが最も高い価格の郡をリスト化するクエリを再度実行すると、クエリパフォーマンスに改善が見られます: - -```sql runnable -SELECT - county, - price -FROM uk.uk_price_paid_with_projections -WHERE town = 'LONDON' -ORDER BY price DESC -LIMIT 3 -``` - -同様に、イギリスの郡での平均支払額が最も高い3つをリスト화するクエリについても: - -```sql runnable -SELECT - county, - avg(price) -FROM uk.uk_price_paid_with_projections -GROUP BY county -ORDER BY avg(price) DESC -LIMIT 3 -``` - -両方のクエリは元のテーブルをターゲットにし、また両方のクエリはフルテーブルスキャンを行ったことに注意してください(30.03百万行すべてがディスクからストリーミングされました)。プロジェクションを2つ作成する前に。 - -また、ロンドンの支払いが最も高い価格の郡をリスト化するクエリは2.17百万行をストリーミングしています。直接、最適化された2つ目のテーブルを使用した場合、ディスクからストリーミングされたのはわずか81.92千行でした。 - -この差の理由は、現在、上記の `optimize_read_in_order` 最適化がプロジェクションにはサポートされていないためです。 - -`system.query_log` テーブルを調べると、ClickHouseが上記の2つのクエリに対して自動的に2つのプロジェクションを使用したことがわかります(下のプロジェクション列を参照): - -```sql -SELECT - tables, - query, - 
query_duration_ms::String || ' ms' AS query_duration, - formatReadableQuantity(read_rows) AS read_rows, - projections -FROM clusterAllReplicas(default, system.query_log) -WHERE (type = 'QueryFinish') AND (tables = ['default.uk_price_paid_with_projections']) -ORDER BY initial_query_start_time DESC - LIMIT 2 -FORMAT Vertical -``` - -```response -Row 1: -────── -tables: ['uk.uk_price_paid_with_projections'] -query: SELECT - county, - avg(price) -FROM uk_price_paid_with_projections -GROUP BY county -ORDER BY avg(price) DESC -LIMIT 3 -query_duration: 5 ms -read_rows: 132.00 -projections: ['uk.uk_price_paid_with_projections.prj_gby_county'] - -Row 2: -────── -tables: ['uk.uk_price_paid_with_projections'] -query: SELECT - county, - price -FROM uk_price_paid_with_projections -WHERE town = 'LONDON' -ORDER BY price DESC -LIMIT 3 -SETTINGS log_queries=1 -query_duration: 11 ms -read_rows: 2.29 million -projections: ['uk.uk_price_paid_with_projections.prj_obj_town_price'] - -2 rows in set. Elapsed: 0.006 sec. -``` - -### さらなる例 {#further-examples} - -以下の例では、同じUK価格データセットを使用して、プロジェクションありとなしのクエリを対比させます。 - -オリジナルテーブル(およびパフォーマンス)を保存するために、再度 `CREATE AS` と `INSERT INTO SELECT` を使用してテーブルのコピーを作成します。 - -```sql -CREATE TABLE uk.uk_price_paid_with_projections_v2 AS uk.uk_price_paid; -INSERT INTO uk.uk_price_paid_with_projections_v2 SELECT * FROM uk.uk_price_paid; -``` - -#### プロジェクションを構築 {#build-projection} - -`toYear(date)`、`district`、`town` の次元ごとに集約プロジェクションを作成します: - -```sql -ALTER TABLE uk.uk_price_paid_with_projections_v2 - ADD PROJECTION projection_by_year_district_town - ( - SELECT - toYear(date), - district, - town, - avg(price), - sum(price), - count() - GROUP BY - toYear(date), - district, - town - ) -``` - -既存のデータに対してプロジェクションをポピュレートします。(物理的に指定された順序でデータは書き直されません。これにより、新たに挿入されたデータのみに対してプロジェクションが作成されます): - -```sql -ALTER TABLE uk.uk_price_paid_with_projections_v2 - MATERIALIZE PROJECTION projection_by_year_district_town -SETTINGS mutations_sync = 1 -``` - -以下のクエリは、プロジェクションの有無によるパフォーマンスの対比です。プロジェクションを強制的に無効にするには、設定 [`optimize_use_projections`](/operations/settings/settings#optimize_use_projections) を使用します。これはデフォルトで有効になっています。 - -#### クエリ1. 年ごとの平均価格 {#average-price-projections} - -```sql runnable -SELECT - toYear(date) AS year, - round(avg(price)) AS price, - bar(price, 0, 1000000, 80) -FROM uk.uk_price_paid_with_projections_v2 -GROUP BY year -ORDER BY year ASC -SETTINGS optimize_use_projections=0 -``` - -```sql runnable -SELECT - toYear(date) AS year, - round(avg(price)) AS price, - bar(price, 0, 1000000, 80) -FROM uk.uk_price_paid_with_projections_v2 -GROUP BY year -ORDER BY year ASC -``` -結果は同様であるべきですが、後者の例の方がパフォーマンスが向上します! - - -#### クエリ2. ロンドン年ごとの平均価格 {#average-price-london-projections} - -```sql runnable -SELECT - toYear(date) AS year, - round(avg(price)) AS price, - bar(price, 0, 2000000, 100) -FROM uk.uk_price_paid_with_projections_v2 -WHERE town = 'LONDON' -GROUP BY year -ORDER BY year ASC -SETTINGS optimize_use_projections=0 -``` - - -```sql runnable -SELECT - toYear(date) AS year, - round(avg(price)) AS price, - bar(price, 0, 2000000, 100) -FROM uk.uk_price_paid_with_projections_v2 -WHERE town = 'LONDON' -GROUP BY year -ORDER BY year ASC -``` - -#### クエリ3. 
最も高価な地区 {#most-expensive-neighborhoods-projections} - -条件 (date >= '2020-01-01') は、プロジェクションの次元 (toYear(date) >= 2020) に一致するように変更する必要があります: - -```sql runnable -SELECT - town, - district, - count() AS c, - round(avg(price)) AS price, - bar(price, 0, 5000000, 100) -FROM uk.uk_price_paid_with_projections_v2 -WHERE toYear(date) >= 2020 -GROUP BY - town, - district -HAVING c >= 100 -ORDER BY price DESC -LIMIT 100 -SETTINGS optimize_use_projections=0 -``` - -```sql runnable -SELECT - town, - district, - count() AS c, - round(avg(price)) AS price, - bar(price, 0, 5000000, 100) -FROM uk.uk_price_paid_with_projections_v2 -WHERE toYear(date) >= 2020 -GROUP BY - town, - district -HAVING c >= 100 -ORDER BY price DESC -LIMIT 100 -``` - -再び、結果は同じですが、2番目のクエリのクエリパフォーマンスの改善に気づいてください。 - -## 関連コンテンツ {#related-content} -- [ClickHouseにおける主インデックスの実用的な導入](/guides/best-practices/sparse-primary-indexes#option-3-projections) -- [Materialized Views](/materialized-views) -- [ALTER PROJECTION](/sql-reference/statements/alter/projection) diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/data-modeling/projections.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/data-modeling/projections.md.hash deleted file mode 100644 index beaa1eb0e6f..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/data-modeling/projections.md.hash +++ /dev/null @@ -1 +0,0 @@ -fc1ba9ffdc610418 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/data-modeling/schema-design.md b/i18n/jp/docusaurus-plugin-content-docs/current/data-modeling/schema-design.md deleted file mode 100644 index 83cb7822268..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/data-modeling/schema-design.md +++ /dev/null @@ -1,346 +0,0 @@ ---- -slug: '/data-modeling/schema-design' -title: 'スキーマ設計' -description: 'クエリパフォーマンスの最適化のためのClickHouseスキーマ' -keywords: -- 'schema' -- 'schema design' -- 'query optimization' ---- - -import stackOverflowSchema from '@site/static/images/data-modeling/stackoverflow-schema.png'; -import schemaDesignIndices from '@site/static/images/data-modeling/schema-design-indices.png'; -import Image from '@theme/IdealImage'; - -Understanding effective schema design is key to optimizing ClickHouse performance and includes choices that often involve trade-offs, with the optimal approach depending on the queries being served as well as factors such as data update frequency, latency requirements, and data volume. This guide provides an overview of schema design best practices and data modeling techniques for optimizing ClickHouse performance. -## Stack Overflow dataset {#stack-overflow-dataset} - -For the examples in this guide, we use a subset of the Stack Overflow dataset. This contains every post, vote, user, comment and badge that has occurred on Stack Overflow from 2008 to Apr 2024. This data is available in Parquet using the schemas below under the S3 bucket `s3://datasets-documentation/stackoverflow/parquet/`: - -> The primary keys and relationships indicated are not enforced through constraints (Parquet is file not table format) and purely indicate how the data is related and the unique keys it possesses. - -Stack Overflow Schema - -
- -The Stack Overflow dataset contains a number of related tables. In any data modeling task, we recommend users focus on loading their primary table first. This may not necessarily be the largest table but rather the one on which you expect to receive most analytical queries. This will allow you to familiarize yourself with the main ClickHouse concepts and types, especially important if coming from a predominantly OLTP background. This table may require remodeling as additional tables are added to fully exploit ClickHouse features and obtain optimal performance. - -The above schema is intentionally not optimal for the purposes of this guide. -## Establish initial schema {#establish-initial-schema} - -Since the `posts` table will be the target for most analytics queries, we focus on establishing a schema for this table. This data is available in the public S3 bucket `s3://datasets-documentation/stackoverflow/parquet/posts/*.parquet` with a file per year. - -> Loading data from S3 in Parquet format represents the most common and preferred way to load data into ClickHouse. ClickHouse is optimized for processing Parquet and can potentially read and insert 10s of millions of rows from S3 per second. - -ClickHouse provides a schema inference capability to automatically identify the types for a dataset. This is supported for all data formats, including Parquet. We can exploit this feature to identify the ClickHouse types for the data via s3 table function and[`DESCRIBE`](/sql-reference/statements/describe-table) command. Note below we use the glob pattern `*.parquet` to read all files in the `stackoverflow/parquet/posts` folder. - -```sql -DESCRIBE TABLE s3('https://datasets-documentation.s3.eu-west-3.amazonaws.com/stackoverflow/parquet/posts/*.parquet') -SETTINGS describe_compact_output = 1 - -┌─name──────────────────┬─type───────────────────────────┐ -│ Id │ Nullable(Int64) │ -│ PostTypeId │ Nullable(Int64) │ -│ AcceptedAnswerId │ Nullable(Int64) │ -│ CreationDate │ Nullable(DateTime64(3, 'UTC')) │ -│ Score │ Nullable(Int64) │ -│ ViewCount │ Nullable(Int64) │ -│ Body │ Nullable(String) │ -│ OwnerUserId │ Nullable(Int64) │ -│ OwnerDisplayName │ Nullable(String) │ -│ LastEditorUserId │ Nullable(Int64) │ -│ LastEditorDisplayName │ Nullable(String) │ -│ LastEditDate │ Nullable(DateTime64(3, 'UTC')) │ -│ LastActivityDate │ Nullable(DateTime64(3, 'UTC')) │ -│ Title │ Nullable(String) │ -│ Tags │ Nullable(String) │ -│ AnswerCount │ Nullable(Int64) │ -│ CommentCount │ Nullable(Int64) │ -│ FavoriteCount │ Nullable(Int64) │ -│ ContentLicense │ Nullable(String) │ -│ ParentId │ Nullable(String) │ -│ CommunityOwnedDate │ Nullable(DateTime64(3, 'UTC')) │ -│ ClosedDate │ Nullable(DateTime64(3, 'UTC')) │ -└───────────────────────┴────────────────────────────────┘ -``` - -> The [s3 table function](/sql-reference/table-functions/s3) allows data in S3 to be queried in-place from ClickHouse. This function is compatible with all of the file formats ClickHouse supports. - -This provides us with an initial non-optimized schema. By default, ClickHouse maps these to equivalent Nullable types. We can create a ClickHouse table using these types with a simple `CREATE EMPTY AS SELECT` command. - -```sql -CREATE TABLE posts -ENGINE = MergeTree -ORDER BY () EMPTY AS -SELECT * FROM s3('https://datasets-documentation.s3.eu-west-3.amazonaws.com/stackoverflow/parquet/posts/*.parquet') -``` - -A few important points: - -Our posts table is empty after running this command. No data has been loaded. 
-We have specified the MergeTree as our table engine. MergeTree is the most common ClickHouse table engine you will likely use. It's the multi-tool in your ClickHouse box, capable of handling PB of data, and serves most analytical use cases. Other table engines exist for use cases such as CDC which need to support efficient updates. - -The clause `ORDER BY ()` means we have no index, and more specifically no order in our data. More on this later. For now, just know all queries will require a linear scan. - -To confirm the table has been created: - -```sql -SHOW CREATE TABLE posts - -CREATE TABLE posts -( - `Id` Nullable(Int64), - `PostTypeId` Nullable(Int64), - `AcceptedAnswerId` Nullable(Int64), - `CreationDate` Nullable(DateTime64(3, 'UTC')), - `Score` Nullable(Int64), - `ViewCount` Nullable(Int64), - `Body` Nullable(String), - `OwnerUserId` Nullable(Int64), - `OwnerDisplayName` Nullable(String), - `LastEditorUserId` Nullable(Int64), - `LastEditorDisplayName` Nullable(String), - `LastEditDate` Nullable(DateTime64(3, 'UTC')), - `LastActivityDate` Nullable(DateTime64(3, 'UTC')), - `Title` Nullable(String), - `Tags` Nullable(String), - `AnswerCount` Nullable(Int64), - `CommentCount` Nullable(Int64), - `FavoriteCount` Nullable(Int64), - `ContentLicense` Nullable(String), - `ParentId` Nullable(String), - `CommunityOwnedDate` Nullable(DateTime64(3, 'UTC')), - `ClosedDate` Nullable(DateTime64(3, 'UTC')) -) -ENGINE = MergeTree('/clickhouse/tables/{uuid}/{shard}', '{replica}') -ORDER BY tuple() -``` - -With our initial schema defined, we can populate the data using an `INSERT INTO SELECT`, reading the data using the s3 table function. The following loads the `posts` data in around 2 mins on an 8-core ClickHouse Cloud instance. - -```sql -INSERT INTO posts SELECT * FROM s3('https://datasets-documentation.s3.eu-west-3.amazonaws.com/stackoverflow/parquet/posts/*.parquet') - -0 rows in set. Elapsed: 148.140 sec. Processed 59.82 million rows, 38.07 GB (403.80 thousand rows/s., 257.00 MB/s.) -``` - -> The above query loads 60m rows. While small for ClickHouse, users with slower internet connections may wish to load a subset of data. This can be achieved by simply specifying the years they wish to load via a glob pattern e.g. `https://datasets-documentation.s3.eu-west-3.amazonaws.com/stackoverflow/parquet/posts/2008.parquet` or `https://datasets-documentation.s3.eu-west-3.amazonaws.com/stackoverflow/parquet/posts/{2008, 2009}.parquet`. See [here](/sql-reference/table-functions/file#globs-in-path) for how glob patterns can be used to target subsets of files. -## Optimizing Types {#optimizing-types} - -One of the secrets to ClickHouse query performance is compression. - -Less data on disk means less I/O and thus faster queries and inserts. The overhead of any compression algorithm with respect to CPU will in most cases be out weighted by the reduction in IO. Improving the compression of the data should therefore be the first focus when working on ensuring ClickHouse queries are fast. - -> For why ClickHouse compresses data so well, we recommend [this article](https://clickhouse.com/blog/optimize-clickhouse-codecs-compression-schema). In summary, as a column-oriented database, values will be written in column order. If these values are sorted, the same values will be adjacent to each other. Compression algorithms exploit contiguous patterns of data. On top of this, ClickHouse has codecs and granular data types which allow users to tune the compression techniques further. 
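                  -
                  -Before changing anything, it can help to see where the bytes are currently going. One possible way to inspect this (a small sketch using the standard `system.columns` table against the `posts` table loaded above; column names here follow the schema inferred earlier) is:
                  -
                  -```sql
                  --- Per-column on-disk footprint and compression ratio for the initial posts table
                  -SELECT
                  -    name,
                  -    formatReadableSize(sum(data_compressed_bytes)) AS compressed,
                  -    formatReadableSize(sum(data_uncompressed_bytes)) AS uncompressed,
                  -    round(sum(data_uncompressed_bytes) / sum(data_compressed_bytes), 2) AS ratio
                  -FROM system.columns
                  -WHERE (database = currentDatabase()) AND (table = 'posts')
                  -GROUP BY name
                  -ORDER BY sum(data_compressed_bytes) DESC
                  -```
                  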
                  -
                  -Compression in ClickHouse will be impacted by 3 main factors: the ordering key, the data types, and any codecs used. All of these are configured through the schema.
                  -
                  -The largest initial improvement in compression and query performance can be obtained through a simple process of type optimization. A few simple rules can be applied to optimize the schema:
                  -
                  -- **Use strict types** - Our initial schema used Strings for many columns which are clearly numerics. Usage of the correct types will ensure the expected semantics when filtering and aggregating. The same applies to date types, which have been correctly provided in the Parquet files.
                  -- **Avoid Nullable Columns** - By default the above columns have been assumed to be Null. The Nullable type allows queries to determine the difference between an empty and Null value. This creates a separate column of UInt8 type. This additional column has to be processed every time a user works with a nullable column. This leads to additional storage space used and almost always negatively affects query performance. Only use Nullable if there is a difference between the default empty value for a type and Null. For example, a value of 0 for empty values in the `ViewCount` column will likely be sufficient for most queries and not impact results. If empty values should be treated differently, they can often also be excluded from queries with a filter.
                  -- **Use the minimal precision for numeric types** - ClickHouse has a number of numeric types designed for different numeric ranges and precision. Always aim to minimize the number of bits used to represent a column. As well as integers of different sizes e.g. Int16, ClickHouse offers unsigned variants whose minimum value is 0. These can allow fewer bits to be used for a column e.g. UInt16 has a maximum value of 65535, twice that of an Int16. Prefer these types over larger signed variants if possible.
                  -- **Minimal precision for date types** - ClickHouse supports a number of date and datetime types. Date and Date32 can be used for storing pure dates, with the latter supporting a larger date range at the expense of more bits. DateTime and DateTime64 provide support for date times. DateTime is limited to second granularity and uses 32 bits. DateTime64, as the name suggests, uses 64 bits but provides support up to nanosecond granularity. As ever, choose the more coarse version acceptable for queries, minimizing the number of bits needed.
                  -- **Use LowCardinality** - Numbers, strings, Date or DateTime columns with a low number of unique values can potentially be encoded using the LowCardinality type. This dictionary encodes values, reducing the size on disk. Consider this for columns with less than 10k unique values.
                  -- **FixedString for special cases** - Strings which have a fixed length can be encoded with the FixedString type e.g. language and currency codes. This is efficient when data has the length of precisely N bytes. In all other cases, it is likely to reduce efficiency and LowCardinality is preferred.
                  -- **Enums for data validation** - The Enum type can be used to efficiently encode enumerated types. Enums can either be 8 or 16 bits, depending on the number of unique values they are required to store. Consider using this if you need either the associated validation at insert time (undeclared values will be rejected) or wish to perform queries which exploit a natural ordering in the Enum values e.g. imagine a feedback column containing user responses `Enum(':(' = 1, ':|' = 2, ':)' = 3)` (see the short example after this list).
                  
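                  -
                  -As a small illustration of the insert-time validation point above, the following sketch uses a hypothetical `feedback` table (not part of the Stack Overflow dataset) to show how an Enum rejects undeclared values:
                  -
                  -```sql
                  --- Hypothetical table: the Enum only admits the three declared responses
                  -CREATE TABLE feedback
                  -(
                  -    `response` Enum(':(' = 1, ':|' = 2, ':)' = 3)
                  -)
                  -ENGINE = MergeTree
                  -ORDER BY tuple();
                  -
                  -INSERT INTO feedback VALUES (':)'); -- accepted
                  -INSERT INTO feedback VALUES (':D'); -- rejected with an exception (unknown element for the Enum)
                  -```
                  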
- -> Tip: To find the range of all columns, and the number of distinct values, users can use the simple query `SELECT * APPLY min, * APPLY max, * APPLY uniq FROM table FORMAT Vertical`. We recommend performing this over a smaller subset of the data as this can be expensive. This query requires numerics to be at least defined as such for an accurate result i.e. not a String. - -By applying these simple rules to our posts table, we can identify an optimal type for each column: - -| Column | Is Numeric | Min, Max | Unique Values | Nulls | Comment | Optimized Type | -|------------------------|------------|------------------------------------------------------------------------|----------------|--------|----------------------------------------------------------------------------------------------|------------------------------------------| -| `PostTypeId` | Yes | 1, 8 | 8 | No | | `Enum('Question' = 1, 'Answer' = 2, 'Wiki' = 3, 'TagWikiExcerpt' = 4, 'TagWiki' = 5, 'ModeratorNomination' = 6, 'WikiPlaceholder' = 7, 'PrivilegeWiki' = 8)` | -| `AcceptedAnswerId` | Yes | 0, 78285170 | 12282094 | Yes | Differentiate Null with 0 value | UInt32 | -| `CreationDate` | No | 2008-07-31 21:42:52.667000000, 2024-03-31 23:59:17.697000000 | - | No | Millisecond granularity is not required, use DateTime | DateTime | -| `Score` | Yes | -217, 34970 | 3236 | No | | Int32 | -| `ViewCount` | Yes | 2, 13962748 | 170867 | No | | UInt32 | -| `Body` | No | - | - | No | | String | -| `OwnerUserId` | Yes | -1, 4056915 | 6256237 | Yes | | Int32 | -| `OwnerDisplayName` | No | - | 181251 | Yes | Consider Null to be empty string | String | -| `LastEditorUserId` | Yes | -1, 9999993 | 1104694 | Yes | 0 is an unused value can be used for Nulls | Int32 | -| `LastEditorDisplayName` | No | - | 70952 | Yes | Consider Null to be an empty string. Tested LowCardinality and no benefit | String | -| `LastEditDate` | No | 2008-08-01 13:24:35.051000000, 2024-04-06 21:01:22.697000000 | - | No | Millisecond granularity is not required, use DateTime | DateTime | -| `LastActivityDate` | No | 2008-08-01 12:19:17.417000000, 2024-04-06 21:01:22.697000000 | - | No | Millisecond granularity is not required, use DateTime | DateTime | -| `Title` | No | - | - | No | Consider Null to be an empty string | String | -| `Tags` | No | - | - | No | Consider Null to be an empty string | String | -| `AnswerCount` | Yes | 0, 518 | 216 | No | Consider Null and 0 to same | UInt16 | -| `CommentCount` | Yes | 0, 135 | 100 | No | Consider Null and 0 to same | UInt8 | -| `FavoriteCount` | Yes | 0, 225 | 6 | Yes | Consider Null and 0 to same | UInt8 | -| `ContentLicense` | No | - | 3 | No | LowCardinality outperforms FixedString | LowCardinality(String) | -| `ParentId` | No | - | 20696028 | Yes | Consider Null to be an empty string | String | -| `CommunityOwnedDate` | No | 2008-08-12 04:59:35.017000000, 2024-04-01 05:36:41.380000000 | - | Yes | Consider default 1970-01-01 for Nulls. Millisecond granularity is not required, use DateTime | DateTime | -| `ClosedDate` | No | 2008-09-04 20:56:44, 2024-04-06 18:49:25.393000000 | - | Yes | Consider default 1970-01-01 for Nulls. Millisecond granularity is not required, use DateTime | DateTime | - -
                  -
                  -The above gives us the following schema:
                  -
                  -```sql
                  -CREATE TABLE posts_v2
                  -(
                  -    `Id` Int32,
                  -    `PostTypeId` Enum('Question' = 1, 'Answer' = 2, 'Wiki' = 3, 'TagWikiExcerpt' = 4, 'TagWiki' = 5, 'ModeratorNomination' = 6, 'WikiPlaceholder' = 7, 'PrivilegeWiki' = 8),
                  -    `AcceptedAnswerId` UInt32,
                  -    `CreationDate` DateTime,
                  -    `Score` Int32,
                  -    `ViewCount` UInt32,
                  -    `Body` String,
                  -    `OwnerUserId` Int32,
                  -    `OwnerDisplayName` String,
                  -    `LastEditorUserId` Int32,
                  -    `LastEditorDisplayName` String,
                  -    `LastEditDate` DateTime,
                  -    `LastActivityDate` DateTime,
                  -    `Title` String,
                  -    `Tags` String,
                  -    `AnswerCount` UInt16,
                  -    `CommentCount` UInt8,
                  -    `FavoriteCount` UInt8,
                  -    `ContentLicense` LowCardinality(String),
                  -    `ParentId` String,
                  -    `CommunityOwnedDate` DateTime,
                  -    `ClosedDate` DateTime
                  -)
                  -ENGINE = MergeTree
                  -ORDER BY tuple()
                  -COMMENT 'Optimized types'
                  -```
                  -
                  -We can populate this with a simple `INSERT INTO SELECT`, reading the data from our previous table and inserting into this one:
                  -
                  -```sql
                  -INSERT INTO posts_v2 SELECT * FROM posts
                  -
                  -0 rows in set. Elapsed: 146.471 sec. Processed 59.82 million rows, 83.82 GB (408.40 thousand rows/s., 572.25 MB/s.)
                  -```
                  -
                  -We don't retain any nulls in our new schema. The above insert converts these implicitly to default values for their respective types - 0 for integers and an empty value for strings. ClickHouse also automatically converts any numerics to their target precision.
                  -
                  -Users coming from OLTP databases often look for the equivalent of a primary key in ClickHouse; that role is played by the primary (ordering) key, which we choose next.
                  -
                  -## Choosing an ordering key {#choosing-an-ordering-key}
                  -
                  -At the scale at which ClickHouse is often used, memory and disk efficiency are paramount. Data is written to ClickHouse tables in chunks known as parts, with rules applied for merging the parts in the background. In ClickHouse, each part has its own primary index. When parts are merged, the merged part's primary indexes are also merged. The primary index for a part has one index entry per group of rows - this technique is called sparse indexing.
                  -
                  -Sparse Indexing in ClickHouse
                  -
                  -The selected key in ClickHouse will determine not only the index, but also the order in which data is written on disk. Because of this, it can dramatically impact compression levels which can in turn affect query performance. An ordering key which causes the values of most columns to be written in contiguous order will allow the selected compression algorithm (and codecs) to compress the data more effectively.
                  -
                  -> All columns in a table will be sorted based on the value of the specified ordering key, regardless of whether they are included in the key itself. For instance, if `CreationDate` is used as the key, the order of values in all other columns will correspond to the order of values in the `CreationDate` column. Multiple ordering keys can be specified - this will order with the same semantics as an `ORDER BY` clause in a `SELECT` query.
                  -
                  -Some simple rules can be applied to help choose an ordering key. The following can sometimes be in conflict, so consider these in order. Users can identify a number of keys from this process, with 4-5 typically sufficient:
                  -
                  -- Select columns which align with your common filters. If a column is used frequently in `WHERE` clauses, prioritize including these in your key over those which are used less frequently.
                  -- Prefer columns which help exclude a large percentage of the total rows when filtered, thus reducing the amount of data which needs to be read.
                  
                  -- Prefer columns which are likely to be highly correlated with other columns in the table. This will help ensure these values are also stored contiguously, improving compression.
                  -- `GROUP BY` and `ORDER BY` operations for columns in the ordering key can be made more memory efficient.
                  -
                  -When identifying the subset of columns for the ordering key, declare the columns in a specific order. This order can significantly influence both the efficiency of the filtering on secondary key columns in queries, and the compression ratio for the table's data files. In general, it is best to order the keys in ascending order of cardinality. This should be balanced against the fact that filtering on columns that appear later in the ordering key will be less efficient than filtering on those that appear earlier in the tuple. Balance these behaviors and consider your access patterns (and most importantly test variants).
                  -### Example {#example}
                  -
                  -Applying the above guidelines to our `posts` table, let's assume that our users wish to perform analytics which filter by date and post type e.g.:
                  -
                  -"Which questions had the most comments in the last 3 months".
                  -
                  -The query for this question using our earlier `posts_v2` table with optimized types but no ordering key:
                  -
                  -```sql
                  -SELECT
                  -    Id,
                  -    Title,
                  -    CommentCount
                  -FROM posts_v2
                  -WHERE (CreationDate >= '2024-01-01') AND (PostTypeId = 'Question')
                  -ORDER BY CommentCount DESC
                  -LIMIT 3
                  -
                  -┌───────Id─┬─Title─────────────────────────────────────────────────────────────┬─CommentCount─┐
                  -│ 78203063 │ How to avoid default initialization of objects in std::vector? │ 74 │
                  -│ 78183948 │ About memory barrier │ 52 │
                  -│ 77900279 │ Speed Test for Buffer Alignment: IBM's PowerPC results vs. my CPU │ 49 │
                  -└──────────┴───────────────────────────────────────────────────────────────────┴──────────────┘
                  -
                  -10 rows in set. Elapsed: 0.070 sec. Processed 59.82 million rows, 569.21 MB (852.55 million rows/s., 8.11 GB/s.)
                  -Peak memory usage: 429.38 MiB.
                  -```
                  -
                  -> The query here is very fast even though all 60m rows have been linearly scanned - ClickHouse is just fast :) You'll have to trust us that ordering keys are worth it at TB and PB scale!
                  -
                  -Let's select the columns `PostTypeId` and `CreationDate` as our ordering keys.
                  -
                  -In our case, we expect users to always filter by `PostTypeId`. This has a cardinality of 8 and represents the logical choice for the first entry in our ordering key. Recognizing that date granularity filtering is likely to be sufficient (it will still benefit datetime filters), we use `toDate(CreationDate)` as the 2nd component of our key. This will also produce a smaller index, as a date can be represented by 16 bits, speeding up filtering. Our final key entry is `CommentCount` to assist with finding the most commented posts (the final sort).
                  
                  -```sql
                  -CREATE TABLE posts_v3
                  -(
                  -    `Id` Int32,
                  -    `PostTypeId` Enum('Question' = 1, 'Answer' = 2, 'Wiki' = 3, 'TagWikiExcerpt' = 4, 'TagWiki' = 5, 'ModeratorNomination' = 6, 'WikiPlaceholder' = 7, 'PrivilegeWiki' = 8),
                  -    `AcceptedAnswerId` UInt32,
                  -    `CreationDate` DateTime,
                  -    `Score` Int32,
                  -    `ViewCount` UInt32,
                  -    `Body` String,
                  -    `OwnerUserId` Int32,
                  -    `OwnerDisplayName` String,
                  -    `LastEditorUserId` Int32,
                  -    `LastEditorDisplayName` String,
                  -    `LastEditDate` DateTime,
                  -    `LastActivityDate` DateTime,
                  -    `Title` String,
                  -    `Tags` String,
                  -    `AnswerCount` UInt16,
                  -    `CommentCount` UInt8,
                  -    `FavoriteCount` UInt8,
                  -    `ContentLicense` LowCardinality(String),
                  -    `ParentId` String,
                  -    `CommunityOwnedDate` DateTime,
                  -    `ClosedDate` DateTime
                  -)
                  -ENGINE = MergeTree
                  -ORDER BY (PostTypeId, toDate(CreationDate), CommentCount)
                  -COMMENT 'Ordering Key'
                  -
                  ---populate table from existing table
                  -INSERT INTO posts_v3 SELECT * FROM posts_v2
                  -
                  -0 rows in set. Elapsed: 158.074 sec. Processed 59.82 million rows, 76.21 GB (378.42 thousand rows/s., 482.14 MB/s.)
                  -Peak memory usage: 6.41 GiB.
                  -```
                  -
                  -Our previous query improves the query response time by over 3x:
                  -
                  -```sql
                  -SELECT
                  -    Id,
                  -    Title,
                  -    CommentCount
                  -FROM posts_v3
                  -WHERE (CreationDate >= '2024-01-01') AND (PostTypeId = 'Question')
                  -ORDER BY CommentCount DESC
                  -LIMIT 3
                  -
                  -10 rows in set. Elapsed: 0.020 sec. Processed 290.09 thousand rows, 21.03 MB (14.65 million rows/s., 1.06 GB/s.)
                  -```
                  -
                  -For users interested in the compression improvements achieved by using specific types and appropriate ordering keys, see [Compression in ClickHouse](/data-compression/compression-in-clickhouse). If users need to further improve compression we also recommend the section [Choosing the right column compression codec](/data-compression/compression-in-clickhouse#choosing-the-right-column-compression-codec).
                  
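                  -
                  -To get a quick sense of these gains on your own system, one possible check (a sketch using the standard `system.parts` table; exact numbers will vary by environment) is to compare the on-disk size of the three tables created in this guide:
                  -
                  -```sql
                  --- Compare the total compressed and uncompressed size of posts, posts_v2 and posts_v3
                  -SELECT
                  -    `table`,
                  -    formatReadableSize(sum(data_compressed_bytes)) AS compressed_size,
                  -    formatReadableSize(sum(data_uncompressed_bytes)) AS uncompressed_size,
                  -    count() AS parts
                  -FROM system.parts
                  -WHERE active AND (`table` IN ('posts', 'posts_v2', 'posts_v3'))
                  -GROUP BY `table`
                  -ORDER BY sum(data_compressed_bytes) ASC
                  -```
                  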
-## 次のステップ: データモデリング技術 {#next-data-modeling-techniques} - -これまでのところ、私たちは単一のテーブルのみを移行しました。これにより、いくつかの基本的なClickHouseの概念を紹介することができましたが、ほとんどのスキーマは残念ながらこれほど単純ではありません。 - -以下にリストされた他のガイドでは、最適なClickHouseクエリのために、より広範なスキーマを再構築するためのいくつかの技術を探ります。このプロセスを通じて、`Posts` を中央のテーブルとして維持し、多くの分析クエリがそこで実行されることを目的としています。他のテーブルも分離してクエリできますが、ほとんどの分析は `posts` の文脈で実行されると仮定します。 - -> このセクションでは、他のテーブルの最適化されたバリアントを使用します。これらのスキーマを提供しますが、簡潔さのために行った決定を省略します。これらは前述のルールに基づいており、決定の推測は読者に任せます。 - -以下のアプローチはすべて、読み取りの最適化とクエリパフォーマンスの向上のために、JOINの使用を最小限に抑えることを目指しています。JOINはClickHouseで完全にサポートされていますが、最適なパフォーマンスを達成するために、控えめに使用することをお勧めします(JOINクエリで2~3テーブルの使用は問題ありません)。 - -> ClickHouseには外部キーの概念がありません。これはJOINを禁止するものではありませんが、参照整合性はアプリケーションレベルでユーザーが管理する必要があります。ClickHouseのようなOLAPシステムでは、データの整合性はしばしばアプリケーションレベルまたはデータの取り込みプロセス中に管理され、データベース自体によって強制されることはなく、その場合はかなりのオーバーヘッドが発生します。このアプローチにより、柔軟性と迅速なデータ挿入が可能になります。これは、非常に大きなデータセットに対する読み取りおよび挿入クエリの速度とスケーラビリティに対するClickHouseの焦点と一致します。 - -クエリ時のJOINの使用を最小限に抑えるために、ユーザーはいくつかのツール/アプローチを持っています: - -- [**データの非正規化**](/data-modeling/denormalization) - テーブルを結合し、1:1の関係ではない複雑な型を使用してデータを非正規化します。これは通常、クエリ時から挿入時に任意のJOINを移動することを含みます。 -- [**辞書**](/dictionary) - 直接JOINとキー値の検索を処理するためのClickHouse特有の機能。 -- [**増分マテリアライズドビュー**](/materialized-view/incremental-materialized-view) - クエリ時の計算コストを挿入時にシフトするClickHouseの機能で、集計値を増分的に計算する能力を含みます。 -- [**リフレッシュ可能なマテリアライズドビュー**](/materialized-view/refreshable-materialized-view) - 他のデータベース製品で使用されるマテリアライズドビューに似ており、クエリの結果を定期的に計算し、結果をキャッシュすることができます。 - -これらの各アプローチを各ガイドで探求し、各アプローチが適切な場合を強調し、Stack Overflowデータセットの質問を解決する方法を示す例を提供します。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/data-modeling/schema-design.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/data-modeling/schema-design.md.hash deleted file mode 100644 index f5a7d94b3d5..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/data-modeling/schema-design.md.hash +++ /dev/null @@ -1 +0,0 @@ -7e8fc072dbf0c319 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/deployment-guides/horizontal-scaling.md b/i18n/jp/docusaurus-plugin-content-docs/current/deployment-guides/horizontal-scaling.md deleted file mode 100644 index e45a250af04..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/deployment-guides/horizontal-scaling.md +++ /dev/null @@ -1,459 +0,0 @@ ---- -slug: '/architecture/horizontal-scaling' -sidebar_label: 'スケーリングアウト' -sidebar_position: 10 -title: 'スケーリングアウト' -description: 'スケーラビリティを提供するために設計された例のアーキテクチャについて説明するページ' ---- - -import Image from '@theme/IdealImage'; -import ReplicationShardingTerminology from '@site/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_replication-sharding-terminology.md'; -import ConfigFileNote from '@site/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_config-files.md'; -import scalingOut1 from '@site/static/images/deployment-guides/scaling-out-1.png'; - -## 説明 {#description} -この例のアーキテクチャは、スケーラビリティを提供するように設計されています。 それには、2つの統合されたClickHouseと調整(ClickHouse Keeper)サーバー、および3のクォーラムを完了するためのClickHouse Keeperのみの第三のサーバーが含まれています。この例では、データベース、テーブル、および両方のノードのデータをクエリできる分散テーブルを作成します。 - -## レベル: 基本 {#level-basic} - - - -## 環境 {#environment} -### アーキテクチャ図 {#architecture-diagram} - -2つのシャードと1つのレプリカのためのアーキテクチャ図 - -|Node|説明| -|----|-----------| -|`chnode1`|データ + ClickHouse Keeper| -|`chnode2`|データ + ClickHouse Keeper| -|`chnode3`|ClickHouse Keeperのクォーラム用| - -:::note -本番環境では、ClickHouse Keeperが専用ホストで実行されることを強くお勧めします。この基本構成では、ClickHouse Serverプロセス内でKeeper機能が実行されます。ClickHouse Keeperをスタンドアロンでデプロイするための手順は、[インストールドキュメント](/getting-started/install/install.mdx)で入手できます。 -::: 
- -## インストール {#install} - -[アーカイブタイプ](/getting-started/install/install.mdx)に関する手順に従って、3つのサーバーにClickHouseをインストールします(.deb、.rpm、.tar.gzなど)。この例では、ClickHouse ServerおよびClientのインストール手順をすべてのマシンで実行します。 - -## 設定ファイルの編集 {#editing-configuration-files} - - - -## chnode1 の設定 {#chnode1-configuration} - -`chnode1`には5つの設定ファイルがあります。これらのファイルを1つのファイルにまとめることもできますが、ドキュメントの明確さのために別々に見る方が簡単かもしれません。設定ファイルを読み進めると、`chnode1`と`chnode2`の間でほとんどの設定が同じであることがわかります。違いは強調表示されます。 - -### ネットワークおよびログ設定 {#network-and-logging-configuration} - -これらの値は希望に応じてカスタマイズできます。この例の構成では、1000Mでロールオーバーするデバッグログを提供します。ClickHouseはポート8123および9000のIPv4ネットワークでリッスンし、ポート9009をサーバー間通信に使用します。 - -```xml title="network-and-logging.xml on chnode1" - - - debug - /var/log/clickhouse-server/clickhouse-server.log - /var/log/clickhouse-server/clickhouse-server.err.log - 1000M - 3 - - clickhouse - 0.0.0.0 - 8123 - 9000 - 9009 - - -### ClickHouse Keeper設定 {#clickhouse-keeper-configuration} - -ClickHouse Keeperは、データレプリケーションおよび分散DDLクエリの実行のための調整システムを提供します。ClickHouse KeeperはApache ZooKeeperと互換性があります。この設定では、ポート9181でClickHouse Keeperを有効にします。強調表示された行は、このKeeperインスタンスの`server_id`が1であることを示しています。これは、3つのサーバー間で`enable-keeper.xml`ファイルのただ一つの違いです。`chnode2`は`server_id`が2に、`chnode3`は`server_id`が3に設定されます。RAFTの構成セクションはすべてのサーバーで同じであり、以下にハイライトされています。 - -:::note -何らかの理由でKeeperノードが置き換えられるか再構築される場合、既存の`server_id`を再利用しないでください。例えば、`server_id`が`2`のKeeperノードが再構築される場合、`4`以上の`server_id`を設定してください。 -::: - -```xml title="enable-keeper.xml on chnode1" - - - 9181 - # highlight-next-line - 1 - /var/lib/clickhouse/coordination/log - /var/lib/clickhouse/coordination/snapshots - - - 10000 - 30000 - trace - - - - # highlight-start - - 1 - chnode1 - 9234 - - # highlight-end - - 2 - chnode2 - 9234 - - - 3 - chnode3 - 9234 - - - - - -### マクロ設定 {#macros-configuration} - -マクロ`shard`および`replica`は、分散DDLの複雑さを軽減します。構成された値は自動的にDDLクエリで置換され、DDLの簡素化を図ります。この設定のマクロは、各ノードのシャード番号およびレプリカ番号を指定します。この2つのシャード1つのレプリカの例では、レプリカマクロは`chnode1`と`chnode2`の両方で`replica_1`です。シャードマクロは`chnode1`で`1`、`chnode2`で`2`です。 - -```xml title="macros.xml on chnode1" - - - # highlight-next-line - 1 - replica_1 - - - -### レプリケーションおよびシャーディング設定 {#replication-and-sharding-configuration} - -上から順に: -- XMLの`remote_servers`セクションは、環境内の各クラスタを指定します。属性`replace=true`は、デフォルトのClickHouse構成内のサンプル`remote_servers`をこのファイルに指定された`remote_servers`構成で置き換えます。この属性がない場合、このファイル内のリモートサーバーはデフォルトのサンプルのリストに追加されます。 -- この例では、`cluster_2S_1R`という名前のクラスタがあります。 -- クラスタ`cluster_2S_1R`のために、値`mysecretphrase`を持つシークレットが作成されます。このシークレットは、正しいサーバーが一緒に結合されることを確実にするために、環境内のすべてのリモートサーバーで共有されます。 -- クラスタ`cluster_2S_1R`は2つのシャードを持ち、それぞれのシャードは1つのレプリカを持っています。このドキュメントの最初にあるアーキテクチャ図を見て、それをXML内の2つの`shard`定義と比較してください。各シャード定義には1つのレプリカが存在します。その特定のシャードのためのレプリカです。そのレプリカのホストとポートが指定されています。この構成内の最初のシャードのレプリカは`chnode1`にストレージされ、2つ目のシャードのレプリカは`chnode2`にストレージされます。 -- シャードごとの内部レプリケーションは真に設定されています。各シャードは、設定ファイル内で`internal_replication`パラメーターを定義できます。このパラメーターが真に設定されている場合、書き込み操作は最初の健全なレプリカを選択し、そのレプリカにデータを書き込みます。 - -```xml title="remote-servers.xml on chnode1" - - - - mysecretphrase - - true - - chnode1 - 9000 - - - - true - - chnode2 - 9000 - - - - - - -### Keeperの使用設定 {#configuring-the-use-of-keeper} - -上述のいくつかのファイルでClickHouse Keeperが構成されました。この設定ファイル`use-keeper.xml`は、ClickHouse Serverがレプリケーションと分散DDLの調整のためにClickHouse Keeperを使用するように設定しています。このファイルは、ClickHouse Serverがポート9181でノード`chnode1`から`chnode3`でKeeperを使用することを指定しており、`chnode1`および`chnode2`で同じファイルです。 - -```xml title="use-keeper.xml on chnode1" - - - - chnode1 - 9181 - - - chnode2 - 9181 - - - chnode3 - 9181 - - - - -## chnode2 の設定 {#chnode2-configuration} - 
-`chnode1`と`chnode2`は非常に似た設定であるため、ここでは異なる部分のみを指摘します。 - -### ネットワークおよびログ設定 {#network-and-logging-configuration-1} - -```xml title="network-and-logging.xml on chnode2" - - - debug - /var/log/clickhouse-server/clickhouse-server.log - /var/log/clickhouse-server/clickhouse-server.err.log - 1000M - 3 - - clickhouse - 0.0.0.0 - 8123 - 9000 - 9009 - - -### ClickHouse Keeper設定 {#clickhouse-keeper-configuration-1} - -このファイルは、`chnode1`と`chnode2`の間の2つの違いの1つを含んでいます。Keeper設定で`server_id`が2に設定されています。 - -```xml title="enable-keeper.xml on chnode2" - - - 9181 - # highlight-next-line - 2 - /var/lib/clickhouse/coordination/log - /var/lib/clickhouse/coordination/snapshots - - - 10000 - 30000 - trace - - - - - 1 - chnode1 - 9234 - - # highlight-start - - 2 - chnode2 - 9234 - - # highlight-end - - 3 - chnode3 - 9234 - - - - - -### マクロ設定 {#macros-configuration-1} - -マクロ設定は`chnode1`と`chnode2`間の違いの1つを持っています。このノードの`shard`は2に設定されています。 - -```xml title="macros.xml on chnode2" - - - # highlight-next-line - 2 - replica_1 - - - -### レプリケーションおよびシャーディング設定 {#replication-and-sharding-configuration-1} - -```xml title="remote-servers.xml on chnode2" - - - - mysecretphrase - - true - - chnode1 - 9000 - - - - true - - chnode2 - 9000 - - - - - - -### Keeperの使用設定 {#configuring-the-use-of-keeper-1} - -```xml title="use-keeper.xml on chnode2" - - - - chnode1 - 9181 - - - chnode2 - 9181 - - - chnode3 - 9181 - - - - -## chnode3 の設定 {#chnode3-configuration} - -`chnode3`はデータを保存せず、クォーラム内の第3のノードを提供するためにのみ使用されるため、`chnode3`には、ネットワークおよびログ設定用の1つとClickHouse Keeper用の1つの2つの構成ファイルしかありません。 - -### ネットワークおよびログ設定 {#network-and-logging-configuration-2} - -```xml title="network-and-logging.xml on chnode3" - - - debug - /var/log/clickhouse-server/clickhouse-server.log - /var/log/clickhouse-server/clickhouse-server.err.log - 1000M - 3 - - clickhouse - 0.0.0.0 - 8123 - 9000 - 9009 - - -### ClickHouse Keeper設定 {#clickhouse-keeper-configuration-2} - -```xml title="enable-keeper.xml on chnode3" - - - 9181 - # highlight-next-line - 3 - /var/lib/clickhouse/coordination/log - /var/lib/clickhouse/coordination/snapshots - - - 10000 - 30000 - trace - - - - - 1 - chnode1 - 9234 - - - 2 - chnode2 - 9234 - - # highlight-start - - 3 - chnode3 - 9234 - - # highlight-end - - - - -## テスト {#testing} - -1. `chnode1`に接続し、上記で構成されたクラスタ`cluster_2S_1R`が存在することを確認します。 - -```sql title="Query" -SHOW CLUSTERS - -```response title="Response" -┌─cluster───────┐ -│ cluster_2S_1R │ -└───────────────┘ - -2. クラスタでデータベースを作成します。 - -```sql title="Query" -CREATE DATABASE db1 ON CLUSTER cluster_2S_1R - -```response title="Response" -┌─host────┬─port─┬─status─┬─error─┬─num_hosts_remaining─┬─num_hosts_active─┐ -│ chnode2 │ 9000 │ 0 │ │ 1 │ 0 │ -│ chnode1 │ 9000 │ 0 │ │ 0 │ 0 │ -└─────────┴──────┴────────┴───────┴─────────────────────┴──────────────────┘ - -3. クラスタにMergeTreeテーブルエンジンを持つテーブルを作成します。 -:::note -テーブルエンジンのパラメータを指定する必要はありません。これらは自動的にマクロに基づいて定義されます。 -::: - -```sql title="Query" -CREATE TABLE db1.table1 ON CLUSTER cluster_2S_1R -( - `id` UInt64, - `column1` String -) -ENGINE = MergeTree -ORDER BY id - -```response title="Response" -┌─host────┬─port─┬─status─┬─error─┬─num_hosts_remaining─┬─num_hosts_active─┐ -│ chnode1 │ 9000 │ 0 │ │ 1 │ 0 │ -│ chnode2 │ 9000 │ 0 │ │ 0 │ 0 │ -└─────────┴──────┴────────┴───────┴─────────────────────┴──────────────────┘ - -4. `chnode1`に接続して行を挿入します。 - -```sql title="Query" -INSERT INTO db1.table1 (id, column1) VALUES (1, 'abc'); - -5. `chnode2`に接続して行を挿入します。 - -```sql title="Query" -INSERT INTO db1.table1 (id, column1) VALUES (2, 'def'); - -6. 
どちらかのノード、`chnode1`または`chnode2`に接続すると、そのノードのテーブルに挿入された行のみが表示されます。 -例えば、`chnode2`でのクエリ: - -```sql title="Query" -SELECT * FROM db1.table1; - -```response title="Response" -┌─id─┬─column1─┐ -│ 2 │ def │ -└────┴─────────┘ - -7. 両方のノードの両方のシャードをクエリするための分散テーブルを作成します。 -(この例では、`rand()`関数がシャーディングキーとして設定されており、各挿入をランダムに分配します。) - -```sql title="Query" -CREATE TABLE db1.table1_dist ON CLUSTER cluster_2S_1R -( - `id` UInt64, - `column1` String -) -ENGINE = Distributed('cluster_2S_1R', 'db1', 'table1', rand()) - -```response title="Response" -┌─host────┬─port─┬─status─┬─error─┬─num_hosts_remaining─┬─num_hosts_active─┐ -│ chnode2 │ 9000 │ 0 │ │ 1 │ 0 │ -│ chnode1 │ 9000 │ 0 │ │ 0 │ 0 │ -└─────────┴──────┴────────┴───────┴─────────────────────┴──────────────────┘ - -8. `chnode1`または`chnode2`のいずれかに接続し、分散テーブルをクエリして両方の行を表示します。 - -```sql title="Query" -SELECT * FROM db1.table1_dist; - -```reponse title="Response" -┌─id─┬─column1─┐ -│ 2 │ def │ -└────┴─────────┘ -┌─id─┬─column1─┐ -│ 1 │ abc │ -└────┴─────────┘ - -## 詳細情報: {#more-information-about} - -- [分散テーブルエンジン](/engines/table-engines/special/distributed.md) -- [ClickHouse Keeper](/guides/sre/keeper/index.md) diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/deployment-guides/horizontal-scaling.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/deployment-guides/horizontal-scaling.md.hash deleted file mode 100644 index 8fd82edd48e..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/deployment-guides/horizontal-scaling.md.hash +++ /dev/null @@ -1 +0,0 @@ -c221f17aa0002fdd diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/deployment-guides/index.md b/i18n/jp/docusaurus-plugin-content-docs/current/deployment-guides/index.md deleted file mode 100644 index 7b825ff1767..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/deployment-guides/index.md +++ /dev/null @@ -1,19 +0,0 @@ ---- -slug: '/deployment-guides/index' -title: 'デプロイメントガイドの概要' -description: 'デプロイメントとスケーリングセクションのランディングページ' ---- - - - - -# デプロイメントとスケーリング - -このセクションでは、以下のトピックについて説明します。 - -| トピック | -|------------------------------------------------------------------| -| [イントロダクション](/architecture/introduction) | -| [スケーリングアウト](/architecture/horizontal-scaling) | -| [障害耐性のためのレプリケーション](/architecture/replication) | -| [クラスターのデプロイメント](/architecture/cluster-deployment) | diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/deployment-guides/index.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/deployment-guides/index.md.hash deleted file mode 100644 index 77ba247dae4..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/deployment-guides/index.md.hash +++ /dev/null @@ -1 +0,0 @@ -aed0a6e41b60b100 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/deployment-guides/parallel-replicas.mdx b/i18n/jp/docusaurus-plugin-content-docs/current/deployment-guides/parallel-replicas.mdx deleted file mode 100644 index d77b284c8d9..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/deployment-guides/parallel-replicas.mdx +++ /dev/null @@ -1,402 +0,0 @@ ---- -'slug': '/deployment-guides/parallel-replicas' -'title': '並列レプリカ' -'keywords': -- 'parallel replica' -'description': 'このガイドでは、まずClickHouseがどのように分散テーブルを介して複数のシャードにクエリを分配するかについて説明し、その後、クエリが実行のために複数のレプリカをどのように活用できるかについて説明します。' ---- - -import Image from '@theme/IdealImage'; -import BetaBadge from '@theme/badges/BetaBadge'; -import image_1 from '@site/static/images/deployment-guides/parallel-replicas-1.png' -import image_2 from 
'@site/static/images/deployment-guides/parallel-replicas-2.png' -import image_3 from '@site/static/images/deployment-guides/parallel-replicas-3.png' -import image_4 from '@site/static/images/deployment-guides/parallel-replicas-4.png' -import image_5 from '@site/static/images/deployment-guides/parallel-replicas-5.png' -import image_6 from '@site/static/images/deployment-guides/parallel-replicas-6.png' -import image_7 from '@site/static/images/deployment-guides/parallel-replicas-7.png' -import image_8 from '@site/static/images/deployment-guides/parallel-replicas-8.png' -import image_9 from '@site/static/images/deployment-guides/parallel-replicas-9.png' - - -## はじめに {#introduction} - -ClickHouseはクエリを非常に迅速に処理しますが、これらのクエリはどのように複数のサーバーに分散および並列化されるのでしょうか? - -> このガイドでは、まずClickHouseがどのように分散テーブルを介してクエリを複数のシャードに分配するか、次にクエリがその実行のために複数のレプリカをどのように活用できるかについて説明します。 -## シャーディングアーキテクチャ {#sharded-architecture} - -共有何もないアーキテクチャでは、クラスタは一般的に複数のシャードに分割され、各シャードには全データのサブセットが含まれます。分散テーブルはこれらのシャードの上に存在し、完全なデータの統一ビューを提供します。 - -読み取りはローカルテーブルに送信できます。クエリの実行は指定されたシャードだけで行われるか、分散テーブルに送信され、その場合は各シャードが指定されたクエリを実行します。分散テーブルがクエリされたサーバーは、データを集計し、クライアントに応答します: - -sharded archtiecture - -上の図は、クライアントが分散テーブルをクエリしたときに何が起こるかを示しています: - -
    -
                  -1. SELECTクエリは、ノード上の分散テーブルにランダムに送信されます(ラウンドロビン戦略を介して、またはロードバランサーによって特定のサーバーにルーティングされた後)。このノードは、今後コーディネーターとして機能します。
                  -2. ノードは、分散テーブルによって指定された情報を介して、クエリを実行する必要がある各シャードを特定し、クエリを各シャードに送信します。
                  -3. 各シャードはデータをローカルで読み、フィルタリングし、集計し、その後、コーディネーターにマージ可能な状態を返します。
                  -4. コーディネートノードはデータをマージし、クライアントに応答を送信します。
                  
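                  -
                  -なお、この読み取りの流れをイメージしやすいように、分散テーブルの最小限のスケッチを示します(クラスタ名 `my_cluster`、ローカルテーブル `events_local` はこの例のための仮定です):
                  -
                  -```sql
                  --- 各シャード上に存在するローカルテーブルの上に分散テーブルを作成する(仮の例)
                  -CREATE TABLE events_dist AS events_local
                  -ENGINE = Distributed('my_cluster', 'default', 'events_local', rand());
                  -
                  --- 分散テーブルへのSELECTは各シャードで実行され、クエリを受け取ったノード(コーディネーター)が結果をマージする
                  -SELECT count() FROM events_dist;
                  -```
                  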
- -レプリカが混ざる場合、プロセスはほぼ同様で、唯一の違いは各シャードからの単一のレプリカのみがクエリを実行することです。これにより、より多くのクエリを並列に処理できるようになります。 -## 非シャーディングアーキテクチャ {#non-sharded-architecture} - -ClickHouse Cloudは、上記のアーキテクチャとは非常に異なるアーキテクチャを持っています。 -(詳細については ["ClickHouse Cloud Architecture"](https://clickhouse.com/docs/cloud/reference/architecture) を参照してください)。計算とストレージの分離、および実質的に無限のストレージにより、シャードの必要性は重要性を減少させます。 - -以下の図はClickHouse Cloudのアーキテクチャを示しています: - -non sharded architecture - -このアーキテクチャでは、レプリカをほぼ瞬時に追加および削除でき、高いクラスターのスケーラビリティを確保します。ClickHouse Keeperクラスター(右に示されています)は、メタデータの単一の真実のソースを確保します。レプリカはClickHouse Keeperクラスターからメタデータを取得し、すべてが同じデータを維持します。データ自体はオブジェクトストレージに保存され、SSDキャッシュによりクエリが高速化されます。 - -ただし、クエリの実行を複数のサーバーに分散するには、どうすればよいのでしょうか? シャーディングアーキテクチャでは、各シャードがデータのサブセットに対してクエリを実行できるため、それは非常に明白でした。シャーディングがない場合、これはどのように機能するのでしょうか? -## 並列レプリカの導入 {#introducing-parallel-replicas} - -複数のサーバーを通じてクエリ実行を並列化するには、まずコーディネーターとして機能するサーバーを指定できる必要があります。コーディネーターは、実行される必要があるタスクのリストを作成し、それらがすべて実行され、集約され、結果がクライアントに返されることを保証します。ほとんどの分散システムと同様に、これは初期クエリを受け取ったノードの役割となります。また、作業の単位を定義する必要があります。シャーディングアーキテクチャでは、作業の単位はシャードであり、データのサブセットです。並列レプリカでは、[グラニュール](/guides/best-practices/sparse-primary-indexes#data-is-organized-into-granules-for-parallel-data-processing)と呼ばれるテーブルの小さな部分を作業の単位として使用します。 - -次に、以下の図を使って、実践でどのように機能するかを見てみましょう: - -Parallel replicas - -並列レプリカを使用すると: - -
    -
                  -1. クライアントからのクエリは、ロードバランサーを通過した後、1つのノードに送信されます。このノードはこのクエリのコーディネーターになります。
                  -2. ノードは各パートのインデックスを分析し、処理すべき適切なパーツとグラニュールを選択します。
                  -3. コーディネーターは、異なるレプリカに割り当てることができるグラニュールのセットに作業負荷を分割します。
                  -4. 各グラニュールセットは対応するレプリカによって処理され、完了したときにマージ可能な状態がコーディネーターに送信されます。
                  -5. 最後に、コーディネーターはすべてのレプリカからの結果をマージし、クライアントに応答を返します。
                  
- -上記のステップは、理論における並列レプリカの機能を概説しています。 -しかし、実際には、そうしたロジックが完璧に機能することを妨げる多くの要因があります: - -
    -
                  -1. 一部のレプリカが利用できない場合があります。
                  -2. ClickHouseにおけるレプリケーションは非同期であり、一部のレプリカは、ある時点で同じパーツを持っていないかもしれません。
                  -3. レプリカ間の遅延は何らかの方法で処理する必要があります。
                  -4. ファイルシステムキャッシュは各レプリカのアクティビティに基づいて異なるため、ランダムなタスク割り当てがキャッシュの局所性の観点から最適なパフォーマンスを実現できない可能性があります。
                  
- -これらの要因を克服する方法については、以下のセクションで探ります。 -### アナウンスメント {#announcements} - -上記のリストの(1)および(2)の問題に対処するために、アナウンスメントの概念を導入しました。以下の図を使って、これがどのように機能するかを視覚化してみましょう: - -Announcements - -
    -
                  -1. クライアントからのクエリは、ロードバランサーを通過した後、1つのノードに送信されます。このノードがこのクエリのコーディネーターになります。
                  -2. コーディネートノードは、クラスター内のすべてのレプリカからアナウンスメントを取得するためのリクエストを送信します。レプリカは、テーブルの現在のパーツのセットに対してやや異なるビューを持つ可能性があるため、誤ったスケジューリング判断を避けるにはこの情報を収集する必要があります。
                  -3. コーディネートノードはアナウンスメントを使用して、異なるレプリカに割り当てることができるグラニュールのセットを定義します。例えば、ここでは、パート3のグラニュールはレプリカ2に割り当てられていません。このレプリカがそのアナウンスメントにこのパートを含めなかったためです。また、レプリカ3はアナウンスメントを提供しなかったため、タスクが割り当てられていない点にも注意してください。
                  -4. 各レプリカが自分のグラニュールのサブセットに対してクエリを処理し、マージ可能な状態をコーディネーターに送信した後、コーディネーターは結果をマージし、応答をクライアントに送信します。
                  
-### 動的コーディネーション {#dynamic-coordination} - -遅延の問題に対処するために、動的コーディネーションを追加しました。これは、すべてのグラニュールが一度のリクエストでレプリカに送信されるのではなく、各レプリカがコーディネーターに新しいタスク(処理すべきグラニュールのセット)を要求できることを意味します。コーディネーターは、受信したアナウンスメントに基づいてレプリカにグラニュールセットを提供します。 - -すべてのレプリカがすべてのパーツでアナウンスメントを送信した段階にいると仮定しましょう。 - -以下の図は、動的コーディネーションがどのように機能するかを視覚化しています: - -Dynamic Coordination - part 1 - -
    -
                  -1. レプリカは、コーディネーターノードにタスクを処理できることを知らせ、処理できる作業量を指定することもできます。
                  -2. コーディネーターはレプリカにタスクを割り当てます。
                  
- -Dynamic Coordination - part 2 - -
    -
                  -1. レプリカ1と2は非常に迅速にタスクを完了します。これらのレプリカは、コーディネーターからさらに別のタスクを要求します。
                  -2. コーディネーターは、レプリカ1と2に新しいタスクを割り当てます。
                  
- -Dynamic Coordination - part 3 - -
    -
                  -1. すべてのレプリカはタスクの処理を完了しました。タスクをさらに要求します。
                  -2. コーディネーターはアナウンスメントを使用して、処理する残りのタスクを確認しますが、残りのタスクはありません。
                  -3. コーディネーターはレプリカにすべてが処理されたことを伝えます。これからマージ可能な状態をすべてマージし、クエリに応答します。
                  
-### キャッシュの局所性の管理 {#managing-cache-locality} - -最後の潜在的な問題は、キャッシュの局所性をどのように扱うかです。もしクエリが複数回実行される場合、どのようにして同じタスクを同じレプリカにルーティングするかを確保できるのでしょうか?前の例では、以下のタスクが割り当てられました: - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
                  -|          | レプリカ 1 | レプリカ 2 | レプリカ 3 |
                  -|----------|------------|------------|------------|
                  -| パート 1 | g1, g6, g7 | g2, g4, g5 | g3 |
                  -| パート 2 | g1 | g2, g4, g5 | g3 |
                  -| パート 3 | g1, g6 | g2, g4, g5 | g3 |
                  
- -同じタスクが同じレプリカに割り当てられるようにするために、2つのことが行われます。パート + グラニュールのセット(タスク)のハッシュが計算されます。そして、タスク割り当てに対してレプリカ数の剰余が適用されます。 - -これは理論上は良いことに思えますが、実際には、一つのレプリカに突発的な負荷がかかるか、ネットワークの劣化が発生した場合、特定のタスクを実行するために一貫して使用される同じレプリカによって遅延が発生する可能性があります。`max_parallel_replicas`がレプリカ数より少ない場合、クエリの実行にはランダムなレプリカが選択されます。 -### タスクの奪取 {#task-stealing} - -もし一部のレプリカが他のレプリカよりタスクを処理するのが遅い場合、他のレプリカはそのレプリカに属するはずのタスクをハッシュで「奪う」ことを試みて、遅延を減少させます。 -### 制限事項 {#limitations} - -この機能には既知の制限がありますが、その主要なものはこのセクションに記載されています。 - -:::note -もし以下に示した制限のいずれでもない問題が発生し、並列レプリカが原因と思われる場合は、`comp-parallel-replicas`ラベルを使用してGitHubで問題をオープンしてください。 -::: - -| 制限事項 | 説明 | -|--------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| 複雑なクエリ | 現在、並列レプリカは単純なクエリにはかなりうまく機能します。CTE、サブクエリ、JOIN、非平坦クエリなどの複雑さがクエリ性能に悪影響を及ぼす可能性があります。 | -| 小規模なクエリ | 多くの行を処理しないクエリを実行する場合、複数のレプリカで実行すると、レプリカ間のコーディネーションのネットワーク時間がクエリ実行に追加のサイクルをもたらす可能性があるため、パフォーマンスが向上しない場合があります。これらの問題を制限するために、設定を使用することができます:[`parallel_replicas_min_number_of_rows_per_replica`](/operations/settings/settings#parallel_replicas_min_number_of_rows_per_replica)。 | -| FINALで並列レプリカは無効 | | -| 高いカーディナリティデータと複雑な集計 | 多くのデータを送信する必要がある高いカーディナリティの集計が、クエリを著しく遅くする可能性があります。 | -| 新しいアナライザーとの互換性 | 新しいアナライザーは、特定のシナリオでクエリ実行を大幅に遅くしたり、早くしたりする可能性があります。 | -## 並列レプリカに関連する設定 {#settings-related-to-parallel-replicas} - -| 設定 | 説明 | -|----------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| `enable_parallel_replicas` | `0`: 無効
`1`: 有効
`2`: 並列レプリカの使用を強制します。使用されない場合は例外を投げます。 | -| `cluster_for_parallel_replicas` | 並列レプリケーションに使用するクラスタ名。ClickHouse Cloudを使用している場合は、`default`を使用します。 | -| `max_parallel_replicas` | 複数のレプリカでクエリ実行に使用する最大レプリカ数。クラスター内のレプリカ数より少ない数が指定されている場合、ノードはランダムに選択されます。この値は、水平スケーリングを考慮してオーバーコミットされることもあります。 | -| `parallel_replicas_min_number_of_rows_per_replica` | 処理する必要がある行数に基づいて使用されるレプリカ数を制限します。使用されるレプリカの数は、次のように定義されます:
`推定読み取り行数` / `最小行数(レプリカあたり)`。 | -| `allow_experimental_analyzer` | `0`: 古いアナライザーを使用
`1`: 新しいアナライザーを使用します。

並列レプリカの動作は使用するアナライザーによって変わる可能性があります。 | -## 並列レプリカの問題調査 {#investigating-issues-with-parallel-replicas} - -各クエリに使用されている設定を確認するには、[`system.query_log`](/operations/system-tables/query_log) テーブルを使用できます。また、[`system.events`](/operations/system-tables/events) テーブルを見ることで、サーバー上で発生したすべてのイベントを確認できます。さらに、[`clusterAllReplicas`](/sql-reference/table-functions/cluster) テーブル関数を使用して、すべてのレプリカ上のテーブルを確認できます(クラウドユーザーの場合は、`default`を使用します)。 - -```sql title="クエリ" -SELECT - hostname(), - * -FROM clusterAllReplicas('default', system.events) -WHERE event ILIKE '%ParallelReplicas%' - -
-レスポンス -```response title="レスポンス" -┌─hostname()───────────────────────┬─event──────────────────────────────────────────┬─value─┬─description──────────────────────────────────────────────────────────────────────────────────────────┐ -│ c-crimson-vd-86-server-rdhnsx3-0 │ ParallelReplicasHandleRequestMicroseconds │ 438 │ レプリカからのマークのリクエスト処理にかかった時間 │ -│ c-crimson-vd-86-server-rdhnsx3-0 │ ParallelReplicasHandleAnnouncementMicroseconds │ 558 │ レプリカアナウンスメントの処理にかかった時間 │ -│ c-crimson-vd-86-server-rdhnsx3-0 │ ParallelReplicasReadUnassignedMarks │ 240 │ すべてのレプリカでスケジュールされた未割り当てマークの合計 │ -│ c-crimson-vd-86-server-rdhnsx3-0 │ ParallelReplicasReadAssignedForStealingMarks │ 4 │ 一貫したハッシュによってスチール用にスケジュールされたマークが割り当てられた合計 │ -│ c-crimson-vd-86-server-rdhnsx3-0 │ ParallelReplicasStealingByHashMicroseconds │ 5 │ ハッシュによってスチール用のセグメント収集にかかった時間 │ -│ c-crimson-vd-86-server-rdhnsx3-0 │ ParallelReplicasProcessingPartsMicroseconds │ 5 │ データパーツ処理にかかった時間 │ -│ c-crimson-vd-86-server-rdhnsx3-0 │ ParallelReplicasStealingLeftoversMicroseconds │ 3 │ 孤立したセグメントの収集にかかった時間 │ -│ c-crimson-vd-86-server-rdhnsx3-0 │ ParallelReplicasUsedCount │ 2 │ タスクベースの並列レプリカでクエリを実行するために使用されたレプリカの数 │ -│ c-crimson-vd-86-server-rdhnsx3-0 │ ParallelReplicasAvailableCount │ 6 │ タスクベースの並列レプリカでクエリを実行するために使用可能なレプリカの数 │ -└──────────────────────────────────┴────────────────────────────────────────────────┴───────┴──────────────────────────────────────────────────────────────────────────────────────────────────────┘ -┌─hostname()───────────────────────┬─event──────────────────────────────────────────┬─value─┬─description──────────────────────────────────────────────────────────────────────────────────────────┐ -│ c-crimson-vd-86-server-e9kp5f0-0 │ ParallelReplicasHandleRequestMicroseconds │ 698 │ レプリカからのマークのリクエスト処理にかかった時間 │ -│ c-crimson-vd-86-server-e9kp5f0-0 │ ParallelReplicasHandleAnnouncementMicroseconds │ 644 │ レプリカアナウンスメントの処理にかかった時間 │ -│ c-crimson-vd-86-server-e9kp5f0-0 │ ParallelReplicasReadUnassignedMarks │ 190 │ すべてのレプリカでスケジュールされた未割り当てマークの合計 │ -│ c-crimson-vd-86-server-e9kp5f0-0 │ ParallelReplicasReadAssignedForStealingMarks │ 54 │ 一貫したハッシュによってスチール用にスケジュールされたマークが割り当てられた合計 │ -│ c-crimson-vd-86-server-e9kp5f0-0 │ ParallelReplicasStealingByHashMicroseconds │ 8 │ ハッシュによってスチール用のセグメント収集にかかった時間 │ -│ c-crimson-vd-86-server-e9kp5f0-0 │ ParallelReplicasProcessingPartsMicroseconds │ 4 │ データパーツ処理にかかった時間 │ -│ c-crimson-vd-86-server-e9kp5f0-0 │ ParallelReplicasStealingLeftoversMicroseconds │ 2 │ 孤立したセグメントの収集にかかった時間 │ -│ c-crimson-vd-86-server-e9kp5f0-0 │ ParallelReplicasUsedCount │ 2 │ タスクベースの並列レプリカでクエリを実行するために使用されたレプリカの数 │ -│ c-crimson-vd-86-server-e9kp5f0-0 │ ParallelReplicasAvailableCount │ 6 │ タスクベースの並列レプリカでクエリを実行するために使用可能なレプリカの数 │ -└──────────────────────────────────┴────────────────────────────────────────────────┴───────┴──────────────────────────────────────────────────────────────────────────────────────────────────────┘ -┌─hostname()───────────────────────┬─event──────────────────────────────────────────┬─value─┬─description──────────────────────────────────────────────────────────────────────────────────────────┐ -│ c-crimson-vd-86-server-ybtm18n-0 │ ParallelReplicasHandleRequestMicroseconds │ 620 │ レプリカからのマークのリクエスト処理にかかった時間 │ -│ c-crimson-vd-86-server-ybtm18n-0 │ ParallelReplicasHandleAnnouncementMicroseconds │ 656 │ レプリカアナウンスメントの処理にかかった時間 │ -│ c-crimson-vd-86-server-ybtm18n-0 │ ParallelReplicasReadUnassignedMarks │ 1 │ すべてのレプリカでスケジュールされた未割り当てマークの合計 │ -│ c-crimson-vd-86-server-ybtm18n-0 │ ParallelReplicasReadAssignedForStealingMarks │ 1 │ 
一貫したハッシュによってスチール用にスケジュールされたマークが割り当てられた合計 │ -│ c-crimson-vd-86-server-ybtm18n-0 │ ParallelReplicasStealingByHashMicroseconds │ 4 │ ハッシュによってスチール用のセグメント収集にかかった時間 │ -│ c-crimson-vd-86-server-ybtm18n-0 │ ParallelReplicasProcessingPartsMicroseconds │ 3 │ データパーツ処理にかかった時間 │ -│ c-crimson-vd-86-server-ybtm18n-0 │ ParallelReplicasStealingLeftoversMicroseconds │ 1 │ 孤立したセグメントの収集にかかった時間 │ -│ c-crimson-vd-86-server-ybtm18n-0 │ ParallelReplicasUsedCount │ 2 │ タスクベースの並列レプリカでクエリを実行するために使用されたレプリカの数 │ -│ c-crimson-vd-86-server-ybtm18n-0 │ ParallelReplicasAvailableCount │ 12 │ タスクベースの並列レプリカでクエリを実行するために使用可能なレプリカの数 │ -└──────────────────────────────────┴────────────────────────────────────────────────┴───────┴──────────────────────────────────────────────────────────────────────────────────────────────────────┘ -┌─hostname()───────────────────────┬─event──────────────────────────────────────────┬─value─┬─description──────────────────────────────────────────────────────────────────────────────────────────┐ -│ c-crimson-vd-86-server-16j1ncj-0 │ ParallelReplicasHandleRequestMicroseconds │ 696 │ レプリカからのマークのリクエスト処理にかかった時間 │ -│ c-crimson-vd-86-server-16j1ncj-0 │ ParallelReplicasHandleAnnouncementMicroseconds │ 717 │ レプリカアナウンスメントの処理にかかった時間 │ -│ c-crimson-vd-86-server-16j1ncj-0 │ ParallelReplicasReadUnassignedMarks │ 2 │ すべてのレプリカでスケジュールされた未割り当てマークの合計 │ -│ c-crimson-vd-86-server-16j1ncj-0 │ ParallelReplicasReadAssignedForStealingMarks │ 2 │ 一貫したハッシュによってスチール用にスケジュールされたマークが割り当てられた合計 │ -│ c-crimson-vd-86-server-16j1ncj-0 │ ParallelReplicasStealingByHashMicroseconds │ 10 │ ハッシュによってスチール用のセグメント収集にかかった時間 │ -│ c-crimson-vd-86-server-16j1ncj-0 │ ParallelReplicasProcessingPartsMicroseconds │ 6 │ データパーツ処理にかかった時間 │ -│ c-crimson-vd-86-server-16j1ncj-0 │ ParallelReplicasStealingLeftoversMicroseconds │ 2 │ 孤立したセグメントの収集にかかった時間 │ -│ c-crimson-vd-86-server-16j1ncj-0 │ ParallelReplicasUsedCount │ 2 │ タスクベースの並列レプリカでクエリを実行するために使用されたレプリカの数 │ -│ c-crimson-vd-86-server-16j1ncj-0 │ ParallelReplicasAvailableCount │ 12 │ タスクベースの並列レプリカでクエリを実行するために使用可能なレプリカの数 │ -└──────────────────────────────────┴────────────────────────────────────────────────┴───────┴──────────────────────────────────────────────────────────────────────────────────────────────────────┘ - -
- -[`system.text_log`](/operations/system-tables/text_log) テーブルには、並列レプリカを使用したクエリの実行に関する情報も含まれています: - -```sql title="クエリ" -SELECT message -FROM clusterAllReplicas('default', system.text_log) -WHERE query_id = 'ad40c712-d25d-45c4-b1a1-a28ba8d4019c' -ORDER BY event_time_microseconds ASC - -
-レスポンス -```response title="レスポンス" -┌─message────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┐ -│ (from 54.218.178.249:59198) SELECT * FROM session_events WHERE type='type2' LIMIT 10 SETTINGS allow_experimental_parallel_reading_from_replicas=2; (stage: Complete) │ -│ クエリ SELECT __table1.clientId AS clientId, __table1.sessionId AS sessionId, __table1.pageId AS pageId, __table1.timestamp AS timestamp, __table1.type AS type FROM default.session_events AS __table1 WHERE __table1.type = 'type2' LIMIT _CAST(10, 'UInt64') SETTINGS allow_experimental_parallel_reading_from_replicas = 2 to stage Complete │ -│ アクセスが許可されました: SELECT(clientId, sessionId, pageId, timestamp, type) ON default.session_events │ -│ クエリ SELECT __table1.clientId AS clientId, __table1.sessionId AS sessionId, __table1.pageId AS pageId, __table1.timestamp AS timestamp, __table1.type AS type FROM default.session_events AS __table1 WHERE __table1.type = 'type2' LIMIT _CAST(10, 'UInt64') to stage WithMergeableState only analyze │ -│ アクセスが許可されました: SELECT(clientId, sessionId, pageId, timestamp, type) ON default.session_events │ -│ クエリ SELECT __table1.clientId AS clientId, __table1.sessionId AS sessionId, __table1.pageId AS pageId, __table1.timestamp AS timestamp, __table1.type AS type FROM default.session_events AS __table1 WHERE __table1.type = 'type2' LIMIT _CAST(10, 'UInt64') from stage FetchColumns to stage WithMergeableState only analyze │ -│ クエリ SELECT __table1.clientId AS clientId, __table1.sessionId AS sessionId, __table1.pageId AS pageId, __table1.timestamp AS timestamp, __table1.type AS type FROM default.session_events AS __table1 WHERE __table1.type = 'type2' LIMIT _CAST(10, 'UInt64') SETTINGS allow_experimental_parallel_reading_from_replicas = 2 to stage WithMergeableState only analyze │ -│ アクセスが許可されました: SELECT(clientId, sessionId, pageId, timestamp, type) ON default.session_events │ -│ クエリ SELECT __table1.clientId AS clientId, __table1.sessionId AS sessionId, __table1.pageId AS pageId, __table1.timestamp AS timestamp, __table1.type AS type FROM default.session_events AS __table1 WHERE __table1.type = 'type2' LIMIT _CAST(10, 'UInt64') SETTINGS allow_experimental_parallel_reading_from_replicas = 2 from stage FetchColumns to stage WithMergeableState only analyze │ -│ クエリ SELECT __table1.clientId AS clientId, __table1.sessionId AS sessionId, __table1.pageId AS pageId, __table1.timestamp AS timestamp, __table1.type AS type FROM default.session_events AS __table1 WHERE __table1.type = 'type2' LIMIT _CAST(10, 'UInt64') SETTINGS allow_experimental_parallel_reading_from_replicas = 2 from stage WithMergeableState to stage Complete │ -│ リクエストしたレプリカの数 (100) は、クラスター内で利用可能な実際の数 (6) よりも大きいです。クエリの実行には後者の数を使用します。 │ -│ 初期リクエストはレプリカ 4 から: 2 パーツ: [part all_0_2_1 with ranges [(0, 182)], part all_3_3_0 with ranges [(0, 62)]]---------- -レプリカ 4 から受信 │ -│ 読み取り状態が完全に初期化されています: part all_0_2_1 with ranges [(0, 182)] in replicas [4]; part all_3_3_0 with ranges [(0, 62)] in replicas [4] │ -│ 初期リクエストを送信しました: 1 レプリカ数: 6 │ -│ 初期リクエストはレプリカ 2 から: 2 パーツ: [part all_0_2_1 with ranges [(0, 182)], part all_3_3_0 with ranges [(0, 62)]]---------- -レプリカ 2 から受信 │ -│ 初期リクエストを送信しました: 2 レプリカ数: 6 │ -│ レプリカ 4 からのリクエストを処理中、最小マークサイズは240です │ -│ レプリカ 4 に 1 パーツ: [part all_0_2_1 with ranges [(128, 182)]] に応答を返します。終了: false; mine_marks=0, stolen_by_hash=54, stolen_rest=0 │ -│ 
初期リクエストはレプリカ 1 から: 2 パーツ: [part all_0_2_1 with ranges [(0, 182)], part all_3_3_0 with ranges [(0, 62)]]---------- -レプリカ 1 から受信 │ -│ 初期リクエストを送信しました: 3 レプリカ数: 6 │ -│ レプリカ 4 からのリクエストを処理中、最小マークサイズは240です │ -│ レプリカ 4 に 2 パーツ: [part all_0_2_1 with ranges [(0, 128)], part all_3_3_0 with ranges [(0, 62)]] に応答を返します。終了: false; mine_marks=0, stolen_by_hash=0, stolen_rest=190 │ -│ 初期リクエストはレプリカ 0 から: 2 パーツ: [part all_0_2_1 with ranges [(0, 182)], part all_3_3_0 with ranges [(0, 62)]]---------- -レプリカ 0 から受信 │ -│ 初期リクエストを送信しました: 4 レプリカ数: 6 │ -│ 初期リクエストはレプリカ 5 から: 2 パーツ: [part all_0_2_1 with ranges [(0, 182)], part all_3_3_0 with ranges [(0, 62)]]---------- -レプリカ 5 から受信 │ -│ 初期リクエストを送信しました: 5 レプリカ数: 6 │ -│ レプリカ 2 からのリクエストを処理中、最小マークサイズは240です │ -│ レプリカ 2 に 0 パーツ: [] に応答を返します。終了: true; mine_marks=0, stolen_by_hash=0, stolen_rest=0 │ -│ 初期リクエストはレプリカ 3 から: 2 パーツ: [part all_0_2_1 with ranges [(0, 182)], part all_3_3_0 with ranges [(0, 62)]]---------- -レプリカ 3 から受信 │ -│ 初期リクエストを送信しました: 6 レプリカ数: 6 │ -│ 読むべき総行数: 2000000 │ -│ レプリカ 5 からのリクエストを処理中、最小マークサイズは240です │ -│ レプリカ 5 に 0 パーツ: [] に応答を返します。終了: true; mine_marks=0, stolen_by_hash=0, stolen_rest=0 │ -│ レプリカ 0 からのリクエストを処理中、最小マークサイズは240です │ -│ レプリカ 0 に 0 パーツ: [] に応答を返します。終了: true; mine_marks=0, stolen_by_hash=0, stolen_rest=0 │ -│ レプリカ 1 からのリクエストを処理中、最小マークサイズは240です │ -│ レプリカ 1 に 0 パーツ: [] に応答を返します。終了: true; mine_marks=0, stolen_by_hash=0, stolen_rest=0 │ -│ レプリカ 3 からのリクエストを処理中、最小マークサイズは240です │ -│ レプリカ 3 に 0 パーツ: [] に応答を返します。終了: true; mine_marks=0, stolen_by_hash=0, stolen_rest=0 │ -│ (c-crimson-vd-86-server-rdhnsx3-0.c-crimson-vd-86-server-headless.ns-crimson-vd-86.svc.cluster.local:9000) 読み取るデータが十分であるため、クエリをキャンセルします。 │ -│ 81920 行を読み取り、5.16 MiB を 0.013166 秒で読み取り、6222087.194288318 行/sec., 391.63 MiB/sec. │ -│ 調整完了: 統計: レプリカ 0 - {requests: 2 marks: 0 assigned_to_me: 0 stolen_by_hash: 0 stolen_unassigned: 0}; レプリカ 1 - {requests: 2 marks: 0 assigned_to_me: 0 stolen_by_hash: 0 stolen_unassigned: 0}; レプリカ 2 - {requests: 2 marks: 0 assigned_to_me: 0 stolen_by_hash: 0 stolen_unassigned: 0}; レプリカ 3 - {requests: 2 marks: 0 assigned_to_me: 0 stolen_by_hash: 0 stolen_unassigned: 0}; レプリカ 4 - {requests: 3 marks: 244 assigned_to_me: 0 stolen_by_hash: 54 stolen_unassigned: 190}; レプリカ 5 - {requests: 2 marks: 0 assigned_to_me: 0 stolen_by_hash: 0 stolen_unassigned: 0} │ -│ クエリのピークメモリ使用量: 1.81 MiB。 │ -│ 0.024095586 秒で処理されました。 │ -└────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┘ - -
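If the full log output above is more detail than you need, a lighter-weight check is to aggregate the same `system.text_log` entries per host to see which replicas actually took part. The sketch below simply reuses the `query_id` from the example above; host names and counts will differ in your cluster:

```sql title="Log entries per replica (sketch)"
SELECT
    hostname() AS replica,
    count() AS log_entries
FROM clusterAllReplicas('default', system.text_log)
WHERE query_id = 'ad40c712-d25d-45c4-b1a1-a28ba8d4019c'
GROUP BY replica
ORDER BY replica ASC
```

Replicas that only answered the initial announcement typically log just a few lines, while the initiating replica produces most of the output shown above.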
- -最後に、`EXPLAIN PIPELINE` を使用することもできます。これにより、ClickHouse がクエリをどのように実行し、実行にどのリソースが使用されるかが強調表示されます。以下のクエリを例に見てみましょう: - -```sql -SELECT count(), uniq(pageId) , min(timestamp), max(timestamp) -FROM session_events -WHERE type='type3' -GROUP BY toYear(timestamp) LIMIT 10 - -並列レプリカなしでのクエリパイプラインを見てみましょう: - -```sql title="EXPLAIN PIPELINE (並列レプリカなし)" -EXPLAIN PIPELINE graph = 1, compact = 0 -SELECT count(), uniq(pageId) , min(timestamp), max(timestamp) -FROM session_events -WHERE type='type3' -GROUP BY toYear(timestamp) -LIMIT 10 -SETTINGS allow_experimental_parallel_reading_from_replicas=0 -FORMAT TSV; - -EXPLAIN without parallel_replica - -並列レプリカありの場合: - -```sql title="EXPLAIN PIPELINE (並列レプリカあり)" -EXPLAIN PIPELINE graph = 1, compact = 0 -SELECT count(), uniq(pageId) , min(timestamp), max(timestamp) -FROM session_events -WHERE type='type3' -GROUP BY toYear(timestamp) -LIMIT 10 -SETTINGS allow_experimental_parallel_reading_from_replicas=2 -FORMAT TSV; - -EXPLAIN with parallel_replica diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/deployment-guides/parallel-replicas.mdx.hash b/i18n/jp/docusaurus-plugin-content-docs/current/deployment-guides/parallel-replicas.mdx.hash deleted file mode 100644 index 23c726836f9..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/deployment-guides/parallel-replicas.mdx.hash +++ /dev/null @@ -1 +0,0 @@ -6364ec63eca1d1bd diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/deployment-guides/replicated.md b/i18n/jp/docusaurus-plugin-content-docs/current/deployment-guides/replicated.md deleted file mode 100644 index da0308d3fa9..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/deployment-guides/replicated.md +++ /dev/null @@ -1,544 +0,0 @@ ---- -slug: '/architecture/replication' -sidebar_label: '障害耐性のためのレプリケーション' -sidebar_position: 10 -title: '障害耐性のためのレプリケーション' -description: '5台のサーバーが構成された例のアーキテクチャについてのページ。2台はデータのコピーをホストするために使用され、残りのサーバーはデータのレプリケーションを調整するために使用されます。' ---- - -import Image from '@theme/IdealImage'; -import ReplicationShardingTerminology from '@site/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_replication-sharding-terminology.md'; -import ConfigFileNote from '@site/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_config-files.md'; -import KeeperConfigFileNote from '@site/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_keeper-config-files.md'; -import ReplicationArchitecture from '@site/static/images/deployment-guides/architecture_1s_2r_3_nodes.png'; - - -## 説明 {#description} -このアーキテクチャには、5台のサーバーが構成されています。2台はデータのコピーをホストするために使用され、他の3台はデータのレプリケーションをコーディネートするために使用されます。この例では、ReplicatedMergeTree テーブルエンジンを使用して、データノード間でレプリケートされるデータベースとテーブルを作成します。 - -## レベル: 基本 {#level-basic} - - - -## 環境 {#environment} -### アーキテクチャ図 {#architecture-diagram} - -ReplicatedMergeTreeを使用した1シャードと2レプリカのアーキテクチャ図 - -|ノード|説明| -|----|-----------| -|clickhouse-01|データ| -|clickhouse-02|データ| -|clickhouse-keeper-01|分散コーディネーション| -|clickhouse-keeper-02|分散コーディネーション| -|clickhouse-keeper-03|分散コーディネーション| - -:::note -本番環境では、ClickHouse Keeper用の*専用*ホストの使用を強く推奨します。テスト環境では、ClickHouse ServerとClickHouse Keeperを同一のサーバー上で実行することが許容されます。他の基本的な例、[スケーリングアウト](/deployment-guides/horizontal-scaling.md)でもこの方法が使用されています。この例では、KeeperをClickHouse Serverから分離する推奨メソッドを示しています。Keeperサーバーはより小型で、ClickHouse Serversが非常に大きくなるまで、各Keeperサーバーに4GBのRAMが一般的に十分です。 -::: - -## インストール {#install} - -`clickhouse-01`および`clickhouse-02`の2台のサーバーにClickHouse Serverとクライアントをインストールします。手順については、[アーカイブタイプに関する手順](/getting-started/install/install.mdx)を参照してください(.deb、.rpm、.tar.gzなど)。 - 
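As an illustration only — the linked installation guide remains the authoritative reference — on Debian/Ubuntu hosts where the ClickHouse apt repository has already been configured, the packages can typically be installed as follows (the Keeper hosts described in the next step use the `clickhouse-keeper` package):

```bash title="Example installation via apt (sketch)"
# clickhouse-01 and clickhouse-02
sudo apt-get update
sudo apt-get install -y clickhouse-server clickhouse-client

# clickhouse-keeper-01, clickhouse-keeper-02, clickhouse-keeper-03
sudo apt-get install -y clickhouse-keeper
```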
-`clickhouse-keeper-01`、`clickhouse-keeper-02`、`clickhouse-keeper-03`の3台のサーバーにClickHouse Keeperをインストールします。手順については、[アーカイブタイプに関する手順](/getting-started/install/install.mdx)を参照してください(.deb、.rpm、.tar.gzなど)。 - -## 設定ファイルの編集 {#editing-configuration-files} - - - -## clickhouse-01の設定 {#clickhouse-01-configuration} - -clickhouse-01には5つの設定ファイルがあります。これらのファイルを1つのファイルにまとめることもできますが、ドキュメントの明確さを保つために、別々に見る方が簡単かもしれません。設定ファイルを読み進めると、clickhouse-01とclickhouse-02の間でほとんどの設定が同じであることがわかります。違いは強調表示されます。 - -### ネットワークとロギングの設定 {#network-and-logging-configuration} - -これらの値は、お好みに応じてカスタマイズ可能です。この例の設定では、次のようになります: -- サイズ1000Mで3回ロールオーバーするデバッグログ -- `clickhouse-client`で接続したときに表示される名前は`cluster_1S_2R node 1`です。 -- ClickHouseは、ポート8123および9000でIPV4ネットワーク上でリッスンします。 - -```xml title="/etc/clickhouse-server/config.d/network-and-logging.xml on clickhouse-01" - - - debug - /var/log/clickhouse-server/clickhouse-server.log - /var/log/clickhouse-server/clickhouse-server.err.log - 1000M - 3 - - cluster_1S_2R node 1 - 0.0.0.0 - 8123 - 9000 - - -### マクロ設定 {#macros-configuration} - -マクロ`shard`および`replica`は、分散DDLの複雑さを軽減します。設定された値はDDLクエリに自動的に置き換えられ、DDLを簡素化します。この設定のマクロは、各ノードのシャードとレプリカ番号を指定します。 -この1シャード2レプリカの例では、レプリカマクロはclickhouse-01で`replica_1`、clickhouse-02で`replica_2`になります。シャードマクロは両方のclickhouse-01およびclickhouse-02で`1`です(シャードは1つしかありません)。 - -```xml title="/etc/clickhouse-server/config.d/macros.xml on clickhouse-01" - - - 01 - - 01 - cluster_1S_2R - - - -### レプリケーションとシャーディングの設定 {#replication-and-sharding-configuration} - -最初から: -- XML内の`remote_servers`セクションは、環境内の各クラスターを指定します。属性`replace=true`は、デフォルトのClickHouse設定内のサンプル`remote_servers`を、このファイルで指定された`remote_server`構成に置き換えます。この属性なしでは、このファイルのリモートサーバーはデフォルトのサンプルリストに追加されます。 -- この例には、`cluster_1S_2R`という名前のクラスターがあります。 -- クラスター`cluster_1S_2R`には、値`mysecretphrase`のための秘密が作成されます。この秘密は、環境内のすべてのリモートサーバー間で共有され、正しいサーバーが一緒に参加していることを確認します。 -- クラスター`cluster_1S_2R`には1つのシャードと2つのレプリカがあります。このドキュメントの最初にあるアーキテクチャ図を見て、以下のXMLでの`shard`定義と比較してみてください。シャード定義には2つのレプリカが含まれています。各レプリカのホストとポートが指定されています。1つのレプリカは`clickhouse-01`に保存され、もう1つのレプリカは`clickhouse-02`に保存されます。 -- シャードの内部レプリケーションはtrueに設定されています。各シャードは、設定ファイルに`internal_replication`パラメータを定義できます。このパラメータがtrueに設定されている場合、書き込み操作は最初の正常なレプリカを選択し、データを書き込みます。 - -```xml title="/etc/clickhouse-server/config.d/remote-servers.xml on clickhouse-01" - - - - mysecretphrase - - true - - clickhouse-01 - 9000 - - - clickhouse-02 - 9000 - - - - - - -### Keeperの使用設定 {#configuring-the-use-of-keeper} - -この設定ファイル`use-keeper.xml`は、ClickHouse Serverがレプリケーションと分散DDLのコーディネーションのためにClickHouse Keeperを使用するように設定されています。このファイルは、ClickHouse Serverがノードclickhouse-keeper-01 - 03のポート9181でKeeperを使用するべきであることを指定しており、ファイルは`clickhouse-01`および`clickhouse-02`で同じです。 - -```xml title="/etc/clickhouse-server/config.d/use-keeper.xml on clickhouse-01" - - - - - clickhouse-keeper-01 - 9181 - - - clickhouse-keeper-02 - 9181 - - - clickhouse-keeper-03 - 9181 - - - - -## clickhouse-02の設定 {#clickhouse-02-configuration} - -設定はclickhouse-01とclickhouse-02で非常に似ているため、ここでは違いのみ指摘します。 - -### ネットワークとロギングの設定 {#network-and-logging-configuration-1} - -このファイルは、`display_name`の例外を除いて、clickhouse-01とclickhouse-02の両方で同じです。 - -```xml title="/etc/clickhouse-server/config.d/network-and-logging.xml on clickhouse-02" - - - debug - /var/log/clickhouse-server/clickhouse-server.log - /var/log/clickhouse-server/clickhouse-server.err.log - 1000M - 3 - - - cluster_1S_2R node 2 - 0.0.0.0 - 8123 - 9000 - - -### マクロ設定 {#macros-configuration-1} - -マクロ設定は、clickhouse-01とclickhouse-02で異なります。このノードでは`replica`が`02`に設定されています。 - -```xml title="/etc/clickhouse-server/config.d/macros.xml 
on clickhouse-02" - - - 01 - - 02 - cluster_1S_2R - - - -### レプリケーションとシャーディングの設定 {#replication-and-sharding-configuration-1} - -このファイルは、clickhouse-01とclickhouse-02の両方で同じです。 - -```xml title="/etc/clickhouse-server/config.d/remote-servers.xml on clickhouse-02" - - - - mysecretphrase - - true - - clickhouse-01 - 9000 - - - clickhouse-02 - 9000 - - - - - - -### Keeperの使用設定 {#configuring-the-use-of-keeper-1} - -このファイルは、clickhouse-01とclickhouse-02で同じです。 - -```xml title="/etc/clickhouse-server/config.d/use-keeper.xml on clickhouse-02" - - - - - clickhouse-keeper-01 - 9181 - - - clickhouse-keeper-02 - 9181 - - - clickhouse-keeper-03 - 9181 - - - - -## clickhouse-keeper-01の設定 {#clickhouse-keeper-01-configuration} - - - -ClickHouse Keeperは、データのレプリケーションと分散DDLクエリの実行のためのコーディネーションシステムを提供します。ClickHouse KeeperはApache ZooKeeperと互換性があります。この設定は、ClickHouse Keeperをポート9181で有効にします。強調表示された行は、このKeeperインスタンスの`server_id`が1であることを指定しています。この`enable-keeper.xml`ファイルの唯一の違いは、3台のサーバー間で`server_id`の設定です。`clickhouse-keeper-02`は`server_id`が`2`に設定され、`clickhouse-keeper-03`は`server_id`が`3`に設定されます。raft構成セクションは3台のサーバーで同じであり、以下に強調表示されています。 - -:::note -何らかの理由でKeeperノードが置き換えられるか再構築される場合は、既存の`server_id`を再利用しないでください。たとえば、`server_id`が`2`のKeeperノードを再構築する場合は、`4`またはそれ以上のserver_idを付与してください。 -::: - -```xml title="/etc/clickhouse-keeper/keeper_config.xml on clickhouse-keeper-01" - - - trace - /var/log/clickhouse-keeper/clickhouse-keeper.log - /var/log/clickhouse-keeper/clickhouse-keeper.err.log - 1000M - 3 - - 0.0.0.0 - - 9181 - - 1 - /var/lib/clickhouse/coordination/log - /var/lib/clickhouse/coordination/snapshots - - 10000 - 30000 - trace - - - - - 1 - clickhouse-keeper-01 - 9234 - - - - 2 - clickhouse-keeper-02 - 9234 - - - 3 - clickhouse-keeper-03 - 9234 - - - - - -## clickhouse-keeper-02の設定 {#clickhouse-keeper-02-configuration} - -`clickhouse-keeper-01`と`clickhouse-keeper-02`の間には一行の違いしかありません。このノードでは`server_id`が`2`に設定されています。 - -```xml title="/etc/clickhouse-keeper/keeper_config.xml on clickhouse-keeper-02" - - - trace - /var/log/clickhouse-keeper/clickhouse-keeper.log - /var/log/clickhouse-keeper/clickhouse-keeper.err.log - 1000M - 3 - - 0.0.0.0 - - 9181 - - 2 - /var/lib/clickhouse/coordination/log - /var/lib/clickhouse/coordination/snapshots - - 10000 - 30000 - trace - - - - 1 - clickhouse-keeper-01 - 9234 - - - - 2 - clickhouse-keeper-02 - 9234 - - - - 3 - clickhouse-keeper-03 - 9234 - - - - - -## clickhouse-keeper-03の設定 {#clickhouse-keeper-03-configuration} - -`clickhouse-keeper-01`と`clickhouse-keeper-03`の間には一行の違いしかありません。このノードでは`server_id`が`3`に設定されています。 - -```xml title="/etc/clickhouse-keeper/keeper_config.xml on clickhouse-keeper-03" - - - trace - /var/log/clickhouse-keeper/clickhouse-keeper.log - /var/log/clickhouse-keeper/clickhouse-keeper.err.log - 1000M - 3 - - 0.0.0.0 - - 9181 - - 3 - /var/lib/clickhouse/coordination/log - /var/lib/clickhouse/coordination/snapshots - - 10000 - 30000 - trace - - - - 1 - clickhouse-keeper-01 - 9234 - - - 2 - clickhouse-keeper-02 - 9234 - - - - 3 - clickhouse-keeper-03 - 9234 - - - - - - -## テスト {#testing} - -ReplicatedMergeTreeとClickHouse Keeperを体験するために、以下のコマンドを実行して次のようにします: -- 上記で構成されたクラスターにデータベースを作成します -- ReplicatedMergeTreeテーブルエンジンを使用してデータベースにテーブルを作成します -- 1つのノードにデータを挿入し、別のノードで照会します -- 1つのClickHouseサーバーノードを停止します -- 動作中のノードにさらにデータを挿入します -- 停止したノードを再起動します -- 再起動したノードでデータが利用可能であることを確認します - -### ClickHouse Keeperが実行中であることを確認する {#verify-that-clickhouse-keeper-is-running} - -`mntr`コマンドは、ClickHouse 
Keeperが実行中であることを確認し、3つのKeeperノードの関係に関する状態情報を取得するために使用されます。この例で使用される設定では、3つのノードが協力して作業しています。ノードはリーダーを選出し、残りのノードはフォロワーになります。`mntr`コマンドは、パフォーマンスに関連する情報や、特定のノードがフォロワーかリーダーであるかどうかを提供します。 - -:::tip -`mntr`コマンドをKeeperに送信するためには、`netcat`をインストールする必要があるかもしれません。ダウンロード情報は[nmap.org](https://nmap.org/ncat/)のページを参照してください。 -::: - -```bash title="clickhouse-keeper-01、clickhouse-keeper-02、およびclickhouse-keeper-03のシェルから実行" -echo mntr | nc localhost 9181 - -```response title="フォロワーからの応答" -zk_version v23.3.1.2823-testing-46e85357ce2da2a99f56ee83a079e892d7ec3726 -zk_avg_latency 0 -zk_max_latency 0 -zk_min_latency 0 -zk_packets_received 0 -zk_packets_sent 0 -zk_num_alive_connections 0 -zk_outstanding_requests 0 - -# highlight-next-line -zk_server_state follower -zk_znode_count 6 -zk_watch_count 0 -zk_ephemerals_count 0 -zk_approximate_data_size 1271 -zk_key_arena_size 4096 -zk_latest_snapshot_size 0 -zk_open_file_descriptor_count 46 -zk_max_file_descriptor_count 18446744073709551615 - -```response title="リーダーからの応答" -zk_version v23.3.1.2823-testing-46e85357ce2da2a99f56ee83a079e892d7ec3726 -zk_avg_latency 0 -zk_max_latency 0 -zk_min_latency 0 -zk_packets_received 0 -zk_packets_sent 0 -zk_num_alive_connections 0 -zk_outstanding_requests 0 - -# highlight-next-line -zk_server_state leader -zk_znode_count 6 -zk_watch_count 0 -zk_ephemerals_count 0 -zk_approximate_data_size 1271 -zk_key_arena_size 4096 -zk_latest_snapshot_size 0 -zk_open_file_descriptor_count 48 -zk_max_file_descriptor_count 18446744073709551615 - -# highlight-start -zk_followers 2 -zk_synced_followers 2 - -# highlight-end - -### ClickHouseクラスターの機能を確認する {#verify-clickhouse-cluster-functionality} - -1つのシェルで`clickhouse client`を使用してノード`clickhouse-01`に接続し、別のシェルでノード`clickhouse-02`に接続します。 - -1. 上記で構成したクラスターにデータベースを作成します - -```sql title="ノードclickhouse-01またはclickhouse-02で実行" -CREATE DATABASE db1 ON CLUSTER cluster_1S_2R - -```response -┌─host──────────┬─port─┬─状態─┬─エラー─┬─残りのホスト数─┬─アクティブなホスト数─┐ -│ clickhouse-02 │ 9000 │ 0 │ │ 1 │ 0 │ -│ clickhouse-01 │ 9000 │ 0 │ │ 0 │ 0 │ -└───────────────┴──────┴────────┴───────┴─────────────────────┴──────────────────┘ - -2. ReplicatedMergeTreeテーブルエンジンを使用してデータベースにテーブルを作成します -```sql title="ノードclickhouse-01またはclickhouse-02で実行" -CREATE TABLE db1.table1 ON CLUSTER cluster_1S_2R -( - `id` UInt64, - `column1` String -) -ENGINE = ReplicatedMergeTree -ORDER BY id - -```response -┌─host──────────┬─port─┬─状態─┬─エラー─┬─残りのホスト数─┬─アクティブなホスト数─┐ -│ clickhouse-02 │ 9000 │ 0 │ │ 1 │ 0 │ -│ clickhouse-01 │ 9000 │ 0 │ │ 0 │ 0 │ -└───────────────┴──────┴────────┴───────┴─────────────────────┴──────────────────┘ - -3. 1つのノードにデータを挿入し、別のノードで照会します -```sql title="ノードclickhouse-01で実行" -INSERT INTO db1.table1 (id, column1) VALUES (1, 'abc'); - -4. ノード`clickhouse-02`でテーブルを照会します -```sql title="ノードclickhouse-02で実行" -SELECT * -FROM db1.table1 - -```response -┌─id─┬─column1─┐ -│ 1 │ abc │ -└────┴─────────┘ - -5. 別のノードにデータを挿入し、ノード`clickhouse-01`で照会します -```sql title="ノードclickhouse-02で実行" -INSERT INTO db1.table1 (id, column1) VALUES (2, 'def'); - -```sql title="ノードclickhouse-01で実行" -SELECT * -FROM db1.table1 - -```response -┌─id─┬─column1─┐ -│ 1 │ abc │ -└────┴─────────┘ -┌─id─┬─column1─┐ -│ 2 │ def │ -└────┴─────────┘ - -6. 1つのClickHouseサーバーノードを停止します -ノードを起動するのに使用したのと同様のオペレーティングシステムコマンドを実行して、1つのClickHouseサーバーノードを停止します。`systemctl start`を使用してノードを起動した場合は、`systemctl stop`を使用して停止します。 - -7. 
動作中のノードにさらにデータを挿入します -```sql title="動作中のノードで実行" -INSERT INTO db1.table1 (id, column1) VALUES (3, 'ghi'); - -データを選択します: -```sql title="動作中のノードで実行" -SELECT * -FROM db1.table1 - -```response -┌─id─┬─column1─┐ -│ 1 │ abc │ -└────┴─────────┘ -┌─id─┬─column1─┐ -│ 2 │ def │ -└────┴─────────┘ -┌─id─┬─column1─┐ -│ 3 │ ghi │ -└────┴─────────┘ - -8. 停止したノードを再起動し、そこからも選択します - -```sql title="再起動したノードで実行" -SELECT * -FROM db1.table1 - -```response -┌─id─┬─column1─┐ -│ 1 │ abc │ -└────┴─────────┘ -┌─id─┬─column1─┐ -│ 2 │ def │ -└────┴─────────┘ -┌─id─┬─column1─┐ -│ 3 │ ghi │ -└────┴─────────┘ diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/deployment-guides/replicated.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/deployment-guides/replicated.md.hash deleted file mode 100644 index 6bc5667c5f9..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/deployment-guides/replicated.md.hash +++ /dev/null @@ -1 +0,0 @@ -9ff877351882fea7 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/deployment-guides/terminology.md b/i18n/jp/docusaurus-plugin-content-docs/current/deployment-guides/terminology.md deleted file mode 100644 index fe9e9f11aec..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/deployment-guides/terminology.md +++ /dev/null @@ -1,41 +0,0 @@ ---- -slug: '/architecture/introduction' -sidebar_label: '紹介' -title: '紹介' -sidebar_position: 1 -description: 'ClickHouseのサポートおよびサービス機関から提供されたアドバイスに基づいて、展開の例を示すページ' ---- - -import ReplicationShardingTerminology from '@site/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_replication-sharding-terminology.md'; - -これらのデプロイメントの例は、ClickHouseサポートおよびサービス組織がClickHouseユーザーに提供したアドバイスに基づいています。これらは動作する例であり、試してみてからニーズに合わせて調整することをお勧めします。こちらに、あなたの要件にぴったり合う例を見つけられるかもしれません。Alternatively, もしデータを2回ではなく3回レプリケートする必要がある場合は、ここで示されたパターンに従うことで、別のレプリカを追加できるはずです。 - - - -## 例 {#examples} - -### 基本 {#basic} - -- [**スケーリングアウト**](/deployment-guides/horizontal-scaling.md) の例は、データを2つのノードにシャードし、分散テーブルを使用する方法を示しています。これにより、2つのClickHouseノード上にデータが存在することになります。2つのClickHouseノードは、分散同期を提供するClickHouse Keeperも実行しています。また、3番目のノードは、ClickHouse Keeperのクオラムを完成させるためにスタンドアロンの状態でClickHouse Keeperを実行しています。 - -- [**フォールトトレランスのためのレプリケーション**](/deployment-guides/replicated.md) の例は、データを2つのノードにレプリケートし、ReplicatedMergeTreeテーブルを使用する方法を示しています。これにより、2つのClickHouseノード上にデータが存在することになります。2つのClickHouseサーバーノードに加えて、レプリケーションを管理するための3つのスタンドアロンのClickHouse Keeperノードがあります。 - -
- -
- -### 中級 {#intermediate} - -- 近日公開予定 - -### 上級 {#advanced} - -- 近日公開予定 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/deployment-guides/terminology.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/deployment-guides/terminology.md.hash deleted file mode 100644 index c813bde4b34..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/deployment-guides/terminology.md.hash +++ /dev/null @@ -1 +0,0 @@ -32998f267bc5d198 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/deployment-modes.md b/i18n/jp/docusaurus-plugin-content-docs/current/deployment-modes.md deleted file mode 100644 index d5031484301..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/deployment-modes.md +++ /dev/null @@ -1,74 +0,0 @@ ---- -slug: '/deployment-modes' -sidebar_label: 'デプロイメントモード' -description: 'ClickHouseは、すべて同じ強力なデータベースエンジンを使用する4つのデプロイメントオプションを提供しており、特定のニーズに合わせて異なる形でパッケージ化されています。' -title: 'デプロイメントモード' ---- - -import chServer from '@site/static/images/deployment-modes/ch-server.png'; -import chCloud from '@site/static/images/deployment-modes/ch-cloud.png'; -import chLocal from '@site/static/images/deployment-modes/ch-local.png'; -import chDB from '@site/static/images/deployment-modes/chdb.png'; -import Image from '@theme/IdealImage'; - -ClickHouseは、ニーズに応じていくつかの異なる方法で展開できる多目的なデータベースシステムです。その核となるのは、すべての展開オプションが**同じ強力なClickHouseデータベースエンジンを使用する**ことです - 異なるのは、それとの対話方法と実行場所です。 - -大規模な分析を本番環境で実行している場合でも、ローカルデータ分析を行っている場合でも、アプリケーションを構築している場合でも、あなたのユースケースに合った展開オプションがあります。基盤となるエンジンの一貫性により、すべての展開モードで同様の高いパフォーマンスとSQL互換性が得られます。 -このガイドでは、ClickHouseを展開および利用する主要な4つの方法を探ります: - -* 伝統的なクライアント/サーバー展開のためのClickHouse Server -* 完全に管理されたデータベース操作のためのClickHouse Cloud -* コマンドラインデータ処理用のclickhouse-local -* アプリケーションに直接ClickHouseを埋め込むためのchDB - -各展開モードには自身の強みと理想的なユースケースがあり、以下で詳しく探ります。 - - - -## ClickHouse Server {#clickhouse-server} - -ClickHouse Serverは伝統的なクライアント/サーバーアーキテクチャを表し、本番環境に最適です。この展開モードは、高スループットおよび低レイテンシのクエリを伴う完全なOLAPデータベース機能を提供し、ClickHouseの特徴です。 - -ClickHouse Server - -
- -展開の柔軟性に関しては、ClickHouse Serverは、開発やテストのためにローカルマシンにインストールしたり、AWS、GCP、Azureなどの主要なクラウドプロバイダーに展開したり、オンプレミスのハードウェアに設定したりできます。より大規模な運用の場合、分散クラスターとして設定し、負荷の増加に対応し、高可用性を提供できます。 - -この展開モードは、信頼性、パフォーマンス、およびフル機能アクセスが重要な本番環境の標準的な選択肢です。 - -## ClickHouse Cloud {#clickhouse-cloud} - -[ClickHouse Cloud](/cloud/overview)は、独自の展開を運用するためのオーバーヘッドを取り除いた完全管理型のClickHouseバージョンです。ClickHouse Serverのすべてのコア機能を保持しつつ、開発と運用をスムーズにする追加機能で体験を強化します。 - -ClickHouse Cloud - -ClickHouse Cloudの主な利点は、統合されたツールです。[ClickPipes](/cloud/get-started/cloud-quick-start#clickpipes)は、複雑なETLパイプラインを管理せずに、さまざまなソースからデータを簡単に接続し、ストリームするための堅牢なデータ取り込みフレームワークを提供します。このプラットフォームは、アプリケーションを構築する際に大幅に簡素化された専用の[クエリAPI](/cloud/get-started/query-endpoints)も提供します。 - -ClickHouse CloudのSQLコンソールには、クエリをインタラクティブな視覚化に変換できる強力な[ダッシュボード](/cloud/manage/dashboards)機能が含まれています。保存されたクエリから構築されたダッシュボードを作成して共有することができ、クエリパラメータを通じてインタラクティブな要素を追加できます。これらのダッシュボードはグローバルフィルターを使用してダイナミックにすることができ、ユーザーはカスタマイズ可能なビューを通じてデータを探索できます - ただし、視覚化を表示するには、少なくとも保存されたクエリへの読み取りアクセスが必要です。 - -監視と最適化のために、ClickHouse Cloudには組み込みのチャートと[クエリインサイト](/cloud/get-started/query-insights)が含まれています。これらのツールは、クラスターのパフォーマンスに対する深い可視性を提供し、クエリパターン、リソースの使用状況、および最適化機会を理解する手助けをします。このレベルの可観測性は、高性能の分析運用を維持する必要があるチームにとって特に価値があります。 - -サービスの管理された性質により、更新、バックアップ、スケーリング、またはセキュリティパッチについて心配する必要はありません - これらはすべて自動的に処理されます。これにより、データやアプリケーションに集中したい組織にとって理想的な選択肢となります。 - -## clickhouse-local {#clickhouse-local} - -[clickhouse-local](/operations/utilities/clickhouse-local)は、スタンドアロン実行可能ファイルでClickHouseの完全な機能を提供する強力なコマンドラインツールです。基本的にはClickHouse Serverと同じデータベースですが、サーバーインスタンスを実行せずにコマンドラインからClickHouseのすべての機能を直接活用できるようにパッケージ化されています。 - -clickHouse-local - -このツールは、ローカルファイルやクラウドストレージサービスに保存されたデータでのアドホックデータ分析に優れています。ClickHouseのSQL方言を使用して、さまざまな形式(CSV、JSON、Parquetなど)のファイルを直接クエリすることができ、迅速なデータ探索や一時的な分析タスクに最適な選択肢です。 - -clickhouse-localにはClickHouseのすべての機能が含まれているため、データ変換、形式変換、または通常ClickHouse Serverで行う他のデータベース操作に使用できます。主に一時的な操作に使用されますが、必要に応じてClickHouse Serverと同じストレージエンジンを使用してデータを保持することも可能です。 - -リモートテーブル関数とローカルファイルシステムへのアクセスの組み合わせにより、clickhouse-localはClickHouse Serverとローカルマシンのファイル間でデータを結合する必要があるシナリオで特に便利です。これは、サーバーにアップロードしたくない機密性の高いまたは一時的なローカルデータを扱う際に特に価値があります。 - -## chDB {#chdb} - -[chDB](/chdb)は、プロセス内データベースエンジンとして埋め込まれたClickHouseであり、主にPythonが実装されていますが、Go、Rust、NodeJS、Bunでも利用可能です。この展開オプションは、ClickHouseの強力なOLAP機能をアプリケーションのプロセス内に直接取り込み、別のデータベースインストールの必要を排除します。 - -chDB - Embedded ClickHouse - -chDBはアプリケーションのエコシステムとのシームレスな統合を提供します。例えば、Pythonでは、PandasやArrowなどの一般的なデータサイエンスツールと効率的に連携するように最適化されており、Pythonのmemoryviewを介してデータコピーのオーバーヘッドを最小限に抑えています。これにより、ClickHouseのクエリパフォーマンスを既存のワークフロー内で利用したいデータサイエンティストやアナリストにとって特に価値があります。 - -chDBはまた、clickhouse-localで作成されたデータベースに接続できるため、データを扱う方法に柔軟性をもたらします。これにより、ローカル開発、Pythonでのデータ探索、およびより永続的なストレージソリューション間でシームレスに移行でき、データアクセスパターンを変更することなく利用できます。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/deployment-modes.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/deployment-modes.md.hash deleted file mode 100644 index 15ec870dd5f..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/deployment-modes.md.hash +++ /dev/null @@ -1 +0,0 @@ -1ea30233181e2ab5 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/development/_category_.yml b/i18n/jp/docusaurus-plugin-content-docs/current/development/_category_.yml deleted file mode 100644 index be88c4014c7..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/development/_category_.yml +++ /dev/null @@ -1,8 +0,0 @@ -position: 101 -label: 'Building ClickHouse' -collapsible: true -collapsed: true -link: - type: generated-index - title: Building ClickHouse 
- slug: /development diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/development/adding_test_queries.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/development/adding_test_queries.md.hash deleted file mode 100644 index 0604598ca8c..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/development/adding_test_queries.md.hash +++ /dev/null @@ -1 +0,0 @@ -530ca4c3ce9eab59 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/development/architecture.md b/i18n/jp/docusaurus-plugin-content-docs/current/development/architecture.md deleted file mode 100644 index 96eab8c2fca..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/development/architecture.md +++ /dev/null @@ -1,265 +0,0 @@ ---- -description: 'ClickHouseのアーキテクチャと列指向設計の包括的な概要' -sidebar_label: 'アーキテクチャの概要' -sidebar_position: 50 -slug: '/development/architecture' -title: 'アーキテクチャの概要' ---- - - - - -# アーキテクチャ概要 - -ClickHouseは真の列指向DBMSです。データはカラム単位で保存され、配列(カラムのベクトルまたはチャンク)を実行する際に処理されます。 -可能な限り、個々の値ではなく、配列に対して操作がディスパッチされます。 -これを「ベクトル化クエリ実行」と呼び、実際のデータ処理コストを低下させるのに役立ちます。 - -このアイデアは新しいものではありません。 -1957年の `APL`(A programming language)やその子孫(`A +`(APL方言)、`J`(1990年)、`K`(1993年)、`Q`(Kx Systemsのプログラミング言語、2003年))にまで遡ります。 -配列プログラミングは科学的データ処理で使用されており、このアイデアはリレーショナルデータベースにおいても新しいものではありません。例えば、`VectorWise`システム(Actian CorporationによるActian Vector Analytic Databaseとしても知られる)がこのアイデアを使用しています。 - -クエリ処理を迅速化するための2つの異なるアプローチがあります:ベクトル化クエリ実行とランタイムコード生成です。後者はすべての間接呼び出しと動的ディスパッチを取り除きます。これらのアプローチのどちらも、必ずしも他のどちらより優れているわけではありません。ランタイムコード生成は多くの操作を融合させ、CPU実行ユニットとパイプラインを完全に活用する場合に優れています。一方で、ベクトル化クエリ実行は一時的なベクトルをキャッシュに書き込み、再読み込みする必要があるため、実用性が低くなる場合があります。もし一時データがL2キャッシュに収まらない場合、これが問題となります。しかし、ベクトル化クエリ実行はCPUのSIMD機能をより活用しやすいです。友人たちが書いた[研究論文](http://15721.courses.cs.cmu.edu/spring2016/papers/p5-sompolski.pdf)によれば、この2つのアプローチを組み合わせるのが最も効果的であることが示されています。ClickHouseはベクトル化クエリ実行を使用し、初期段階でランタイムコード生成に対する限定的なサポートがあります。 -## カラム {#columns} - -`IColumn`インターフェースは、メモリ内のカラム(実際にはカラムのチャンク)を表現するために使用されます。このインターフェースは、さまざまなリレーショナル演算子の実装に役立つメソッドを提供します。ほとんどの操作は不変であり、元のカラムを修正するのではなく、新たに修正されたカラムを作成します。例えば、`IColumn :: filter`メソッドはフィルタバイトマスクを受け取ります。これは`WHERE`および`HAVING`リレーショナル演算子で使用されます。その他の例としては、`ORDER BY`をサポートするための`IColumn :: permute`メソッドや、`LIMIT`をサポートするための`IColumn :: cut`メソッドがあります。 - -さまざまな`IColumn`の実装(`ColumnUInt8`、`ColumnString`など)は、カラムのメモリレイアウトを担当します。メモリレイアウトは通常、連続した配列です。整数型のカラムの場合、それは一つの連続配列(`std::vector`のようなもの)で表現されます。 `String`や`Array`カラムについては、連続的に配置された配列のすべての要素用の一つと、各配列の先頭へのオフセット用の二つのベクトルがあります。また、`ColumnConst`はメモリに1つの値だけを保存しますが、カラムのように見えます。 -## フィールド {#field} - -それでも、個々の値を操作することも可能です。個々の値を表現するために、`Field`が使用されます。`Field`は単に`UInt64`、`Int64`、`Float64`、`String`、`Array`の識別されたユニオンです。`IColumn`には、n番目の値を`Field`として取得するための`operator []`メソッドと、`Field`をカラムの末尾に追加するための`insert`メソッドがあります。これらのメソッドは、一個の値を表す一時的な`Field`オブジェクトを扱う必要があるため、非常に効率的ではありません。`insertFrom`や`insertRangeFrom`など、より効率的なメソッドもあります。 - -`Field`は、テーブル用の特定のデータ型に関する十分な情報を持っていません。例えば、`UInt8`、`UInt16`、`UInt32`、`UInt64`はすべて`Field`では`UInt64`として表されます。 -## 漏れた抽象 {#leaky-abstractions} - -`IColumn`はデータの一般的なリレーショナル変換のメソッドを持っていますが、すべてのニーズに応えるわけではありません。例えば、`ColumnUInt64`には2つのカラムの合計を計算するメソッドがなく、`ColumnString`には部分文字列検索を実行するメソッドがありません。これらの無数のルーチンは、`IColumn`の外部で実装されています。 - -カラムに対するさまざまな関数は、`IColumn`メソッドを使用して`Field`値を抽出する一般的かつ非効率的な方法で実装するか、特定の`IColumn`実装でデータの内部メモリレイアウトに関する知識を利用して特化した方法で実装することができます。これは、特定の`IColumn`型へのキャスト関数を使用して内部表現を直接扱うことで実現されます。例えば、`ColumnUInt64`には、内部配列への参照を返す`getData`メソッドがあり、その後、別のルーチンがその配列を直接読み取ったり埋めたりします。「漏れた抽象」の存在により、さまざまなルーチンの効率的な特化が可能になります。 -## データ型 {#data_types} - 
-`IDataType`は、シリアル化とデシリアル化を担当します:バイナリまたはテキスト形式でカラムのチャンクや個々の値を読み書きします。`IDataType`は、テーブル内のデータ型に直接対応します。例えば、`DataTypeUInt32`、`DataTypeDateTime`、`DataTypeString`などがあります。 - -`IDataType`と`IColumn`は互いに緩やかに関連しています。異なるデータ型は、同じ`IColumn`実装でメモリ内に表現されることがあります。例えば、`DataTypeUInt32`と`DataTypeDateTime`はどちらも`ColumnUInt32`または`ColumnConstUInt32`によって表されます。さらに、同じデータ型は異なる`IColumn`実装によって表されることもあります。たとえば、`DataTypeUInt8`は`ColumnUInt8`または`ColumnConstUInt8`で表すことができます。 - -`IDataType`はメタデータのみを保存します。例えば、`DataTypeUInt8`は何も保存せず(仮想ポインタ`vptr`を除く)、`DataTypeFixedString`は`N`(固定サイズの文字列のサイズ)だけを保存します。 - -`IDataType`には、さまざまなデータ形式用のヘルパーメソッドがあります。例としては、値を可能な引用符を付けてシリアル化するメソッド、JSON用に値をシリアル化するメソッド、XML形式の一部として値をシリアル化するメソッドがあります。データ形式との直接的な対応はありません。例えば、異なるデータ形式`Pretty`と`TabSeparated`は、`IDataType`インターフェースの`serializeTextEscaped`ヘルパーメソッドを共用することができます。 -## ブロック {#block} - -`Block`は、メモリ内のテーブルのサブセット(チャンク)を表すコンテナです。これは、`(IColumn, IDataType, カラム名)`の三組からなるセットです。クエリの実行中、データは`Block`によって処理されます。`Block`があれば、データ(`IColumn`オブジェクトに格納されています)、そのタイプについての情報(`IDataType`に格納され、どのようにそのカラムを扱うかを示します)、そしてカラム名があります。これは、元のテーブルのカラム名のままであることもあれば、計算結果の一時的な結果を取得するために割り当てられた人工的な名前であることもあります。 - -ブロック内のカラムに対して関数を計算する場合、結果をブロックに追加するための別のカラムを追加し、関数の引数に対するカラムは触れません。なぜなら、操作は不変だからです。後で不要になったカラムはブロックから削除できますが、修正はできません。これは、共通の部分式を排除するのに便利です。 - -データの処理ごとにブロックが作成されます。同じ計算のために、カラム名とタイプは異なるブロックで同じままで、カラムデータだけが変更されます。ブロックデータとブロックヘッダーを分離する方が良いです。なぜなら、小さなブロックサイズは一時的な文字列のコピーに対して高いオーバーヘッドを引き起こすためです(shared_ptrやカラム名のため)。 -## プロセッサー {#processors} - -詳細は[https://github.com/ClickHouse/ClickHouse/blob/master/src/Processors/IProcessor.h](https://github.com/ClickHouse/ClickHouse/blob/master/src/Processors/IProcessor.h)を参照してください。 -## フォーマット {#formats} - -データフォーマットはプロセッサーで実装されています。 -## I/O {#io} - -バイト指向の入出力のために、`ReadBuffer`と`WriteBuffer`の抽象クラスがあります。これらはC++の`iostream`の代わりに使用されます。心配しないでください。成熟したC++プロジェクトは、人道的な理由から`iostream`の他の方法を使用しています。 - -`ReadBuffer`と`WriteBuffer`は、単に連続したバッファと、そのバッファ内の位置を指すカーソルです。実装は、バッファ用のメモリを所有する場合と所有しない場合があります。バッファに以下のデータを埋め込むための仮想メソッドがあります(`ReadBuffer`の場合)またはどこかにバッファをフラッシュするためのメソッドがあります(`WriteBuffer`の場合)。仮想メソッドは滅多に呼び出されません。 - -`ReadBuffer`/`WriteBuffer`の実装は、ファイルやファイルディスクリプタ、ネットワークソケットとの作業、圧縮の実装(`CompressedWriteBuffer`は他のWriteBufferで初期化され、データを書き込む前に圧縮を実行します)などの目的で使用されます。名前`ConcatReadBuffer`、`LimitReadBuffer`、および`HashingWriteBuffer`は便宜上決まっています。 - -Read/WriteBuffersはバイトのみを扱います。入力/出力をフォーマットするのに役立つ関数は、`ReadHelpers`および`WriteHelpers`ヘッダファイルにあります。例えば、数字を10進形式で書き込むためのヘルパーがあります。 - -`JSON`形式で結果セットをstdoutに書き込もうとすると、何が起こるかを見てみましょう。 -結果セットは、プル型の`QueryPipeline`から取得する準備が整っています。 -まず、バイトをstdoutに書き込むために`WriteBufferFromFileDescriptor(STDOUT_FILENO)`を作成します。 -次に、クエリパイプラインからの結果を、`JSONRowOutputFormat`に接続します。これは、その`WriteBuffer`で初期化され、行を`JSON`形式でstdoutに書き込むためのものです。 -これは、`complete`メソッドを介して行うことができ、これによりプル型の`QueryPipeline`は完了した`QueryPipeline`になります。 -内部的に、`JSONRowOutputFormat`はさまざまなJSON区切り文字を出力し、`IDataType::serializeTextJSON`メソッドを呼び出します。このとき、`IColumn`への参照と行番号を引数として渡します。結果として、`IDataType::serializeTextJSON`は、`WriteHelpers.h`からのメソッドを呼び出します。例えば、数値型には`writeText`、`DataTypeString`には`writeJSONString`が使用されます。 -## テーブル {#tables} - -`IStorage`インターフェースはテーブルを表します。このインターフェースの異なる実装は異なるテーブルエンジンです。例として`StorageMergeTree`、`StorageMemory`などがあります。これらのクラスのインスタンスは、単なるテーブルです。 - -`IStorage`の主要なメソッドは`read`と`write`であり、`alter`、`rename`、`drop`など他のメソッドもあります。`read`メソッドは、次の引数を受け取ります:テーブルから読み取るカラムのセット、考慮すべき`AST`クエリ、および希望するストリームの数。`Pipe`を返します。 - -ほとんどの場合、readメソッドは指定されたカラムをテーブルから読み取ることだけを担当し、さらなるデータ処理は行いません。 -すべての後続のデータ処理は、`IStorage`の責任外のパイプラインの別の部分によって処理されます。 - -ただし、注目すべき例外があります: - -- 
`AST`クエリが`read`メソッドに渡され、テーブルエンジンはそれを使用してインデックス使用を導出し、テーブルから少ないデータを読み込むことができます。 -- ときどき、テーブルエンジンはデータを特定の段階まで自ら処理することもあります。たとえば、`StorageDistributed`はリモートサーバーにクエリを送信し、異なるリモートサーバーからのデータをマージできる段階まで処理を依頼し、その前処理されたデータを返すことができます。クエリインタプリタはその後データ処理を完了します。 - -テーブルの`read`メソッドは、複数の`Processors`から成る`Pipe`を返すことができます。これらの`Processors`はテーブルから並行して読み取ることができます。 -次に、これらのプロセッサーを様々な他の変換(式評価やフィルタリングなど)と接続できます。これらは独立して計算できます。 -その後、これらの上に`QueryPipeline`を作成し、`PipelineExecutor`を介して実行します。 - -また、`TableFunction`もあります。これらは、クエリの`FROM`句で使用するための一時的な`IStorage`オブジェクトを返す関数です。 - -テーブルエンジンを実装する方法をすばやく理解するには、`StorageMemory`や`StorageTinyLog`のようなシンプルなものを参照してください。 - -> `read`メソッドの結果として、`IStorage`は`QueryProcessingStage`を返します。これは、ストレージ内で既に計算されたクエリの部分に関する情報です。 -## パーサー {#parsers} - -手書きの再帰的降下パーサーがクエリを解析します。例えば、`ParserSelectQuery`は、クエリのさまざまな部分の基本的なパーサーを再帰的に呼び出します。パーサーは`AST`を生成します。`AST`は`IAST`のインスタンスであるノードで構成されています。 - -> パーサージェネレーターは歴史的な理由で使用されていません。 -## インタプリター {#interpreters} - -インタプリターは、ASTからクエリ実行パイプラインを作成する責任があります。`InterpreterExistsQuery`や`InterpreterDropQuery`のようなシンプルなインタプリターと、より高度な`InterpreterSelectQuery`があります。 - -クエリ実行パイプラインは、チャンク(特定タイプのカラムのセット)を消費および生成できるプロセッサーの組み合わせです。 -プロセッサーはポートを介して通信し、複数の入力ポートと複数の出力ポートを持つことができます。 -詳細な説明は[src/Processors/IProcessor.h](https://github.com/ClickHouse/ClickHouse/blob/master/src/Processors/IProcessor.h)にあります。 - -例えば、`SELECT`クエリを解釈した結果は、「プル型」の`QueryPipeline`であり、結果セットを読み取る特別な出力ポートを持っています。 -`INSERT`クエリの結果は、「プッシュ型」の`QueryPipeline`であり、挿入データを書くための入力ポートを持っています。 -`INSERT SELECT`クエリの解釈結果は、入力や出力を持たず、`SELECT`から`INSERT`へ同時にデータをコピーする「完了した」`QueryPipeline`です。 - -`InterpreterSelectQuery`は、クエリの分析や変換のために`ExpressionAnalyzer`および`ExpressionActions`メカニズムを使用します。ここで、ほとんどのルールベースのクエリ最適化が行われます。`ExpressionAnalyzer`は非常に複雑であり、書き直す必要があります。さまざまなクエリの変換や最適化は、モジュラーな変換を許可するために別個のクラスに抽出されるべきです。 - -インタプリターにおける問題に対処するために、新しい`InterpreterSelectQueryAnalyzer`が開発されました。これは、新しい`InterpreterSelectQuery`のバージョンで、`ExpressionAnalyzer`を使用せず、`AST`と`QueryPipeline`との間に`QueryTree`という追加の抽象層を導入します。これは、実際に生産環境で使用できる準備が整っていますが、万が一のために、`enable_analyzer`設定の値を`false`に設定することでオフにできます。 -## 関数 {#functions} - -普通の関数と集約関数があります。集約関数については次のセクションを参照してください。 - -普通の関数は行数を変更せず、各行を独立して処理しているかのように動作します。実際、関数は個々の行ではなく、データの`Block`に対して呼び出され、ベクトル化クエリ実行を実現します。 - -`[blockSize](/sql-reference/functions/other-functions#blockSize)`、`[rowNumberInBlock](/sql-reference/functions/other-functions#rowNumberInBlock)`、および`[runningAccumulate](/sql-reference/functions/other-functions#runningaccumulate)`といった付随的な関数もあり、これらはブロック処理を利用し、行の独立性を破っています。 - -ClickHouseは強い型付けを持っているため、暗黙的な型変換はありません。関数が特定の型の組み合わせをサポートしていない場合、例外をスローします。しかし、関数は多くの異なる型の組み合わせに対して機能します(オーバーロード可能です)。たとえば、`plus`関数(`+`演算子を実装するため) は任意の数値型の組み合わせで機能します:`UInt8` + `Float32`、`UInt16` + `Int8`など。また、一部の可変引数関数は、任意の数の引数を受け取ることができます。たとえば、`concat`関数です。 - -関数を実装することは、サポートされるデータ型やサポートされる`IColumns`を明示的にディスパッチするため、やや不便な場合があります。たとえば、`plus`関数には、数値型の各組み合わせや左および右の引数が定数か非定数かで、C++テンプレートの展開によって生成されたコードがあります。 - -ランタイムコード生成を実装する良い場所です。これにより、テンプレートコードの膨張を回避でき、融合関数(融合乗算-加算など)や、1回のループイテレーションで複数の比較を行うことが可能になります。 - -ベクトル化クエリ実行のため、関数はショートサーキットされません。たとえば、`WHERE f(x) AND g(y)`と書いた場合、`f(x)`がゼロである行であっても、両方の側が計算されます(`f(x)`がゼロの定数式でない限り)。しかし、`f(x)`条件の選択性が高く、`f(x)`の計算が`g(y)`よりもはるかに安価であれば、マルチパス計算を実施する方が良いでしょう。最初に`f(x)`を計算し、結果でカラムをフィルタリングし、次に小さなフィルタリングされたデータチャンクのためにのみ`g(y)`を計算します。 -## 集約関数 {#aggregate-functions} - -集約関数は状態を持つ関数です。これらは渡された値をどこかの状態に累積し、その状態から結果を取得できるようにします。これらは`IAggregateFunction`インターフェースによって管理されます。状態は非常にシンプルな場合もあれば(`AggregateFunctionCount`の状態は単なる1つの`UInt64`値です)、かなり複雑な場合もあります(`AggregateFunctionUniqCombined`の状態は、線形配列、ハッシュテーブル、および`HyperLogLog`確率的データ構造の組み合わせです)。 
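At the SQL level these internal states surface through the `AggregateFunction` data type and the `-State` / `-Merge` combinators. The example below is only an illustrative sketch (the table and values are invented for this page) and is not itself part of the engine internals being described:

```sql title="Aggregate states at the SQL level (sketch)"
CREATE TABLE daily_uniques
(
    day   Date,
    users AggregateFunction(uniq, UInt64)
)
ENGINE = AggregatingMergeTree
ORDER BY day;

-- uniqState() stores the partially aggregated state rather than a final number
INSERT INTO daily_uniques
SELECT toDate('2024-01-01') AS day, uniqState(number)
FROM numbers(1000)
GROUP BY day;

-- uniqMerge() combines states and produces the final result
SELECT day, uniqMerge(users) AS unique_users
FROM daily_uniques
GROUP BY day;
```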
- -状態は、ハイカードinalityの`GROUP BY`クエリを実行する際に複数の状態を扱うために`Arena`(メモリプール)に割り当てられます。状態は、何らかの注意が必要なコンストラクタとデストラクタを持つことがあります。複雑な集約状態は、追加のメモリを自分自身で割り当てることができます。状態の作成および破棄と、それらの所有権と破棄順序の適切な引き渡しに注目する必要があります。 - -集約状態は、分散クエリ実行中にネットワークを越えて渡すためや、十分なRAMがないディスクに書き込むためにシリアル化およびデシリアル化できます。集約関数のデータ型である`DataTypeAggregateFunction`にエクスポートして、データの増分集計を可能にすることさえできます。 - -> 集約関数の状態に対するシリアル化データ形式は、現在はバージョン管理されていません。集約状態が一時的に保持されている場合は問題ありませんが、我々は増分集計用の`AggregatingMergeTree`テーブルエンジンを持ち、既に生産環境で使用されています。そのため、将来的に集約関数のシリアル化形式を変更する際には後方互換性が求められます。 -## サーバー {#server} - -サーバは、いくつかの異なるインターフェースを実装しています: - -- 外部クライアント用のHTTPインターフェース。 -- ネイティブClickHouseクライアントおよび分散クエリ実行中のサーバー間通信のためのTCPインターフェース。 -- レプリケーションのためのデータ転送インターフェース。 - -内部的には、コルーチンやファイバーのない単純なマルチスレッドサーバーです。サーバーは高いレートの単純なクエリを処理するようには設計されておらず、比較的低いレートの複雑なクエリを処理するように設計されており、それぞれが分析のために大量のデータを処理できます。 - -サーバーはクエリ実行に必要な環境を備えた`Context`クラスを初期化します:使用可能なデータベースのリスト、ユーザーとアクセス権、設定、クラスター、プロセスリスト、クエリログなど。インタプリターはこの環境を使用します。 - -サーバーTCPプロトコルに対して完全な後方および前方の互換性を維持しています:古いクライアントは新しいサーバーと対話でき、新しいクライアントは古いサーバーと対話できます。しかし、我々は永遠にこれを維持したくなく、約1年後には古いバージョンのサポートを削除します。 - -:::note -ほとんどの外部アプリケーションには、HTTPインターフェースを使用することをお勧めします。これはシンプルで使いやすいためです。TCPプロトコルは内部データ構造に密接に結びついています:データのブロックを渡すための内部形式を使用し、圧縮データのためのカスタムフレーミングを使用します。Cライブラリをそのプロトコルのためにリリースしていないのは、ClickHouseのコードベースの大部分にリンクする必要があり、実用的でないからです。 -::: -## 設定 {#configuration} - -ClickHouse ServerはPOCO C++ Librariesに基づき、`Poco::Util::AbstractConfiguration`を使用してその設定を表現します。設定は、`DaemonBase`クラスから派生した`Poco::Util::ServerApplication`クラスによって保持されます。このクラスは`DB::Server`クラスを実装し、clickhouse-serverそのものを実現します。したがって、設定は`ServerApplication::config()`メソッドを使用してアクセスできます。 - -設定は複数のファイル(XMLまたはYAML形式)から読み取られ、`ConfigProcessor`クラスによって単一の`AbstractConfiguration`にマージされます。設定はサーバーの起動時に読み込まれ、設定ファイルのいずれかが更新されたり削除されたり追加されたりした場合に再読み込みされることがあります。`ConfigReloader`クラスは、これらの変更を定期的に監視し、再読み込み手順も担当します。また、`SYSTEM RELOAD CONFIG`クエリも設定を再読み込みさせます。 - -クエリや`Server`以外のサブシステムの設定は、`Context::getConfigRef()`メソッドを使用してアクセスできます。サーバーの再起動なしに設定を再読み込みできるすべてのサブシステムは、`Server::main()`メソッド内で再読み込みコールバックに自身を登録する必要があります。新しい設定にエラーがある場合、ほとんどのサブシステムは新しい設定を無視し、警告メッセージをログに記録し、以前に読み込まれた設定で動作し続けます。`AbstractConfiguration`の性質上、特定のセクションへの参照を渡すことはできないため、通常は`String config_prefix`が代わりに使用されます。 -## スレッドとジョブ {#threads-and-jobs} - -クエリを実行し、副次的な活動を行うために、ClickHouseはスレッドプールの1つからスレッドを割り当て、頻繁なスレッドの作成と破棄を避けます。目的やジョブの構造に応じて、いくつかのスレッドプールがあります: - * クライアントセッション用のサーバープール。 - * 一般的なジョブ、バックグラウンド活動、スタンドアロンスレッドのためのグローバルスレッドプール。 - * 主にIOでブロックされ、CPU集約的でないジョブのためのIOスレッドプール。 - * 定期的なタスクのためのバックグラウンドプール。 - * ステップに分割できる先読み可能なタスクのためのプール。 - -サーバープールは、`Server::main()`メソッド内で定義された`Poco::ThreadPool`クラスのインスタンスです。このプールは最大`max_connection`スレッドを持つことができます。各スレッドは単一のアクティブ接続に専念します。 - -グローバルスレッドプールは`GlobalThreadPool`シングルトンクラスです。これからスレッドを割り当てるために`ThreadFromGlobalPool`が使用されます。このクラスは`std::thread`に似たインターフェースを持ちますが、グローバルプールからスレッドを引き出し、すべての必要な初期化を行います。これは次の設定で構成されています: - * `max_thread_pool_size` - プール内のスレッド数の制限。 - * `max_thread_pool_free_size` - 新しいジョブを待っているアイドルスレッドの制限。 - * `thread_pool_queue_size` - 予約されたジョブ数の制限。 - -グローバルプールはユニバーサルであり、以下で説明するすべてのプールはこれを基に実装されています。これはプールの階層と考えることができます。すべての専門プールは`ThreadPool`クラスを使用してグローバルプールからスレッドを取得します。したがって、すべての専門プールの主な目的は、同時ジョブの数に制限を適用し、ジョブのスケジューリングを行うことです。プール内のスレッド数がジョブ数よりも少ない場合、`ThreadPool`は優先順位付きのキューにジョブを蓄積します。各ジョブには整数の優先順位があり、デフォルトの優先順位はゼロです。優先順位値が高いすべてのジョブは、低い優先順位のジョブが実行される前に開始されます。しかし、すでに実行中のジョブには違いがないため、優先順位はプールが過負荷になっているときのみ重要です。 - 
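For reference, the three global-pool settings mentioned above are server-level settings and live in the server configuration file. The values below are only an illustration (they are close to the usual defaults); tune them for your workload:

```xml title="Global thread pool settings (sketch)"
<clickhouse>
    <!-- Upper bound on the number of threads in the global pool -->
    <max_thread_pool_size>10000</max_thread_pool_size>
    <!-- Idle threads kept alive while waiting for new jobs -->
    <max_thread_pool_free_size>1000</max_thread_pool_free_size>
    <!-- Maximum number of scheduled jobs waiting in the queue -->
    <thread_pool_queue_size>10000</thread_pool_queue_size>
</clickhouse>
```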
-IOスレッドプールは、`IOThreadPool::get()`メソッドを介してアクセス可能な単純な`ThreadPool`として実装されています。これは、グローバルプールと同様に、`max_io_thread_pool_size`、`max_io_thread_pool_free_size`、および`io_thread_pool_queue_size`設定で構成されます。IOスレッドプールの主な目的は、IOジョブによってグローバルプールが枯渇することを避け、クエリがCPUを完全に活用できるようにすることです。S3へのバックアップはかなりの量のIO操作を行い、対話型クエリに影響を与えないようにするため、`max_backups_io_thread_pool_size`、`max_backups_io_thread_pool_free_size`、`backups_io_thread_pool_queue_size`設定で構成された別の`BackupsIOThreadPool`があります。 - -定期的なタスク実行には、`BackgroundSchedulePool`クラスがあります。`BackgroundSchedulePool::TaskHolder`オブジェクトを使用してタスクを登録でき、このプールは同時に二つのジョブを実行しないことを保証します。また、特定の未来の瞬間にタスクの実行を延期したり、一時的にタスクを無効にしたりすることもあります。グローバル`Context`は、このクラスの異なる目的のためにいくつかのインスタンスを提供します。一般的な目的のタスクには`Context::getSchedulePool()`が使用されます。 - -前読み可能なタスクのための専門パラメータプールもあります。`IExecutableTask`タスクは、ジョブの順序付けられたステップのシーケンスに分割できます。短いタスクが長いタスクよりも優先されるようにこれらのタスクをスケジューリングするには、`MergeTreeBackgroundExecutor`が使用されます。その名の通り、これはマージや変更、取得、移動といったバックグラウンドのMergeTree関連操作のために使用されます。プールインスタンスは、`Context::getCommonExecutor()`やその他の類似のメソッドを用いてアクセスできます。 - -ジョブに使用されるプールが何であれ、開始時にそのジョブの`ThreadStatus`インスタンスが作成されます。これは、スレッドごとの情報をカプセル化します:スレッドID、クエリID、パフォーマンスカウンター、リソース消費、その他の便利なデータなど。ジョブは`CurrentThread::get()`コールによって、スレッドローカルポインタを介してこれをアクセスできますので、すべての関数に渡す必要はありません。 - -もしスレッドがクエリ実行に関連している場合、`ThreadStatus`に添付される最も重要なものはクエリコンテキスト`ContextPtr`です。各クエリにはサーバープール内にマスタースレッドがあります。マスタースレッドは、`ThreadStatus::QueryScope query_scope(query_context)`オブジェクトを保持してアタッチします。マスタースレッドはまた、`ThreadGroupStatus`オブジェクトで表されるスレッドグループを作成します。このクエリ実行中に割り当てられたすべての追加スレッドは、`CurrentThread::attachTo(thread_group)`コールによって、それのスレッドグループに接続されます。スレッドグループは、単一のタスクに割り当てられたすべてのスレッドによるメモリ消費を追跡し、プロファイルイベントカウンターを集約するために使用されます(詳細については`MemoryTracker`および`ProfileEvents::Counters`クラスを参照してください)。 -## 同時実行制御 {#concurrency-control} - -並行化できるクエリは、`max_threads`設定を使用して自らを制限します。この設定のデフォルト値は、単一のクエリが最良の方法で全てのCPUコアを利用できるよう選択されます。しかし、もし複数の同時クエリがあり、それぞれがデフォルトの`max_threads`設定値を使用した場合はどうでしょうか?その場合、クエリはCPUリソースを共有します。OSはスレッドを常に切り替えて公平性を確保しますが、これにはある程度のパフォーマンスペナルティが伴います。`ConcurrencyControl`は、これらのペナルティに対処し、多くのスレッドを割り当てるのを避けるのに役立ちます。設定`concurrent_threads_soft_limit_num`は、CPUの圧力がかかる前に同時に割り当てられるスレッド数を制限するために使用されます。 - -CPUの`スロット`という概念が導入されます。スロットは同時実行の単位です:スレッドが実行されるには、事前にスロットを取得し、スレッドが停止するときにそれを解放する必要があります。スロットの数は、サーバ内で全体として限られています。複数の同時クエリがあり、要求の合計がスロットの総数を超える場合、`ConcurrencyControl`は公正にCPUスロットスケジューリングを行う責任を担います。 - -各スロットは、次の状態を持つ独立した状態機械として見なすことができます: - * `free`:スロットは任意のクエリに割り当てることができます。 - * `granted`:スロットは特定のクエリによって`allocated`されていますが、まだいかなるスレッドにも取得されていません。 - * `acquired`:スロットは特定のクエリによって`allocated`され、スレッドによって取得されています。 - -注意すべきことは、`allocated`スロットが2つの異なる状態、`granted`と`acquired`にあることです。前者は、実質的に短いはずの遷移状態です(スロットがクエリに割り当てられてから、スロットがそのクエリの任意のスレッドによってアップスケーリング手続きが行われるまで)。 - -```mermaid -stateDiagram-v2 - direction LR - [*] --> free - free --> allocated: allocate - state allocated { - direction LR - [*] --> granted - granted --> acquired: acquire - acquired --> [*] - } - allocated --> free: release -``` - -`ConcurrencyControl`のAPIは、次の関数から構成されています: -1. クエリのためのリソース割当てを作成します:`auto slots = ConcurrencyControl::instance().allocate(1, max_threads);`これは、最初のスロットが即時に許可されますが、残りのスロットは後で許可されるかもしれませんので、1つ以上のスロットが割り当てられます。つまり、制限はソフトであり、すべてのクエリは少なくとも1つのスレッドを取得します。 -2. 各スレッドのために、割当てからスロットを取得する必要があります:`while (auto slot = slots->tryAcquire()) spawnThread([slot = std::move(slot)] { ... });`。 -3. 
スロットの総数を更新します:`ConcurrencyControl::setMaxConcurrency(concurrent_threads_soft_limit_num)`。サーバーを再起動せずに実行中に行えます。 - -このAPIにより、クエリは少なくとも1つのスレッドから始め、後で`max_threads`までスケールアップすることができます。 -## Distributed Query Execution {#distributed-query-execution} - -クラスター設定内のサーバーはほとんど独立しています。クラスター内の1つまたはすべてのサーバーに `Distributed` テーブルを作成できます。 `Distributed` テーブルはデータ自体を保存せず、クラスター内の複数のノードにあるすべてのローカルテーブルへの「ビュー」を提供するだけです。 `Distributed` テーブルからSELECTすると、そのクエリは書き換えられ、負荷分散設定に従ってリモートノードを選択し、クエリが送信されます。 `Distributed` テーブルは、リモートサーバーにクエリを処理するよう要求し、異なるサーバーからの中間結果をマージできる段階まで処理を行います。その後、中間結果を受け取り、それらをマージします。分散テーブルは、可能な限り多くの作業をリモートサーバーに分散し、ネットワーク上で多くの中間データを送信しません。 - -IN や JOIN 句にサブクエリがある場合、そしてそれぞれが `Distributed` テーブルを使用する場合、事態はより複雑になります。これらのクエリの実行には異なる戦略があります。 - -分散クエリ実行のためのグローバルクエリプランはありません。各ノードは、ジョブの一部に対するローカルクエリプランを持っています。単純な1パスの分散クエリ実行のみが存在します:リモートノードにクエリを送信し、その後結果をマージします。しかし、これは高カーディナリティの `GROUP BY` や大きな一時データ量を伴う複雑なクエリには適していません。そのような場合、サーバー間でデータを「再シャッフル」する必要があり、追加の調整が必要です。ClickHouseはそのようなクエリ実行をサポートしておらず、改善する必要があります。 - -## Merge Tree {#merge-tree} - -`MergeTree` は、主キーによるインデックスをサポートするストレージエンジンのファミリーです。主キーは任意のカラムまたは式のタプルになることができます。 `MergeTree` テーブル内のデータは「パーツ」に保存されます。各パーツは主キー順にデータを保存するため、データは主キーのタプルによって辞書順に並べられます。すべてのテーブルカラムは、これらのパーツ内の別々の `column.bin` ファイルに保存されます。ファイルは圧縮ブロックで構成され、各ブロックは通常、平均値のサイズに応じて64KBから1MBの未圧縮データを含みます。ブロックは、カラム値が順に配置されているものです。カラム値は各カラムで同じ順序になっているため(主キーが順序を定義)、複数のカラムを反復処理する際には、対応する行に対する値を取得できます。 - -主キー自体は「スパース」です。それはすべての行を指し示すのではなく、特定のデータ範囲のみに対応します。別の `primary.idx` ファイルには、N番目の行の主キーの値があります。ここでNは `index_granularity`(通常、N = 8192)と呼ばれます。また、各カラムには、データファイル内のN番目の行に対するオフセットである「マーク」を持つ `column.mrk` ファイルがあります。各マークは、圧縮ブロックの開始位置へのオフセットと、データの開始位置へのオフセットのペアです。通常、圧縮ブロックはマークに整列され、解凍されたブロックのオフセットはゼロです。 `primary.idx` のデータは常にメモリに常駐しており、 `column.mrk` ファイルのデータはキャッシュされます。 - - `MergeTree` 内のパートから何かを読み取るつもりのときは、 `primary.idx` データを確認し、要求されたデータを含む可能性のある範囲を特定し、その後 `column.mrk` データを確認して、これらの範囲を読み始めるためのオフセットを計算します。スパースなため、余分なデータが読み取られる場合があります。ClickHouseは単純なポイントクエリの高負荷には適していません。各キーに対して `index_granularity` 行が含まれる全範囲を読み取る必要があり、各カラムに対して全圧縮ブロックを解凍する必要があります。インデックスをスパースにしたのは、単一サーバーで兆単位の行を目立ったメモリ消費なしに保持できなければならなかったからです。また、主キーがスパースであるため、ユニークではなく、INSERT時にテーブル内のキーの存在を確認できません。テーブル内に同じキーを持つ行が多数存在する可能性があります。 - - `MergeTree` にデータを `INSERT` すると、そのデータの集まりは主キー順に整列され、新しいパートを形成します。バックグラウンドスレッドは定期的にいくつかのパーツを選択し、単一のソートされたパートにマージして、パーツの数を比較的低く保ちます。これが `MergeTree` と呼ばれる理由です。もちろん、マージは「書き込み増幅」を引き起こします。すべてのパーツは不変です。作成および削除されるだけで、修正はされません。SELECTが実行されると、テーブルのスナップショット(一連のパーツ)を保持します。マージ後、障害発生時に回復を容易にするために、古いパーツも一時的に保持しますので、マージされたパートが壊れていると思われる場合は、それを元のパーツと置き換えることができます。 - - `MergeTree`は LSM ツリーではありません。なぜなら、MEMTABLE や LOG を含まないからです:挿入されたデータはファイルシステムに直接書き込まれます。この振る舞いにより、MergeTree はバッチでのデータ挿入により適しています。したがって、少量の行を頻繁に挿入することは、MergeTree にとって理想的ではありません。たとえば、1秒あたり数行は問題ありませんが、1秒あたり千回行うことは MergeTree にとって最適ではありません。ただし、小さな挿入のための非同期挿入モードがあります。この制限を克服するためにこのようにしました。なぜなら、私たちのアプリケーションで既にバッチでデータを挿入しているからです。 - -バックグラウンドマージ中に追加の作業を行っている MergeTree エンジンがいくつかあります。例として、`CollapsingMergeTree` および `AggregatingMergeTree` があります。これは、更新の特別なサポートとして扱うことができます。これらは実際の更新ではないことを心に留めておいてください。ユーザーは通常、バックグラウンドマージが実行される時間を制御できず、 `MergeTree` テーブル内のデータはほとんど常に単一のパートではなく、複数のパートに保存されます。 - -## Replication {#replication} - -ClickHouse のレプリケーションは、テーブル単位で構成できます。同じサーバー上に一部はレプリケートされたテーブルと一部はレプリケートされていないテーブルを持つことができます。また、1つのテーブルが二重レプリケーションされている一方で、別のテーブルは三重レプリケーションされている場合もあります。 - -レプリケーションは、`ReplicatedMergeTree` ストレージエンジンで実装されています。ストレージエンジンのパラメータとして `ZooKeeper` でのパスが指定されます。同じパスを持つすべてのテーブルは、互いのレプリカになります。これにより、データは同期され、一貫性が保たれます。レプリカは、テーブルを作成または削除することで動的に追加および削除できます。 - -レプリケーションは非同期のマルチマスター方式を使用しています。 `ZooKeeper` 
とセッションを持つ任意のレプリカにデータを挿入でき、そのデータは他のすべてのレプリカに非同期に複製されます。ClickHouse は UPDATE をサポートしていないため、レプリケーションは競合がありません。デフォルトでは挿入の過半数の承認はありませんので、一つのノードが故障した場合には直前に挿入されたデータが失われる可能性があります。 `insert_quorum` 設定を使って挿入の過半数を有効にできます。 - -レプリケーションのメタデータは ZooKeeper に保存されます。アクションは、パートを取得すること、パーツをマージすること、パーティションを削除することなど、実行するアクションのリストを示すレプリケーションログがあります。各レプリカは、そのキューにレプリケーションログをコピーし、キューからアクションを実行します。たとえば、挿入時には、「パートを取得」のアクションがログに作成され、すべてのレプリカがそのパートをダウンロードします。マージは、バイト単位で同一の結果を得るために、レプリカ間で調整されます。すべての部分は、すべてのレプリカで同じ方法でマージされます。リーダーの1人が最初に新しいマージを開始し、「マージパーツ」アクションをログに書き込みます。複数のレプリカ(またはすべて)が同時にリーダーになることができます。レプリカがリーダーにならないように制限するには、`merge_tree` 設定の `replicated_can_become_leader` を使用します。リーダーはバックグラウンドマージのスケジューリングを担当します。 - -レプリケーションは物理的です:ノード間で転送されるのは圧縮パーツのみで、クエリではありません。ほとんどのケースでは、マージは各レプリカで独立して処理され、ネットワークコストを削減してネットワーク増幅を回避します。大きなマージパーツは、重要なレプリケーション遅延がある場合にのみ、ネットワーク経由で送信されます。 - -さらに、各レプリカは、パーツのセットとそのチェックサムとして自分の状態を ZooKeeper に保存します。ローカルファイルシステムの状態が ZooKeeper の参照状態から外れた場合、レプリカは他のレプリカから不足しているパーツや壊れたパーツをダウンロードして一貫性を回復します。ローカルファイルシステムに予期しないデータや壊れたデータがある場合、ClickHouse はそれを削除せず、別のディレクトリに移動して忘れます。 - -:::note -ClickHouse クラスターは独立したシャードで構成されており、各シャードはレプリカで構成されています。クラスターは **エラスティックではない** ため、新しいシャードを追加した後、データは自動的にシャード間で再バランスされません。その代わり、クラスターの負荷は均一でないように調整されることが想定されています。この実装は、より多くの制御を提供し、比較的小さなクラスター(数十ノード)には適しています。しかし、我々が生産で使用している数百ノードのクラスターでは、このアプローチは重要な欠点となります。クラスター全体に広がるテーブルエンジンを実装し、自動的に分割およびバランスが取れる動的にレプリケートされた領域を持つ必要があります。 -::: diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/development/architecture.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/development/architecture.md.hash deleted file mode 100644 index 7b047e0d491..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/development/architecture.md.hash +++ /dev/null @@ -1 +0,0 @@ -72393d0a2cdfc8f0 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/development/build-cross-arm.md b/i18n/jp/docusaurus-plugin-content-docs/current/development/build-cross-arm.md deleted file mode 100644 index 30801a51794..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/development/build-cross-arm.md +++ /dev/null @@ -1,16 +0,0 @@ ---- -description: 'Guide for building ClickHouse from source for the AARCH64 architecture' -sidebar_label: 'Build on Linux for AARCH64' -sidebar_position: 25 -slug: '/development/build-cross-arm' -title: 'How to Build ClickHouse on Linux for AARCH64' ---- - - - - -# AARCH64向けにLinuxでClickHouseをビルドする方法 - -Aarch64マシンでClickHouseをビルドするために特別な手順は必要ありません。 - -x86 Linuxマシン上でAArch64向けにClickHouseをクロスコンパイルするには、`cmake`に次のフラグを渡します: `-DCMAKE_TOOLCHAIN_FILE=cmake/linux/toolchain-aarch64.cmake` diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/development/build-cross-arm.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/development/build-cross-arm.md.hash deleted file mode 100644 index 8e6547343e2..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/development/build-cross-arm.md.hash +++ /dev/null @@ -1 +0,0 @@ -ca9e49d837a59d57 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/development/build-cross-loongarch.md b/i18n/jp/docusaurus-plugin-content-docs/current/development/build-cross-loongarch.md deleted file mode 100644 index df018b6ddcf..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/development/build-cross-loongarch.md +++ /dev/null @@ -1,27 +0,0 @@ ---- -description: 'LoongArch64アーキテクチャ向けにソースからClickHouseをビルドするためのガイド' -sidebar_label: 'LoongArch64向けLinuxでのビルド' -sidebar_position: 35 -slug: '/development/build-cross-loongarch' -title: 'LoongArch64向けLinuxでのビルド' ---- - - - - -# 
LinuxでのLoongArch64用ビルド - -ClickHouseはLoongArch64に対して実験的なサポートを提供しています。 - -## ClickHouseをビルドする {#build-clickhouse} - -ビルドに必要なllvmのバージョンは19.1.0以上である必要があります。 - -```bash -cd ClickHouse -mkdir build-loongarch64 -CC=clang-19 CXX=clang++-19 cmake . -Bbuild-loongarch64 -G Ninja -DCMAKE_TOOLCHAIN_FILE=cmake/linux/toolchain-loongarch64.cmake -ninja -C build-loongarch64 -``` - -生成されたバイナリは、LoongArch64 CPUアーキテクチャを搭載したLinuxでのみ実行されます。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/development/build-cross-loongarch.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/development/build-cross-loongarch.md.hash deleted file mode 100644 index 500bcb15d0e..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/development/build-cross-loongarch.md.hash +++ /dev/null @@ -1 +0,0 @@ -210904acfc2ffdf4 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/development/build-cross-osx.md b/i18n/jp/docusaurus-plugin-content-docs/current/development/build-cross-osx.md deleted file mode 100644 index 0ceebc5856c..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/development/build-cross-osx.md +++ /dev/null @@ -1,64 +0,0 @@ ---- -description: 'macOS システムのために Linux からのクロスコンパイルガイド' -sidebar_label: 'Linux で macOS 用にビルドする' -sidebar_position: 20 -slug: '/development/build-cross-osx' -title: 'Linux から macOS 用にビルドする' ---- - - - - -# How to Build ClickHouse on Linux for macOS - -これは、Linuxマシンを持っていて、OS X上で実行される`clickhouse`バイナリをビルドしたい場合に関するものです。 -主な使用ケースは、Linuxマシンで実行される継続的インテグレーションチェックです。 -macOS上で直接ClickHouseをビルドしたい場合は、[ネイティブビルド手順](../development/build-osx.md)に進んでください。 - -macOS用のクロスビルドは、[ビルド手順](../development/build.md)に基づいていますので、まずはそれに従ってください。 - -以下のセクションでは、ClickHouseを`x86_64` macOS用にビルドする手順を説明します。 -ARMアーキテクチャをターゲットにする場合は、手順中のすべての`x86_64`の出現を`aarch64`に置き換えてください。 -例えば、手順全体で`x86_64-apple-darwin`を`aarch64-apple-darwin`に置き換えます。 - -## Install Cross-Compilation Toolset {#install-cross-compilation-toolset} - -`cctools`をインストールするパスを`${CCTOOLS}`として記憶します。 - -```bash -mkdir ~/cctools -export CCTOOLS=$(cd ~/cctools && pwd) -cd ${CCTOOLS} - -git clone https://github.com/tpoechtrager/apple-libtapi.git -cd apple-libtapi -git checkout 15dfc2a8c9a2a89d06ff227560a69f5265b692f9 -INSTALLPREFIX=${CCTOOLS} ./build.sh -./install.sh -cd .. - -git clone https://github.com/tpoechtrager/cctools-port.git -cd cctools-port/cctools -git checkout 2a3e1c2a6ff54a30f898b70cfb9ba1692a55fad7 -./configure --prefix=$(readlink -f ${CCTOOLS}) --with-libtapi=$(readlink -f ${CCTOOLS}) --target=x86_64-apple-darwin -make install -``` - -次に、作業ツリーにmacOS X SDKをダウンロードする必要があります。 - -```bash -cd ClickHouse/cmake/toolchain/darwin-x86_64 -curl -L 'https://github.com/phracker/MacOSX-SDKs/releases/download/11.3/MacOSX11.0.sdk.tar.xz' | tar xJ --strip-components=1 -``` - -## Build ClickHouse {#build-clickhouse} - -```bash -cd ClickHouse -mkdir build-darwin -cd build-darwin -CC=clang-19 CXX=clang++-19 cmake -DCMAKE_AR:FILEPATH=${CCTOOLS}/bin/x86_64-apple-darwin-ar -DCMAKE_INSTALL_NAME_TOOL=${CCTOOLS}/bin/x86_64-apple-darwin-install_name_tool -DCMAKE_RANLIB:FILEPATH=${CCTOOLS}/bin/x86_64-apple-darwin-ranlib -DLINKER_NAME=${CCTOOLS}/bin/x86_64-apple-darwin-ld -DCMAKE_TOOLCHAIN_FILE=cmake/darwin/toolchain-x86_64.cmake .. 
-ninja -``` - -生成されたバイナリはMach-O実行可能形式となり、Linux上では実行できません。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/development/build-cross-osx.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/development/build-cross-osx.md.hash deleted file mode 100644 index 2a59109b7f7..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/development/build-cross-osx.md.hash +++ /dev/null @@ -1 +0,0 @@ -9104e2ea4f4c202b diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/development/build-cross-riscv.md b/i18n/jp/docusaurus-plugin-content-docs/current/development/build-cross-riscv.md deleted file mode 100644 index eeca30f0fad..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/development/build-cross-riscv.md +++ /dev/null @@ -1,27 +0,0 @@ ---- -description: 'Guide for building ClickHouse from source for the RISC-V 64 architecture' -sidebar_label: 'Build on Linux for RISC-V 64' -sidebar_position: 30 -slug: '/development/build-cross-riscv' -title: 'How to Build ClickHouse on Linux for RISC-V 64' ---- - - - - -# RISC-V 64用のLinuxでのClickHouseのビルド方法 - -ClickHouseはRISC-Vの実験的サポートを提供しています。すべての機能を有効にできるわけではありません。 - -## ClickHouseのビルド {#build-clickhouse} - -非RISC-VマシンでRISC-V向けにクロスコンパイルするには: - -```bash -cd ClickHouse -mkdir build-riscv64 -CC=clang-19 CXX=clang++-19 cmake . -Bbuild-riscv64 -G Ninja -DCMAKE_TOOLCHAIN_FILE=cmake/linux/toolchain-riscv64.cmake -DGLIBC_COMPATIBILITY=OFF -DENABLE_LDAP=OFF -DOPENSSL_NO_ASM=ON -DENABLE_JEMALLOC=ON -DENABLE_PARQUET=OFF -DENABLE_GRPC=OFF -DENABLE_HDFS=OFF -DENABLE_MYSQL=OFF -ninja -C build-riscv64 -``` - -生成されたバイナリは、RISC-V 64 CPUアーキテクチャを持つLinuxでのみ実行されます。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/development/build-cross-riscv.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/development/build-cross-riscv.md.hash deleted file mode 100644 index 59b08362f76..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/development/build-cross-riscv.md.hash +++ /dev/null @@ -1 +0,0 @@ -4e0197713651bc5c diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/development/build-cross-s390x.md b/i18n/jp/docusaurus-plugin-content-docs/current/development/build-cross-s390x.md deleted file mode 100644 index aeeee02e17a..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/development/build-cross-s390x.md +++ /dev/null @@ -1,213 +0,0 @@ ---- -description: 'Guide for building ClickHouse from source for the s390x architecture' -sidebar_label: 'Build on Linux for s390x (zLinux)' -sidebar_position: 30 -slug: '/development/build-cross-s390x' -title: 'Build on Linux for s390x (zLinux)' ---- - - - - -# Linuxでs390x(zLinux)用にビルド - -ClickHouseはs390xの実験的サポートを提供しています。 - -## s390x用にClickHouseをビルドする {#building-clickhouse-for-s390x} - -s390xには2つのOpenSSL関連のビルドオプションがあります: -- デフォルトでは、OpenSSLはs390xで共有ライブラリとしてビルドされます。これは、すべての他のプラットフォームでOpenSSLが静的ライブラリとしてビルドされるのとは異なります。 -- OpenSSLを静的ライブラリとしてビルドするには、必ず`-DENABLE_OPENSSL_DYNAMIC=0`をCMakeに渡してください。 - -これらの手順は、ホストマシンがx86_64であり、[ビルド指示](../development/build.md)に基づいてネイティブにビルドするために必要なすべてのツールが揃っていると仮定しています。また、ホストがUbuntu 22.04であると仮定していますが、以下の手順はUbuntu 20.04でも動作するはずです。 - -ネイティブビルドに使用するツールをインストールすることに加えて、以下の追加パッケージをインストールする必要があります: - -```bash -apt-get install binutils-s390x-linux-gnu libc6-dev-s390x-cross gcc-s390x-linux-gnu binfmt-support qemu-user-static -``` - -rustコードをクロスコンパイルしたい場合は、s390x用のrustクロスコンパイルターゲットをインストールしてください: - -```bash -rustup target add s390x-unknown-linux-gnu -``` - 
-s390xビルドではmoldリンカを使用します。これをhttps://github.com/rui314/mold/releases/download/v2.0.0/mold-2.0.0-x86_64-linux.tar.gzからダウンロードし、あなたの`$PATH`に置いてください。 - -s390x用にビルドするには: - -```bash -cmake -DCMAKE_TOOLCHAIN_FILE=cmake/linux/toolchain-s390x.cmake .. -ninja -``` - -## 実行する {#running} - -ビルドが完了したら、バイナリを以下のように実行できます: - -```bash -qemu-s390x-static -L /usr/s390x-linux-gnu ./clickhouse -``` - -## デバッグ {#debugging} - -LLDBをインストールします: - -```bash -apt-get install lldb-15 -``` - -s390x実行ファイルをデバッグするには、QEMUを使用してクリックハウスをデバッグモードで実行します: - -```bash -qemu-s390x-static -g 31338 -L /usr/s390x-linux-gnu ./clickhouse -``` - -別のシェルでLLDBを実行し、アタッチします。`` と `` をあなたの環境に対応する値に置き換えてください。 - -```bash -lldb-15 -(lldb) target create ./clickhouse -Current executable set to '//ClickHouse//programs/clickhouse' (s390x). -(lldb) settings set target.source-map //ClickHouse -(lldb) gdb-remote 31338 -Process 1 stopped -* thread #1, stop reason = signal SIGTRAP - frame #0: 0x0000004020e74cd0 --> 0x4020e74cd0: lgr %r2, %r15 - 0x4020e74cd4: aghi %r15, -160 - 0x4020e74cd8: xc 0(8,%r15), 0(%r15) - 0x4020e74cde: brasl %r14, 275429939040 -(lldb) b main -Breakpoint 1: 9 locations. -(lldb) c -Process 1 resuming -Process 1 stopped -* thread #1, stop reason = breakpoint 1.1 - frame #0: 0x0000004005cd9fc0 clickhouse`main(argc_=1, argv_=0x0000004020e594a8) at main.cpp:450:17 - 447 #if !defined(FUZZING_MODE) - 448 int main(int argc_, char ** argv_) - 449 { --> 450 inside_main = true; - 451 SCOPE_EXIT({ inside_main = false; }); - 452 - 453 /// PHDRキャッシュは、クエリプロファイラが信頼性を持って機能するために必要です -``` - -## Visual Studio Code統合 {#visual-studio-code-integration} - -- [CodeLLDB](https://github.com/vadimcn/vscode-lldb)拡張機能は、視覚的デバッグに必要です。 -- [Command Variable](https://github.com/rioj7/command-variable)拡張機能は、[CMake Variants](https://github.com/microsoft/vscode-cmake-tools/blob/main/docs/variants.md)を使用する場合に動的な起動を助けることができます。 -- バックエンドがLLVMインストールに設定されていることを確認してください。例えば、`"lldb.library": "/usr/lib/x86_64-linux-gnu/liblldb-15.so"`。 -- 起動前にクリックハウス実行可能ファイルをデバッグモードで実行することを確認してください。(自動化するために`preLaunchTask`を作成することも可能です) - -### 例の設定 {#example-configurations} -#### cmake-variants.yaml {#cmake-variantsyaml} -```yaml -buildType: - default: relwithdebinfo - choices: - debug: - short: Debug - long: デバッグ情報を出力 - buildType: Debug - release: - short: Release - long: 生成されたコードを最適化 - buildType: Release - relwithdebinfo: - short: RelWithDebInfo - long: デバッグ情報付きリリース - buildType: RelWithDebInfo - tsan: - short: MinSizeRel - long: 最小サイズリリース - buildType: MinSizeRel - -toolchain: - default: default - description: ツールチェインを選択 - choices: - default: - short: x86_64 - long: x86_64 - s390x: - short: s390x - long: s390x - settings: - CMAKE_TOOLCHAIN_FILE: cmake/linux/toolchain-s390x.cmake -``` - -#### launch.json {#launchjson} -```json -{ - "version": "0.2.0", - "configurations": [ - { - "type": "lldb", - "request": "custom", - "name": "(lldb) qemuでs390xを起動", - "targetCreateCommands": ["target create ${command:cmake.launchTargetPath}"], - "processCreateCommands": ["gdb-remote 2159"], - "preLaunchTask": "Run ClickHouse" - } - ] -} -``` - -#### settings.json {#settingsjson} -これにより、異なるビルドが`build`フォルダーの異なるサブフォルダーに配置されます。 -```json -{ - "cmake.buildDirectory": "${workspaceFolder}/build/${buildKitVendor}-${buildKitVersion}-${variant:toolchain}-${variant:buildType}", - "lldb.library": "/usr/lib/x86_64-linux-gnu/liblldb-15.so" -} -``` - -#### run-debug.sh {#run-debugsh} -```sh -#! 
/bin/sh -echo 'デバッガセッションを開始します' -cd $1 -qemu-s390x-static -g 2159 -L /usr/s390x-linux-gnu $2 $3 $4 -``` - -#### tasks.json {#tasksjson} -コンパイルされた実行可能ファイルを`tmp`フォルダーの下で`server`モードで実行するタスクを定義し、`programs/server/config.xml`からの構成を使用します。 -```json -{ - "version": "2.0.0", - "tasks": [ - { - "label": "Run ClickHouse", - "type": "shell", - "isBackground": true, - "command": "${workspaceFolder}/.vscode/run-debug.sh", - "args": [ - "${command:cmake.launchTargetDirectory}/tmp", - "${command:cmake.launchTargetPath}", - "server", - "--config-file=${workspaceFolder}/programs/server/config.xml" - ], - "problemMatcher": [ - { - "pattern": [ - { - "regexp": ".", - "file": 1, - "location": 2, - "message": 3 - } - ], - "background": { - "activeOnStart": true, - "beginsPattern": "^デバッガセッションを開始します", - "endsPattern": ".*" - } - } - ] - } - ] -} -``` diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/development/build-cross-s390x.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/development/build-cross-s390x.md.hash deleted file mode 100644 index 59ec36b1198..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/development/build-cross-s390x.md.hash +++ /dev/null @@ -1 +0,0 @@ -6b95d83e208e0143 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/development/build-osx.md b/i18n/jp/docusaurus-plugin-content-docs/current/development/build-osx.md deleted file mode 100644 index 5c46c68f275..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/development/build-osx.md +++ /dev/null @@ -1,106 +0,0 @@ ---- -description: 'Guide for building ClickHouse from source on macOS systems' -sidebar_label: 'Build on macOS for macOS' -sidebar_position: 15 -slug: '/development/build-osx' -title: 'Build on macOS for macOS' ---- - - - - -# How to Build ClickHouse on macOS for macOS - -:::info あなたは自分で ClickHouse をビルドする必要はありません! -事前にビルドされた ClickHouse を [クイックスタート](https://clickhouse.com/#quick-start) の手順に従ってインストールできます。 -::: - -ClickHouse は、macOS 10.15 (Catalina) 以降の macOS x86_64 (Intel) および arm64 (Apple Silicon) でコンパイル可能です。 - -コンパイラとして、homebrew の Clang のみがサポートされています。 - -## Install Prerequisites {#install-prerequisites} - -まず、一般的な [必要条件のドキュメント](developer-instruction.md) を参照してください。 - -次に、[Homebrew](https://brew.sh/) をインストールし、次のコマンドを実行します。 - -その後、以下を実行します: - -```bash -brew update -brew install ccache cmake ninja libtool gettext llvm binutils grep findutils nasm bash -``` - -:::note -Apple はデフォルトでケースを区別しないファイルシステムを使用しています。これは通常、コンパイルには影響しませんが(特にスクラッチメイクが機能します)、`git mv` のようなファイル操作に混乱を招くことがあります。 -macOS での真剣な開発のためには、ソースコードをケースを区別するディスクボリュームに保存することを確認してください。たとえば、[これらの手順](https://brianboyko.medium.com/a-case-sensitive-src-folder-for-mac-programmers-176cc82a3830)を参照してください。 -::: - -## Build ClickHouse {#build-clickhouse} - -ビルドを行うには、Homebrew の Clang コンパイラを使用する必要があります: - -```bash -cd ClickHouse -mkdir build -export PATH=$(brew --prefix llvm)/bin:$PATH -cmake -S . 
-B build -cmake --build build - -# 生成されたバイナリは次の場所に作成されます: build/programs/clickhouse -``` - -## Caveats {#caveats} - -`clickhouse-server` を実行する予定がある場合は、システムの `maxfiles` 変数を増やす必要があります。 - -:::note -sudo を使用する必要があります。 -::: - -そのために、次の内容の `/Library/LaunchDaemons/limit.maxfiles.plist` ファイルを作成してください: - -```xml - - - - - Label - limit.maxfiles - ProgramArguments - - launchctl - limit - maxfiles - 524288 - 524288 - - RunAtLoad - - ServiceIPC - - - -``` - -ファイルに適切な権限を与えます: - -```bash -sudo chown root:wheel /Library/LaunchDaemons/limit.maxfiles.plist -``` - -ファイルが正しいことを検証します: - -```bash -plutil /Library/LaunchDaemons/limit.maxfiles.plist -``` - -ファイルを読み込む(または再起動)します: - -```bash -sudo launchctl load -w /Library/LaunchDaemons/limit.maxfiles.plist -``` - -動作しているか確認するには、`ulimit -n` または `launchctl limit maxfiles` コマンドを使用してください。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/development/build-osx.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/development/build-osx.md.hash deleted file mode 100644 index 46e441ce42c..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/development/build-osx.md.hash +++ /dev/null @@ -1 +0,0 @@ -c23c7b23ebcb414f diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/development/build.md b/i18n/jp/docusaurus-plugin-content-docs/current/development/build.md deleted file mode 100644 index fae3b9784af..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/development/build.md +++ /dev/null @@ -1,221 +0,0 @@ ---- -description: 'Step-by-step guide for building ClickHouse from source on Linux systems' -sidebar_label: 'Build on Linux' -sidebar_position: 10 -slug: '/development/build' -title: 'How to Build ClickHouse on Linux' ---- - - - - -# ClickHouseをLinuxにビルドする方法 - -:::info ClickHouseを自分でビルドする必要はありません! 
-事前にビルドされたClickHouseを[クイックスタート](https://clickhouse.com/#quick-start)に従ってインストールできます。 -::: - -ClickHouseは以下のプラットフォームでビルド可能です: - -- x86_64 -- AArch64 -- PowerPC 64 LE(実験的) -- s390/x(実験的) -- RISC-V 64(実験的) - -## 前提条件 {#assumptions} - -このチュートリアルはUbuntu Linuxに基づいていますが、適切な変更を加えることで他のLinuxディストリビューションでも動作するはずです。 -開発に推奨される最小のUbuntuバージョンは24.04 LTSです。 - -このチュートリアルは、ClickHouseのリポジトリとすべてのサブモジュールがローカルにチェックアウトされていることを前提としています。 - -## 必要な前提条件をインストールする {#install-prerequisites} - -まず、一般的な[前提条件のドキュメント](developer-instruction.md)を参照してください。 - -ClickHouseはビルドにCMakeとNinjaを使用します。 - -オプションで、ccacheをインストールして、すでにコンパイルされたオブジェクトファイルを再利用できるようにすることができます。 - -```bash -sudo apt-get update -sudo apt-get install git cmake ccache python3 ninja-build nasm yasm gawk lsb-release wget software-properties-common gnupg -``` - -## Clangコンパイラをインストールする {#install-the-clang-compiler} - -Ubuntu/DebianにClangをインストールするには、[こちら](https://apt.llvm.org/)からLLVMの自動インストールスクリプトを使用します。 - -```bash -sudo bash -c "$(wget -O - https://apt.llvm.org/llvm.sh)" -``` - -他のLinuxディストリビューションの場合は、LLVMの[事前ビルドパッケージ](https://releases.llvm.org/download.html)をインストールできるか確認してください。 - -2025年3月時点では、Clang 19以上が必要です。 -GCCや他のコンパイラはサポートされていません。 - -## Rustコンパイラをインストールする(オプション) {#install-the-rust-compiler-optional} - -:::note -RustはClickHouseのオプション依存関係です。 -Rustがインストールされていない場合、ClickHouseのいくつかの機能はコンパイルから省略されます。 -::: - -まず、公式の[Rustドキュメント](https://www.rust-lang.org/tools/install)に従って`rustup`をインストールします。 - -C++の依存関係と同様に、ClickHouseはベンダリングを使用して、正確に何がインストールされるかを制御し、サードパーティサービス(`crates.io`レジストリなど)への依存を避けます。 - -リリースモードでは、すべての新しいrustupツールチェーンバージョンがこれらの依存関係と動作するはずですが、サニタイザーを有効にする予定の場合は、CIで使用されているのと同じ`std`に一致するバージョンを使用する必要があります(私たちはクレートをベンドしています): - -```bash -rustup toolchain install nightly-2024-12-01 -rustup default nightly-2024-12-01 -rustup component add rust-src -``` - -## ClickHouseをビルドする {#build-clickhouse} - -すべてのビルドアーティファクトが含まれる`build`という別のディレクトリを`ClickHouse`内に作成することをお勧めします: - -```sh -mkdir build -cd build -``` - -異なるビルドタイプ用に、複数の異なるディレクトリ(例: `build_release`, `build_debug`など)を持つことができます。 - -オプション: 複数のコンパイラバージョンがインストールされている場合は、使用する正確なコンパイラを指定できます。 - -```sh -export CC=clang-19 -export CXX=clang++-19 -``` - -開発目的の場合、デバッグビルドを推奨します。 -リリースビルドと比較して、コンパイラの最適化レベルが低く(`-O`)、デバッグ体験が向上します。 -また、`LOGICAL_ERROR`タイプの内部例外は正常に失敗するのではなく、即座にクラッシュします。 - -```sh -cmake -D CMAKE_BUILD_TYPE=Debug .. -``` - -ビルドを実行するにはninjaを使用します: - -```sh -ninja clickhouse-server clickhouse-client -``` - -すべてのバイナリ(ユーティリティとテスト)をビルドしたい場合は、引数なしでninjaを実行します: - -```sh -ninja -``` - -並列ビルドジョブの数を制御するには、`-j`パラメータを使用します: - -```sh -ninja -j 1 clickhouse-server clickhouse-client -``` - -:::tip -CMakeは上記のコマンドのショートカットを提供します: - -```sh -cmake -S . 
-B build # ビルドを構成し、リポジトリのトップレベルディレクトリから実行 -cmake --build build # コンパイル -``` -::: - -## ClickHouse実行可能ファイルの実行 {#running-the-clickhouse-executable} - -ビルドが成功した後、実行可能ファイルは`ClickHouse//programs/`にあります: - -ClickHouseサーバーは現在のディレクトリに`config.xml`という設定ファイルを探そうとします。 -代わりにコマンドラインで`-C`を使って設定ファイルを指定することもできます。 - -`clickhouse-client`でClickHouseサーバーに接続するためには、別のターミナルを開き、`ClickHouse/build/programs/`に移動して`./clickhouse client`を実行します。 - -macOSやFreeBSDで`Connection refused`メッセージが表示される場合は、ホストアドレス127.0.0.1を指定してみてください: - -```bash -clickhouse client --host 127.0.0.1 -``` - -## 高度なオプション {#advanced-options} - -### 最小ビルド {#minimal-build} - -サードパーティライブラリによって提供される機能が不要な場合、さらにビルドを高速化できます: - -```sh -cmake -DENABLE_LIBRARIES=OFF -``` - -問題が発生した場合、自己責任でお願いします… - -Rustはインターネット接続を必要とします。Rustサポートを無効にするには: - -```sh -cmake -DENABLE_RUST=OFF -``` - -### ClickHouse実行可能ファイルの実行 {#running-the-clickhouse-executable-1} - -システムにインストールされているClickHouseバイナリーのプロダクションバージョンをコンパイルしたClickHouseバイナリーで置き換えることができます。 -そのためには、公式ウェブサイトの指示に従ってマシンにClickHouseをインストールします。 -次に、実行: - -```bash -sudo service clickhouse-server stop -sudo cp ClickHouse/build/programs/clickhouse /usr/bin/ -sudo service clickhouse-server start -``` - -`clickhouse-client`、`clickhouse-server`などは、一般的に共有される`clickhouse`バイナリーへのシンボリックリンクであることに注意してください。 - -システムにインストールされているClickHouseパッケージから設定ファイルを使用して、カスタムビルドのClickHouseバイナリーも実行できます: - -```bash -sudo service clickhouse-server stop -sudo -u clickhouse ClickHouse/build/programs/clickhouse server --config-file /etc/clickhouse-server/config.xml -``` - -### 任意のLinuxでのビルド {#building-on-any-linux} - -OpenSUSE Tumbleweedでの前提条件をインストール: - -```bash -sudo zypper install git cmake ninja clang-c++ python lld nasm yasm gawk -git clone --recursive https://github.com/ClickHouse/ClickHouse.git -mkdir build -cmake -S . -B build -cmake --build build -``` - -Fedora Rawhideでの前提条件をインストール: - -```bash -sudo yum update -sudo yum --nogpg install git cmake make clang python3 ccache lld nasm yasm gawk -git clone --recursive https://github.com/ClickHouse/ClickHouse.git -mkdir build -cmake -S . 
-B build -cmake --build build -``` - -### Dockerでのビルド {#building-in-docker} - -以下のコマンドを使用して、CIと似た環境で任意のビルドをローカルで実行できます: - -```bash -python -m ci.praktika "BUILD_JOB_NAME" -``` -ここで、BUILD_JOB_NAMEはCIレポートに表示されるジョブ名(例: "Build (arm_release)", "Build (amd_debug)")です。 - -このコマンドは、必要なすべての依存関係を含む適切なDockerイメージ`clickhouse/binary-builder`をプルし、その中でビルドスクリプト`./ci/jobs/build_clickhouse.py`を実行します。 - -ビルド出力は`./ci/tmp/`に置かれます。 - -これはAMDおよびARMアーキテクチャの両方で動作し、Docker以外の追加依存関係は必要ありません。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/development/build.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/development/build.md.hash deleted file mode 100644 index 687306ef9f9..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/development/build.md.hash +++ /dev/null @@ -1 +0,0 @@ -43d59cd762a519fc diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/development/building_and_benchmarking_deflate_qpl.md b/i18n/jp/docusaurus-plugin-content-docs/current/development/building_and_benchmarking_deflate_qpl.md deleted file mode 100644 index 20345fb7152..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/development/building_and_benchmarking_deflate_qpl.md +++ /dev/null @@ -1,334 +0,0 @@ ---- -description: 'How to build Clickhouse and run benchmark with DEFLATE_QPL Codec' -sidebar_label: 'Building and Benchmarking DEFLATE_QPL' -sidebar_position: 73 -slug: '/development/building_and_benchmarking_deflate_qpl' -title: 'Build Clickhouse with DEFLATE_QPL' ---- - - - - -# DEFLATE_QPLを使ってClickhouseをビルドする - -- ホストマシンがQPLの必要な[前提条件](https://intel.github.io/qpl/documentation/get_started_docs/installation.html#prerequisites)を満たしていることを確認してください。 -- deflate_qplはcmakeビルド中にデフォルトで有効です。もし、誤って変更した場合は、ビルドフラグを再確認してください: ENABLE_QPL=1 - -- 一般的な要件については、Clickhouseの一般的な[ビルド手順](/development/build.md)を参照してください。 - - -# DEFLATE_QPLを使ったベンチマークの実行 - -## ファイルリスト {#files-list} - -フォルダ `benchmark_sample` は、[qpl-cmake](https://github.com/ClickHouse/ClickHouse/tree/master/contrib/qpl-cmake) 内にあり、Pythonスクリプトを使ってベンチマークを実行する方法の例を提供します: - -`client_scripts` には典型的なベンチマークを実行するためのPythonスクリプトが含まれています。例えば: -- `client_stressing_test.py`: [1~4]のサーバーインスタンスを使ったクエリストレステスト用のPythonスクリプトです。 -- `queries_ssb.sql`: [Star Schema Benchmark](/getting-started/example-datasets/star-schema/) のすべてのクエリをリストしたファイルです。 -- `allin1_ssb.sh`: このシェルスクリプトは、ベンチマークのワークフローをすべて自動的に実行します。 - -`database_files` は、lz4/deflate/zstd コーデックに従ってデータベースファイルを保存することを意味します。 - -## Star Schemaの自動ベンチマーク実行: {#run-benchmark-automatically-for-star-schema} - -```bash -$ cd ./benchmark_sample/client_scripts -$ sh run_ssb.sh -``` - -完了後、すべての結果はこのフォルダ:`./output/`に保存されます。 - -失敗した場合は、以下のセクションに従って手動でベンチマークを実行してください。 - -## 定義 {#definition} - -[CLICKHOUSE_EXE] は、ClickHouse実行可能プログラムのパスを意味します。 - -## 環境 {#environment} - -- CPU: Sapphire Rapid -- OS要件は[QPLのシステム要件](https://intel.github.io/qpl/documentation/get_started_docs/installation.html#system-requirements)を参照してください。 -- IAAセットアップは[アクセラレータの設定](https://intel.github.io/qpl/documentation/get_started_docs/installation.html#accelerator-configuration)を参照してください。 -- Pythonモジュールのインストール: - -```bash -pip3 install clickhouse_driver numpy -``` - -[IAAの自己チェック] - -```bash -$ accel-config list | grep -P 'iax|state' -``` - -期待される出力は次のようになります: -```bash - "dev":"iax1", - "state":"enabled", - "state":"enabled", -``` - -何も出力されない場合は、IAAが作動する準備ができていないことを意味します。再度IAA設定を確認してください。 - -## 生データの生成 {#generate-raw-data} - -```bash -$ cd ./benchmark_sample -$ mkdir rawdata_dir && cd rawdata_dir -``` - -[`dbgen`](/getting-started/example-datasets/star-schema)を使用して、パラメータ-s 
20で1億行のデータを生成します。 - -`*.tbl`のようなファイルは、`./benchmark_sample/rawdata_dir/ssb-dbgen`の下に出力されることが期待されます。 - -## データベースのセットアップ {#database-setup} - -LZ4 コーデックでデータベースをセットアップします。 - -```bash -$ cd ./database_dir/lz4 -$ [CLICKHOUSE_EXE] server -C config_lz4.xml >&/dev/null& -$ [CLICKHOUSE_EXE] client -``` - -ここで、コンソールに `Connected to ClickHouse server` のメッセージが表示されれば、クライアントがサーバーとの接続を正常にセットアップしたことを意味します。 - -[Star Schema Benchmark](/getting-started/example-datasets/star-schema) に記載されている以下の3つのステップを完了してください。 -- ClickHouse内のテーブルの作成 -- データの挿入。ここでは、`./benchmark_sample/rawdata_dir/ssb-dbgen/*.tbl`を入力データとして使用する必要があります。 -- "star schema"を非正規化された"flat schema"に変換します。 - -IAA Deflate コーデックでデータベースをセットアップします。 - -```bash -$ cd ./database_dir/deflate -$ [CLICKHOUSE_EXE] server -C config_deflate.xml >&/dev/null& -$ [CLICKHOUSE_EXE] client -``` - -LZ4と同様に、上記の3つのステップを完了してください。 - -ZSTD コーデックでデータベースをセットアップします。 - -```bash -$ cd ./database_dir/zstd -$ [CLICKHOUSE_EXE] server -C config_zstd.xml >&/dev/null& -$ [CLICKHOUSE_EXE] client -``` - -LZ4と同様に、上記の3つのステップを完了してください。 - -[自己チェック] -各コーデック(lz4/zstd/deflate)について、以下のクエリを実行してデータベースが正常に作成されたことを確認してください: -```sql -select count() from lineorder_flat -``` -期待される出力は以下の通りです: -```sql -┌───count()─┐ -│ 119994608 │ -└───────────┘ -``` -[IAA Deflate コーデックの自己チェック] - -クライアントから挿入またはクエリを初めて実行すると、ClickHouseサーバーコンソールは次のログを表示することが期待されます: -```text -Hardware-assisted DeflateQpl codec is ready! -``` -これが見つからず、次のようなログが表示された場合: -```text -Initialization of hardware-assisted DeflateQpl codec failed -``` -それはIAAデバイスが準備ができていないことを意味し、再度IAA設定を確認する必要があります。 - -## 単一インスタンスでのベンチマーク {#benchmark-with-single-instance} - -- ベンチマークを開始する前に、C6を無効にし、CPU周波数のガバナーを `performance` に設定してください。 - -```bash -$ cpupower idle-set -d 3 -$ cpupower frequency-set -g performance -``` - -- メモリバウンドの影響を排除するために、`numactl`を使用してサーバーを1つのソケットに、クライアントを別のソケットにバインドします。 -- 単一インスタンスとは、単一のクライアントに接続された単一のサーバーを意味します。 - -今、LZ4/Deflate/ZSTDそれぞれのベンチマークを実行します: - -LZ4: - -```bash -$ cd ./database_dir/lz4 -$ numactl -m 0 -N 0 [CLICKHOUSE_EXE] server -C config_lz4.xml >&/dev/null& -$ cd ./client_scripts -$ numactl -m 1 -N 1 python3 client_stressing_test.py queries_ssb.sql 1 > lz4.log -``` - -IAA Deflate: - -```bash -$ cd ./database_dir/deflate -$ numactl -m 0 -N 0 [CLICKHOUSE_EXE] server -C config_deflate.xml >&/dev/null& -$ cd ./client_scripts -$ numactl -m 1 -N 1 python3 client_stressing_test.py queries_ssb.sql 1 > deflate.log -``` - -ZSTD: - -```bash -$ cd ./database_dir/zstd -$ numactl -m 0 -N 0 [CLICKHOUSE_EXE] server -C config_zstd.xml >&/dev/null& -$ cd ./client_scripts -$ numactl -m 1 -N 1 python3 client_stressing_test.py queries_ssb.sql 1 > zstd.log -``` - -今、3つのログが期待通りに出力されるはずです: -```text -lz4.log -deflate.log -zstd.log -``` - -性能指標を確認する方法: - -私たちはQPSに焦点を当てています。キーワード`QPS_Final`を検索し、統計を収集してください。 - -## 複数インスタンスでのベンチマーク {#benchmark-with-multi-instances} - -- スレッドが多すぎるためにメモリの影響を減らすために、複数インスタンスでベンチマークを実行することをお勧めします。 -- 複数インスタンスとは、複数(2または4)のサーバーがそれぞれのクライアントに接続されていることを意味します。 -- 1つのソケットのコアは均等に分けられ、サーバーにそれぞれ割り当てられる必要があります。 -- 複数インスタンスの場合は、各コーデック用に新しいフォルダを作成し、単一インスタンスでの手順に従ってデータセットを挿入する必要があります。 - -2つの違いがあります: -- クライアント側では、テーブルの作成とデータの挿入時に割り当てられたポートでClickHouseを起動する必要があります。 -- サーバー側では、ポートが割り当てられた特定のxml設定ファイルでClickHouseを起動する必要があります。すべてのカスタマイズされたxml設定ファイルは、./server_configに提供されています。 - -ここでは、ソケットあたり60コアとし、2インスタンスを例に取ります。 -最初のインスタンスのサーバーを起動します。 - -LZ4: - -```bash -$ cd ./database_dir/lz4 -$ numactl -C 0-29,120-149 [CLICKHOUSE_EXE] server -C config_lz4.xml >&/dev/null& -``` - -ZSTD: - -```bash -$ cd ./database_dir/zstd -$ numactl -C 0-29,120-149 [CLICKHOUSE_EXE] server 
-C config_zstd.xml >&/dev/null& -``` - -IAA Deflate: - -```bash -$ cd ./database_dir/deflate -$ numactl -C 0-29,120-149 [CLICKHOUSE_EXE] server -C config_deflate.xml >&/dev/null& -``` - -[2番目のインスタンスのサーバーを起動] - -LZ4: - -```bash -$ cd ./database_dir && mkdir lz4_s2 && cd lz4_s2 -$ cp ../../server_config/config_lz4_s2.xml ./ -$ numactl -C 30-59,150-179 [CLICKHOUSE_EXE] server -C config_lz4_s2.xml >&/dev/null& -``` - -ZSTD: - -```bash -$ cd ./database_dir && mkdir zstd_s2 && cd zstd_s2 -$ cp ../../server_config/config_zstd_s2.xml ./ -$ numactl -C 30-59,150-179 [CLICKHOUSE_EXE] server -C config_zstd_s2.xml >&/dev/null& -``` - -IAA Deflate: - -```bash -$ cd ./database_dir && mkdir deflate_s2 && cd deflate_s2 -$ cp ../../server_config/config_deflate_s2.xml ./ -$ numactl -C 30-59,150-179 [CLICKHOUSE_EXE] server -C config_deflate_s2.xml >&/dev/null& -``` - -2番目のインスタンスのためのテーブルの作成とデータの挿入 - -テーブルの作成: - -```bash -$ [CLICKHOUSE_EXE] client -m --port=9001 -``` - -データの挿入: - -```bash -$ [CLICKHOUSE_EXE] client --query "INSERT INTO [TBL_FILE_NAME] FORMAT CSV" < [TBL_FILE_NAME].tbl --port=9001 -``` - -- [TBL_FILE_NAME]は、`./benchmark_sample/rawdata_dir/ssb-dbgen`の下にある正規表現:*. tblで命名されたファイルの名前を表します。 -- `--port=9001` は、config_lz4_s2.xml/config_zstd_s2.xml/config_deflate_s2.xmlで定義されたサーバーインスタンスのための割り当てられたポートを示します。さらに多くのインスタンスの場合は、9002/9003という値に置き換えなければなりません。これはそれぞれs3/s4インスタンスを意味します。割り当てを行わない場合、ポートはデフォルトで9000となり、最初のインスタンスによって使用されます。 - -2インスタンスでのベンチマーキング - -LZ4: - -```bash -$ cd ./database_dir/lz4 -$ numactl -C 0-29,120-149 [CLICKHOUSE_EXE] server -C config_lz4.xml >&/dev/null& -$ cd ./database_dir/lz4_s2 -$ numactl -C 30-59,150-179 [CLICKHOUSE_EXE] server -C config_lz4_s2.xml >&/dev/null& -$ cd ./client_scripts -$ numactl -m 1 -N 1 python3 client_stressing_test.py queries_ssb.sql 2 > lz4_2insts.log -``` - -ZSTD: - -```bash -$ cd ./database_dir/zstd -$ numactl -C 0-29,120-149 [CLICKHOUSE_EXE] server -C config_zstd.xml >&/dev/null& -$ cd ./database_dir/zstd_s2 -$ numactl -C 30-59,150-179 [CLICKHOUSE_EXE] server -C config_zstd_s2.xml >&/dev/null& -$ cd ./client_scripts -$ numactl -m 1 -N 1 python3 client_stressing_test.py queries_ssb.sql 2 > zstd_2insts.log -``` - -IAA Deflate: - -```bash -$ cd ./database_dir/deflate -$ numactl -C 0-29,120-149 [CLICKHOUSE_EXE] server -C config_deflate.xml >&/dev/null& -$ cd ./database_dir/deflate_s2 -$ numactl -C 30-59,150-179 [CLICKHOUSE_EXE] server -C config_deflate_s2.xml >&/dev/null& -$ cd ./client_scripts -$ numactl -m 1 -N 1 python3 client_stressing_test.py queries_ssb.sql 2 > deflate_2insts.log -``` - -ここで、`client_stressing_test.py`の最後の引数:`2`はインスタンスの数を意味します。さらに多くのインスタンスのためには、`3`または`4`という値に置き換える必要があります。このスクリプトは最大4インスタンスをサポートしています。 - -今、3つのログが期待通りに出力されるはずです: - -```text -lz4_2insts.log -deflate_2insts.log -zstd_2insts.log -``` -性能指標を確認する方法: - -私たちはQPSに焦点を当てています。キーワード`QPS_Final`を検索し、統計を収集してください。 - -4インスタンスのベンチマークセットアップは、上記の2インスタンスと似ています。 -最終報告のレビューには、2インスタンスのベンチマークデータを使用することをお勧めします。 - -## ヒント {#tips} - -新しいClickhouseサーバーを起動する前に、バックグラウンドのClickhouseプロセスが動いていないことを確認してください。古いプロセスを確認し、終了させてください。 - -```bash -$ ps -aux| grep clickhouse -$ kill -9 [PID] -``` -./client_scripts/queries_ssb.sql内のクエリリストを公式の[Star Schema Benchmark](/getting-started/example-datasets/star-schema)と比較すると、Q1.2/Q1.3/Q3.4の3つのクエリが含まれていないことがわかります。これは、これらのクエリのCPU使用率%が非常に低く< 10%であり、性能の違いを示すことができないことを意味します。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/development/building_and_benchmarking_deflate_qpl.md.hash 
b/i18n/jp/docusaurus-plugin-content-docs/current/development/building_and_benchmarking_deflate_qpl.md.hash deleted file mode 100644 index e678a7a153f..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/development/building_and_benchmarking_deflate_qpl.md.hash +++ /dev/null @@ -1 +0,0 @@ -12f4a25741923514 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/development/continuous-integration.md b/i18n/jp/docusaurus-plugin-content-docs/current/development/continuous-integration.md deleted file mode 100644 index e8a1a5cd6e7..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/development/continuous-integration.md +++ /dev/null @@ -1,180 +0,0 @@ ---- -description: 'ClickHouseの継続的インテグレーションシステムの概要' -sidebar_label: '継続的インテグレーション(CI)' -sidebar_position: 55 -slug: '/development/continuous-integration' -title: '継続的インテグレーション(CI)' ---- - -# 継続的インテグレーション (CI) - -プルリクエストを送信すると、ClickHouse の [継続的インテグレーション (CI) システム](tests.md#test-automation) によってコードの自動チェックが実行されます。 -これは、リポジトリのメンテナがあなたのコードを確認し、プルリクエストに `can be tested` ラベルを追加した後に行われます。 -チェックの結果は、[GitHub チェックのドキュメント](https://docs.github.com/en/github/collaborating-with-issues-and-pull-requests/about-status-checks) に記載されているように、GitHub プルリクエストページにリストされます。 -チェックが失敗した場合は、それを修正する必要があるかもしれません。 -このページでは、遭遇する可能性のあるチェックの概要と、それを修正するためにできることについて説明します。 - -チェックの失敗があなたの変更に関連していないように見える場合、それは一時的な失敗やインフラストラクチャの問題である可能性があります。 -CI チェックを再起動するためには、プルリクエストに空のコミットをプッシュしてください: - -```shell -git reset -git commit --allow-empty -git push -``` - -何をすべきか分からない場合は、メンテナに助けを求めてください。 - -## マスターへのマージ {#merge-with-master} - -PRがマスターにマージできるかどうかを確認します。 -できない場合は、`Cannot fetch mergecommit` というメッセージで失敗します。 -このチェックを修正するには、[GitHub のドキュメント](https://docs.github.com/en/github/collaborating-with-issues-and-pull-requests/resolving-a-merge-conflict-on-github)に記載されているように、競合を解決するか、`master` ブランチをプルリクエストブランチにマージします。 - -## ドキュメントチェック {#docs-check} - -ClickHouse ドキュメントサイトのビルドを試みます。 -ドキュメントに何か変更があった場合、失敗する可能性があります。 -最も可能性の高い理由は、ドキュメント内のいくつかのクロスリンクが正しくないことです。 -チェックレポートに行き、`ERROR` および `WARNING` メッセージを探してください。 - -## 説明チェック {#description-check} - -プルリクエストの説明が [PULL_REQUEST_TEMPLATE.md](https://github.com/ClickHouse/ClickHouse/blob/master/.github/PULL_REQUEST_TEMPLATE.md) テンプレートに従っているかどうかを確認します。 -変更に対して変更履歴のカテゴリを指定する必要があります (例えば、バグ修正)、および [CHANGELOG.md](../whats-new/changelog/index.md) に変更を説明するユーザー向けのメッセージを書く必要があります。 - -## DockerHub へのプッシュ {#push-to-dockerhub} - -ビルドとテストに使用する docker イメージをビルドし、それを DockerHub にプッシュします。 - -## マーカー チェック {#marker-check} - -このチェックは、CI システムがプルリクエストの処理を開始したことを意味します。 -「pending」ステータスは、すべてのチェックがまだ開始されていないことを示します。 -すべてのチェックが開始されると、ステータスが「success」に変更されます。 - -## スタイル チェック {#style-check} - -コードベースに対してさまざまなスタイルチェックを実行します。 - -スタイルチェックジョブの基本チェック: - -##### cpp {#cpp} -[`ci/jobs/scripts/check_style/check_cpp.sh`](https://github.com/ClickHouse/ClickHouse/blob/master/ci/jobs/scripts/check_style/check_cpp.sh) スクリプトを使用して、単純な正規表現ベースのコードスタイルチェックを行います (このスクリプトはローカルでも実行できます)。 -失敗した場合は、[コードスタイルガイド](style.md)に従ってスタイルの問題を修正してください。 - -##### codespell, aspell {#codespell} -文法の間違いやタイポをチェックします。 - -##### mypy {#mypy} -Python コードの静的型チェックを実行します。 - -### スタイル チェック ジョブをローカルで実行する {#running-style-check-locally} - -_Style Check_ ジョブ全体を以下のコマンドで Docker コンテナ内でローカルに実行できます: - -```sh -python -m ci.praktika run "Style check" -``` - -特定のチェック (例: _cpp_ チェック) を実行するには: -```sh -python -m ci.praktika run "Style check" --test cpp -``` - -これらのコマンドは `clickhouse/style-test` Docker イメージをプルし、コンテナ化された環境内でジョブを実行します。 -Python 3 と Docker 以外の依存関係は必要ありません。 - -## ファストテスト {#fast-test} - -通常、これは PR のために最初に実行されるチェックです。 
-ClickHouse をビルドし、ほとんどの [ステートレスな機能テスト](tests.md#functional-tests) を実行し、いくつかを省略します。 -失敗した場合、それが修正されるまで追加のチェックは開始されません。 -どのテストが失敗したかを報告書で確認し、[こちら](https://github.com/ClickHouse/ClickHouse/tree/master/docker/test/performance-comparison#how-to-read-the-report) の説明に従ってローカルで失敗を再現してください。 - -#### ローカルでファストテストを実行する: {#running-fast-test-locally} - -```sh -python -m ci.praktika run "Fast test" [--test some_test_name] -``` - -これらのコマンドは `clickhouse/fast-test` Docker イメージをプルし、コンテナ化された環境内でジョブを実行します。 -Python 3 と Docker 以外の依存関係は必要ありません。 - -## ビルド チェック {#build-check} - -さまざまな構成で ClickHouse をビルドし、次のステップで使用します。 -失敗したビルドを修正する必要があります。 -ビルドログにはエラーを修正するための十分な情報が含まれていることがよくありますが、失敗をローカルで再現する必要があるかもしれません。 -`cmake` オプションはビルドログに見つけることができ、`cmake` で `grep` します。 -これらのオプションを使用して、[一般的なビルドプロセス](../development/build.md)に従ってください。 - -### レポート詳細 {#report-details} - -- **コンパイラ**: `clang-19`、ターゲットプラットフォームの名前をオプションとして指定できます -- **ビルドタイプ**: `Debug` または `RelWithDebInfo` (cmake)。 -- **サニタイザー**: `none` (サニタイザーなし)、`address` (ASan)、`memory` (MSan)、`undefined` (UBSan)、または `thread` (TSan)。 -- **ステータス**: `success` または `fail` -- **ビルドログ**: ビルドおよびファイルコピーのログへのリンク。ビルドに失敗した場合に役立ちます。 -- **ビルド時間**。 -- **アーティファクト**: ビルド結果ファイル (`XXX` はサーバーバージョン、例: `20.8.1.4344`)。 - - `clickhouse-client_XXX_amd64.deb` - - `clickhouse-common-static-dbg_XXX[+asan, +msan, +ubsan, +tsan]_amd64.deb` - - `clickhouse-common-staticXXX_amd64.deb` - - `clickhouse-server_XXX_amd64.deb` - - `clickhouse`: メインビルドバイナリ。 - - `clickhouse-odbc-bridge` - - `unit_tests_dbms`: ClickHouse ユニットテストを持つ GoogleTest バイナリ。 - - `performance.tar.zst`: パフォーマンステスト用の特別なパッケージ。 - - -## 特別ビルドチェック {#special-build-check} -静的分析およびコードスタイルチェックを `clang-tidy` を使用して実行します。レポートは [ビルドチェック](#build-check) に類似しています。ビルドログで見つかったエラーを修正してください。 - -#### ローカルで clang-tidy を実行する: {#running-clang-tidy-locally} - -Docker で clang-tidy ビルドを実行する便利な `packager` スクリプトがあります。 -```sh -mkdir build_tidy -./docker/packager/packager --output-dir=./build_tidy --package-type=binary --compiler=clang-19 --debug-build --clang-tidy -``` - -## 機能的ステートレス テスト {#functional-stateless-tests} -さまざまな構成でビルドされた ClickHouse バイナリのための [ステートレスな機能テスト](tests.md#functional-tests) を実行します -- リリース、デバッグ、サニタイザー付きなど。 -どのテストが失敗したかを報告書で確認し、[こちら](https://github.com/ClickHouse/ClickHouse/tree/master/docker/test/performance-comparison#how-to-read-the-report) の説明に従ってローカルで失敗を再現してください。 -正しいビルド構成を使用して再現する必要があります。アドレスサニタイザーでは失敗するテストも、デバッグでは合格する可能性があります。 -[CI ビルドチェックページ](/install/advanced) からバイナリをダウンロードするか、ローカルでビルドしてください。 - -## 機能的ステートフル テスト {#functional-stateful-tests} - -[状態を持つ機能テスト](tests.md#functional-tests)を実行します。 -それらは機能的ステートレス テストと同じ方法で扱います。 -違いは、`hits` および `visits` テーブルが [clickstream データセット](../getting-started/example-datasets/metrica.md)から必要であることです。 - -## 統合テスト {#integration-tests} -[integration tests](tests.md#integration-tests)を実行します。 - -## バグ修正検証チェック {#bugfix-validate-check} - -新しいテスト (機能または統合) があるか、マスターブランチでビルドされたバイナリで失敗する変更されたテストがあるかどうかを確認します。 -このチェックは、プルリクエストに「pr-bugfix」ラベルが付けられるとトリガーされます。 - -## ストレステスト {#stress-test} -複数のクライアントから同時にステートレスな機能テストを実行し、並行性に関連するエラーを検出します。失敗した場合: - - * 最初に他のすべてのテストの失敗を修正します; - * レポートを見てサーバーログを見つけ、それらのエラーの可能性のある原因を確認します。 - -## 互換性チェック {#compatibility-check} - -`clickhouse` バイナリが古い libc バージョンを持つディストリビューションで実行できるかどうかを確認します。 -失敗した場合は、メンテナに助けを求めてください。 - -## AST ファザー {#ast-fuzzer} -プログラムエラーをキャッチするためにランダムに生成されたクエリを実行します。 -失敗した場合は、メンテナに助けを求めてください。 - -## パフォーマンステスト {#performance-tests} -クエリパフォーマンスの変化を測定します。 -これは約 6 時間かかる最も長いチェックです。 
-パフォーマンステストの報告は、[こちら](https://github.com/ClickHouse/ClickHouse/tree/master/docker/test/performance-comparison#how-to-read-the-report) に詳しく説明されています。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/development/continuous-integration.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/development/continuous-integration.md.hash deleted file mode 100644 index 2761f34ff09..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/development/continuous-integration.md.hash +++ /dev/null @@ -1 +0,0 @@ -fab57c31f3e619e8 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/development/contrib.md b/i18n/jp/docusaurus-plugin-content-docs/current/development/contrib.md deleted file mode 100644 index c567dd88515..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/development/contrib.md +++ /dev/null @@ -1,53 +0,0 @@ ---- -description: 'ClickHouseのサードパーティー使用法とサードパーティーライブラリの追加と管理方法について説明するページ。' -sidebar_label: 'サードパーティーライブラリ' -sidebar_position: 60 -slug: '/development/contrib' -title: 'Third-Party Libraries' ---- - - - - -# サードパーティライブラリ - -ClickHouseは、他のデータベースへの接続、ディスクからのロード/セーブ時のデータのデコード/エンコード、特定の専門的なSQL関数の実装など、さまざまな目的のためにサードパーティライブラリを利用しています。 -ターゲットシステムに利用可能なライブラリに依存しないように、各サードパーティライブラリはClickHouseのソースツリーにGitサブモジュールとしてインポートされ、ClickHouseとともにコンパイルおよびリンクされます。 -サードパーティライブラリおよびそのライセンスのリストは、以下のクエリを実行することで取得できます: - -```sql -SELECT library_name, license_type, license_path FROM system.licenses ORDER BY library_name COLLATE 'en'; -``` - -リストにあるライブラリは、ClickHouseリポジトリの `contrib/` ディレクトリにあるものです。 -ビルドオプションによっては、ライブラリがコンパイルされていない場合があり、その結果としてランタイム時にその機能が利用できないことがあります。 - -[例](https://sql.clickhouse.com?query_id=478GCPU7LRTSZJBNY3EJT3) - -## サードパーティライブラリの追加と管理 {#adding-and-maintaining-third-party-libraries} - -各サードパーティライブラリは、ClickHouseリポジトリの `contrib/` ディレクトリ内の専用ディレクトリに存在する必要があります。 -外部コードのコピーをライブラリディレクトリにダンプすることは避けてください。 -代わりに、外部の上流リポジトリからサードパーティコードを取得するためにGitサブモジュールを作成します。 - -ClickHouseで使用されるすべてのサブモジュールは、`.gitmodule` ファイルに一覧表示されています。 -- ライブラリがそのまま使用できる場合(デフォルトのケース)、上流リポジトリを直接参照できます。 -- ライブラリにパッチが必要な場合は、[ClickHouseのGitHub組織](https://github.com/ClickHouse)に上流リポジトリのフォークを作成します。 - -後者の場合、カスタムパッチを上流のコミットから可能な限り隔離することを目指します。 -そのため、統合したいブランチやタグから `ClickHouse/` プレフィックスを持つブランチを作成します。例えば `ClickHouse/2024_2`(ブランチ `2024_2` 用)や `ClickHouse/release/vX.Y.Z`(タグ `release/vX.Y.Z` 用)です。 -上流の開発ブランチ `master` / `main` / `dev` を追うことは避けてください(つまり、フォークしたリポジトリでプレフィックスブランチ `ClickHouse/master` / `ClickHouse/main` / `ClickHouse/dev` を使用しないでください)。 -そのようなブランチは移動する目的地であり、正しいバージョニングを難しくします。 -「プレフィックスブランチ」により、フォーク内での上流リポジトリからのプルがカスタムの `ClickHouse/` ブランチに影響を与えないことが保証されます。 -`contrib/` 内のサブモジュールは、フォークされたサードパーティレポジトリの `ClickHouse/` ブランチのみを追跡する必要があります。 - -パッチは外部ライブラリの `ClickHouse/` ブランチのみに適用されます。 - -これを行う方法は2つあります: -- フォークされたリポジトリの `ClickHouse/` プレフィックスブランチに対して新しい修正を行いたい場合、例えばサニタイザーの修正です。その場合、修正を `ClickHouse/` プレフィックスを持つブランチ(例: `ClickHouse/fix-sanitizer-disaster`)としてプッシュします。次に、その新しいブランチからカスタムトラッキングブランチ(例: `ClickHouse/2024_2 <-- ClickHouse/fix-sanitizer-disaster`)にPRを作成し、PRをマージします。 -- サブモジュールを更新し、以前のパッチを再適用する必要がある場合。この状況では、古いPRを再作成するのは手間がかかります。その代わりに、古いコミットを新しい `ClickHouse/` ブランチ(新しいバージョンに対応)にチェリーピックしてください。複数のコミットがあったPRのコミットをまとめることも自由です。最良のケースでは、カスタムパッチを上流に戻し、新しいバージョンではパッチを省略できる状況です。 - -サブモジュールが更新されたら、ClickHouse内のサブモジュールを新しいハッシュを指すように更新します。 - -サードパーティライブラリのパッチは、公式リポジトリを考慮して作成し、パッチを上流リポジトリに戻すことを検討してください。 -これにより、他の人もパッチの恩恵を受けることができ、ClickHouseチームにとってのメンテナンス負担も軽減されます。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/development/contrib.md.hash 
b/i18n/jp/docusaurus-plugin-content-docs/current/development/contrib.md.hash deleted file mode 100644 index fe4f72b3a72..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/development/contrib.md.hash +++ /dev/null @@ -1 +0,0 @@ -2eafd19d10897906 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/development/developer-instruction.md b/i18n/jp/docusaurus-plugin-content-docs/current/development/developer-instruction.md deleted file mode 100644 index f00cf8144f0..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/development/developer-instruction.md +++ /dev/null @@ -1,223 +0,0 @@ ---- -description: 'ClickHouse 開発の前提条件とセットアップ手順' -sidebar_label: '事前条件' -sidebar_position: 5 -slug: '/development/developer-instruction' -title: '開発者の事前条件' ---- - - - - - -# 前提条件 - -ClickHouseは、Linux、FreeBSD、macOS上でビルドできます。 -Windowsを使用している場合でも、Linuxを実行している仮想マシン(例:Ubuntuがインストールされた [VirtualBox](https://www.virtualbox.org/))でClickHouseをビルドできます。 - -## GitHubにリポジトリを作成する {#create-a-repository-on-github} - -ClickHouseの開発を開始するには、[GitHub](https://www.github.com/)アカウントが必要です。 -SSHキーをローカルで生成し(すでに存在しない場合)、パッチの寄稿において前提条件となるため、その公開キーをGitHubにアップロードしてください。 - -次に、画面の右上隅にある「fork」ボタンをクリックして、個人アカウントに[ClickHouseリポジトリ](https://github.com/ClickHouse/ClickHouse/)をフォークします。 - -変更を寄稿するには、まずフォークしたリポジトリのブランチに変更をコミットし、その後、メインリポジトリに対して変更を含む「Pull Request」を作成します。 - -Gitリポジトリで作業するためには、Gitをインストールしてください。例えば、Ubuntuでは、次のコマンドを実行します: - -```sh -sudo apt update -sudo apt install git -``` - -Gitのチートシートは[ここ](https://education.github.com/git-cheat-sheet-education.pdf)にあります。 -詳細なGitマニュアルは[こちら](https://git-scm.com/book/en/v2)です。 - -## リポジトリを開発用マシンにクローンする {#clone-the-repository-to-your-development-machine} - -まず、作業マシンにソースファイルをダウンロードします。つまり、リポジトリをクローンします: - -```sh -git clone git@github.com:your_github_username/ClickHouse.git # プレースホルダーをあなたのGitHubユーザー名に置き換えてください -cd ClickHouse -``` - -このコマンドは、ソースコード、テスト、およびその他のファイルを含む `ClickHouse/` ディレクトリを作成します。 -URLの後にカスタムディレクトリを指定できますが、このパスにはホワイトスペースが含まれないことが重要です。これは、後でビルドが壊れる可能性があるためです。 - -ClickHouseのGitリポジトリは、サブモジュールを使用してサードパーティライブラリをプルします。 -サブモジュールはデフォルトではチェックアウトされません。次のいずれかを実行できます: - -- `--recurse-submodules` オプションを付けて `git clone` を実行する。 - -- `--recurse-submodules`なしで `git clone` を実行した場合、すべてのサブモジュールを明示的にチェックアウトするために `git submodule update --init --jobs ` を実行します。 (``は、たとえば `12` に設定してダウンロードを並列化できます。) - -- `--recurse-submodules`なしで `git clone` が実行された場合、不要なファイルと履歴を省略してスペースを節約するために [スパース](https://github.blog/2020-01-17-bring-your-monorepo-down-to-size-with-sparse-checkout/) および [浅い](https://github.blog/2020-12-21-get-up-to-speed-with-partial-clone-and-shallow-clone/) サブモジュールのチェックアウトを使用するために `./contrib/update-submodules.sh` を実行します。この選択肢はCIによって使用されますが、サブモジュールとの作業を不便にし、遅くするため、ローカル開発には推奨されません。 - -Gitサブモジュールの状態を確認するには、`git submodule status`を実行します。 - -次のエラーメッセージが表示される場合: - -```bash -Permission denied (publickey). -fatal: Could not read from remote repository. - -Please make sure you have the correct access rights -and the repository exists. 
-``` - -GitHubに接続するためのSSHキーが不足しています。 -これらのキーは通常、`~/.ssh`にあります。 -SSHキーが受け入れられるためには、それらをGitHubの設定にアップロードする必要があります。 - -HTTPS経由でリポジトリをクローンすることも可能です: - -```sh -git clone https://github.com/ClickHouse/ClickHouse.git -``` - -ただし、これにより変更をサーバーに送信することはできません。 -一時的に使用することはできますが、後でSSHキーを追加し、`git remote`コマンドでリポジトリのリモートアドレスを置き換える必要があります。 - -ローカルリポジトリに元のClickHouseリポジトリのアドレスを追加して、そこから更新をプルすることもできます: - -```sh -git remote add upstream git@github.com:ClickHouse/ClickHouse.git -``` - -このコマンドを正常に実行すると、`git pull upstream master` を実行してメインのClickHouseリポジトリから更新をプルできるようになります。 - -:::tip -必ず `git push` をそのまま使用しないでください。間違ったリモートや間違ったブランチにプッシュしてしまう可能性があります。 -リモート名とブランチ名を明示的に指定することをお勧めします。例えば、`git push origin my_branch_name`のようにしてください。 -::: - -## コードを書く {#writing-code} - -以下は、ClickHouseのコードを書く際に便利なクイックリンク集です: - -- [ClickHouseアーキテクチャ](/development/architecture/). -- [コードスタイルガイド](/development/style/). -- [サードパーティライブラリ](/development/contrib#adding-and-maintaining-third-party-libraries) -- [テストの作成](/development/tests/) -- [オープンな問題](https://github.com/ClickHouse/ClickHouse/issues?q=is%3Aopen+is%3Aissue+label%3A%22easy+task%22) - -### IDE {#ide} - -[Visual Studio Code](https://code.visualstudio.com/) と [Neovim](https://neovim.io/) は、ClickHouseの開発において過去にうまく機能してきた2つの選択肢です。VS Codeを使用している場合は、[clangd拡張](https://marketplace.visualstudio.com/items?itemName=llvm-vs-code-extensions.vscode-clangd)を使用してIntelliSenseを置き換えることをお勧めします。こちらの方がパフォーマンスが優れています。 - -[CLion](https://www.jetbrains.com/clion/) はもう一つの素晴らしい選択肢です。ただし、ClickHouseのような大規模プロジェクトでは遅くなることがあります。CLionを使用する際の注意点は次のとおりです: - -- CLionは独自に`build`パスを作成し、ビルドタイプとして`debug`を自動的に選択します。 -- CLionで定義されたCMakeのバージョンを使用し、あなたがインストールしたものは使用しません。 -- CLionは`ninja`ではなく`make`を使用してビルドタスクを実行します(これは通常の動作です)。 - -他にも使用できるIDEには、[Sublime Text](https://www.sublimetext.com/)、[Qt Creator](https://www.qt.io/product/development-tools)、または[Kate](https://kate-editor.org/)があります。 - -## プルリクエストを作成する {#create-a-pull-request} - -GitHubのUIでフォークしたリポジトリに移動します。 -ブランチで開発している場合は、そのブランチを選択する必要があります。 -画面に「Pull request」ボタンがあります。 -本質的には、これは「私の変更をメインリポジトリに受け入れるリクエストを作成する」という意味です。 - -作業が完了していない場合でもプルリクエストを作成できます。 -この場合、タイトルの冒頭に「WIP」(作業中)と記載してください。後で変更可能です。 -これは、協力的なレビューおよび変更の議論、およびすべての利用可能なテストを実行するために便利です。 -変更内容の簡潔な説明を提供することが重要です。これは後でリリースの変更履歴を生成する際に使用されます。 - -ClickHouseの社員があなたのPRに「テスト可能」タグを付けると、テストが開始されます。 -最初のチェック(例えば、コードスタイル)の結果は数分以内に届きます。 -ビルドチェックの結果は30分以内に届きます。 -主要なテストセットの結果は1時間以内に報告されます。 - -システムは、あなたのプルリクエスト専用のClickHouseバイナリビルドを準備します。 -これらのビルドを取得するには、チェックリストの「Builds」エントリの横にある「Details」リンクをクリックします。 -そこには、デプロイ可能なClickHouseの.build .debパッケージへの直接リンクがあります(恐れがなければ本番サーバーでも展開できます)。 - -## ドキュメントを書く {#write-documentation} - -新しい機能を追加するプルリクエストには、適切なドキュメントが付随する必要があります。 -ドキュメントの変更をプレビューしたい場合の、ローカルでドキュメントページをビルドする手順は、README.mdファイルの[こちら](https://github.com/ClickHouse/clickhouse-docs)に記載されています。 -ClickHouseに新しい関数を追加する際には、以下のテンプレートをガイドとして使用できます: - -```markdown - -# newFunctionName - -関数の短い説明がここに入ります。これは、関数が何をするかや典型的な使用例を簡潔に説明するべきです。 - -**構文** - -\```sql -newFunctionName(arg1, arg2[, arg3]) -\``` - -**引数** - -- `arg1` — 引数の説明。 [データ型](../data-types/float.md) -- `arg2` — 引数の説明。 [データ型](../data-types/float.md) -- `arg3` — オプション引数の説明(オプション)。 [データ型](../data-types/float.md) - -**実装の詳細** - -関連がある場合は、実装の詳細の説明。 - -**返される値** - -- {関数が返すものをここに挿入します}を返します。 [データ型](../data-types/float.md) - -**例** - -クエリ: - -\```sql -SELECT 'write your example query here'; -\``` - -応答: - -\```response -┌───────────────────────────────────┐ -│ クエリの結果 │ -└───────────────────────────────────┘ -\``` -``` - -## テストデータの使用 {#using-test-data} - -ClickHouseの開発には、実際のデータセットをロードすることがしばしば必要です。 
-特に、パフォーマンステストには重要です。 -ウェブ分析用の特別に準備された匿名データセットがあります。 -このデータセットは、さらに約3GBの空きディスクスペースが必要です。 - -```sh - sudo apt install wget xz-utils - - wget https://datasets.clickhouse.com/hits/tsv/hits_v1.tsv.xz - wget https://datasets.clickhouse.com/visits/tsv/visits_v1.tsv.xz - - xz -v -d hits_v1.tsv.xz - xz -v -d visits_v1.tsv.xz - - clickhouse-client -``` - -clickhouse-clientで: - -```sql -CREATE DATABASE IF NOT EXISTS test; - -CREATE TABLE test.hits ( WatchID UInt64, JavaEnable UInt8, Title String, GoodEvent Int16, EventTime DateTime, EventDate Date, CounterID UInt32, ClientIP UInt32, ClientIP6 FixedString(16), RegionID UInt32, UserID UInt64, CounterClass Int8, OS UInt8, UserAgent UInt8, URL String, Referer String, URLDomain String, RefererDomain String, Refresh UInt8, IsRobot UInt8, RefererCategories Array(UInt16), URLCategories Array(UInt16), URLRegions Array(UInt32), RefererRegions Array(UInt32), ResolutionWidth UInt16, ResolutionHeight UInt16, ResolutionDepth UInt8, FlashMajor UInt8, FlashMinor UInt8, FlashMinor2 String, NetMajor UInt8, NetMinor UInt8, UserAgentMajor UInt16, UserAgentMinor FixedString(2), CookieEnable UInt8, JavascriptEnable UInt8, IsMobile UInt8, MobilePhone UInt8, MobilePhoneModel String, Params String, IPNetworkID UInt32, TraficSourceID Int8, SearchEngineID UInt16, SearchPhrase String, AdvEngineID UInt8, IsArtifical UInt8, WindowClientWidth UInt16, WindowClientHeight UInt16, ClientTimeZone Int16, ClientEventTime DateTime, SilverlightVersion1 UInt8, SilverlightVersion2 UInt8, SilverlightVersion3 UInt32, SilverlightVersion4 UInt16, PageCharset String, CodeVersion UInt32, IsLink UInt8, IsDownload UInt8, IsNotBounce UInt8, FUniqID UInt64, HID UInt32, IsOldCounter UInt8, IsEvent UInt8, IsParameter UInt8, DontCountHits UInt8, WithHash UInt8, HitColor FixedString(1), UTCEventTime DateTime, Age UInt8, Sex UInt8, Income UInt8, Interests UInt16, Robotness UInt8, GeneralInterests Array(UInt16), RemoteIP UInt32, RemoteIP6 FixedString(16), WindowName Int32, OpenerName Int32, HistoryLength Int16, BrowserLanguage FixedString(2), BrowserCountry FixedString(2), SocialNetwork String, SocialAction String, HTTPError UInt16, SendTiming Int32, DNSTiming Int32, ConnectTiming Int32, ResponseStartTiming Int32, ResponseEndTiming Int32, FetchTiming Int32, RedirectTiming Int32, DOMInteractiveTiming Int32, DOMContentLoadedTiming Int32, DOMCompleteTiming Int32, LoadEventStartTiming Int32, LoadEventEndTiming Int32, NSToDOMContentLoadedTiming Int32, FirstPaintTiming Int32, RedirectCount Int8, SocialSourceNetworkID UInt8, SocialSourcePage String, ParamPrice Int64, ParamOrderID String, ParamCurrency FixedString(3), ParamCurrencyID UInt16, GoalsReached Array(UInt32), OpenstatServiceName String, OpenstatCampaignID String, OpenstatAdID String, OpenstatSourceID String, UTMSource String, UTMMedium String, UTMCampaign String, UTMContent String, UTMTerm String, FromTag String, HasGCLID UInt8, RefererHash UInt64, URLHash UInt64, CLID UInt32, YCLID UInt64, ShareService String, ShareURL String, ShareTitle String, `ParsedParams.Key1` Array(String), `ParsedParams.Key2` Array(String), `ParsedParams.Key3` Array(String), `ParsedParams.Key4` Array(String), `ParsedParams.Key5` Array(String), `ParsedParams.ValueDouble` Array(Float64), IslandID FixedString(16), RequestNum UInt32, RequestTry UInt8) ENGINE = MergeTree PARTITION BY toYYYYMM(EventDate) SAMPLE BY intHash32(UserID) ORDER BY (CounterID, EventDate, intHash32(UserID), EventTime); - -CREATE TABLE test.visits ( CounterID UInt32, StartDate Date, Sign Int8, IsNew UInt8, 
VisitID UInt64, UserID UInt64, StartTime DateTime, Duration UInt32, UTCStartTime DateTime, PageViews Int32, Hits Int32, IsBounce UInt8, Referer String, StartURL String, RefererDomain String, StartURLDomain String, EndURL String, LinkURL String, IsDownload UInt8, TraficSourceID Int8, SearchEngineID UInt16, SearchPhrase String, AdvEngineID UInt8, PlaceID Int32, RefererCategories Array(UInt16), URLCategories Array(UInt16), URLRegions Array(UInt32), RefererRegions Array(UInt32), IsYandex UInt8, GoalReachesDepth Int32, GoalReachesURL Int32, GoalReachesAny Int32, SocialSourceNetworkID UInt8, SocialSourcePage String, MobilePhoneModel String, ClientEventTime DateTime, RegionID UInt32, ClientIP UInt32, ClientIP6 FixedString(16), RemoteIP UInt32, RemoteIP6 FixedString(16), IPNetworkID UInt32, SilverlightVersion3 UInt32, CodeVersion UInt32, ResolutionWidth UInt16, ResolutionHeight UInt16, UserAgentMajor UInt16, UserAgentMinor UInt16, WindowClientWidth UInt16, WindowClientHeight UInt16, SilverlightVersion2 UInt8, SilverlightVersion4 UInt16, FlashVersion3 UInt16, FlashVersion4 UInt16, ClientTimeZone Int16, OS UInt8, UserAgent UInt8, ResolutionDepth UInt8, FlashMajor UInt8, FlashMinor UInt8, NetMajor UInt8, NetMinor UInt8, MobilePhone UInt8, SilverlightVersion1 UInt8, Age UInt8, Sex UInt8, Income UInt8, JavaEnable UInt8, CookieEnable UInt8, JavascriptEnable UInt8, IsMobile UInt8, BrowserLanguage UInt16, BrowserCountry UInt16, Interests UInt16, Robotness UInt8, GeneralInterests Array(UInt16), Params Array(String), `Goals.ID` Array(UInt32), `Goals.Serial` Array(UInt32), `Goals.EventTime` Array(DateTime), `Goals.Price` Array(Int64), `Goals.OrderID` Array(String), `Goals.CurrencyID` Array(UInt32), WatchIDs Array(UInt64), ParamSumPrice Int64, ParamCurrency FixedString(3), ParamCurrencyID UInt16, ClickLogID UInt64, ClickEventID Int32, ClickGoodEvent Int32, ClickEventTime DateTime, ClickPriorityID Int32, ClickPhraseID Int32, ClickPageID Int32, ClickPlaceID Int32, ClickTypeID Int32, ClickResourceID Int32, ClickCost UInt32, ClickClientIP UInt32, ClickDomainID UInt32, ClickURL String, ClickAttempt UInt8, ClickOrderID UInt32, ClickBannerID UInt32, ClickMarketCategoryID UInt32, ClickMarketPP UInt32, ClickMarketCategoryName String, ClickMarketPPName String, ClickAWAPSCampaignName String, ClickPageName String, ClickTargetType UInt16, ClickTargetPhraseID UInt64, ClickContextType UInt8, ClickSelectType Int8, ClickOptions String, ClickGroupBannerID Int32, OpenstatServiceName String, OpenstatCampaignID String, OpenstatAdID String, OpenstatSourceID String, UTMSource String, UTMMedium String, UTMCampaign String, UTMContent String, UTMTerm String, FromTag String, HasGCLID UInt8, FirstVisit DateTime, PredLastVisit Date, LastVisit Date, TotalVisits UInt32, `TraficSource.ID` Array(Int8), `TraficSource.SearchEngineID` Array(UInt16), `TraficSource.AdvEngineID` Array(UInt8), `TraficSource.PlaceID` Array(UInt16), `TraficSource.SocialSourceNetworkID` Array(UInt8), `TraficSource.Domain` Array(String), `TraficSource.SearchPhrase` Array(String), `TraficSource.SocialSourcePage` Array(String), Attendance FixedString(16), CLID UInt32, YCLID UInt64, NormalizedRefererHash UInt64, SearchPhraseHash UInt64, RefererDomainHash UInt64, NormalizedStartURLHash UInt64, StartURLDomainHash UInt64, NormalizedEndURLHash UInt64, TopLevelDomain UInt64, URLScheme UInt64, OpenstatServiceNameHash UInt64, OpenstatCampaignIDHash UInt64, OpenstatAdIDHash UInt64, OpenstatSourceIDHash UInt64, UTMSourceHash UInt64, UTMMediumHash UInt64, UTMCampaignHash UInt64, 
UTMContentHash UInt64, UTMTermHash UInt64, FromHash UInt64, WebVisorEnabled UInt8, WebVisorActivity UInt32, `ParsedParams.Key1` Array(String), `ParsedParams.Key2` Array(String), `ParsedParams.Key3` Array(String), `ParsedParams.Key4` Array(String), `ParsedParams.Key5` Array(String), `ParsedParams.ValueDouble` Array(Float64), `Market.Type` Array(UInt8), `Market.GoalID` Array(UInt32), `Market.OrderID` Array(String), `Market.OrderPrice` Array(Int64), `Market.PP` Array(UInt32), `Market.DirectPlaceID` Array(UInt32), `Market.DirectOrderID` Array(UInt32), `Market.DirectBannerID` Array(UInt32), `Market.GoodID` Array(String), `Market.GoodName` Array(String), `Market.GoodQuantity` Array(Int32), `Market.GoodPrice` Array(Int64), IslandID FixedString(16)) ENGINE = CollapsingMergeTree(Sign) PARTITION BY toYYYYMM(StartDate) SAMPLE BY intHash32(UserID) ORDER BY (CounterID, StartDate, intHash32(UserID), VisitID); - -``` - -データをインポートします: - -```bash -clickhouse-client --max_insert_block_size 100000 --query "INSERT INTO test.hits FORMAT TSV" < hits_v1.tsv -clickhouse-client --max_insert_block_size 100000 --query "INSERT INTO test.visits FORMAT TSV" < visits_v1.tsv -``` diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/development/developer-instruction.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/development/developer-instruction.md.hash deleted file mode 100644 index d110e0ea572..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/development/developer-instruction.md.hash +++ /dev/null @@ -1 +0,0 @@ -1e4a13cc81c6cf75 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/development/images/concurrency.png b/i18n/jp/docusaurus-plugin-content-docs/current/development/images/concurrency.png deleted file mode 100644 index ffd344a54fb1acb50bbf5ed6d2049689e7de93c9..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 34535 zcmd43byQT}8$Jr6poFL>9nvb@-QChiN=wJkjkJ`AlrVrIpwc;XcQ*{(-QCn{IeWkReV^xfpKaJ%MJX(FLUbe~BrF+eaTO$_2gKmd=MPcA z"(qTttkh^UO(LvVOKGzkTNCvcL`a#FQ3b8*g5T? 
z{lz$>K4b-X8&X}de$23KAyQQPD<8nD-}zLvCslN!QrCJq6|!BPAB8fxl+F@Q3x?5{)skL7rJ z=H}CwUE@YYKrL80wWCin^qN%6b$-;bZ9>1%-xvcs@oKtZl}Y6B&LbsjYv~&|#@*Kk zB!bU&rKsgPxlkk`2+|U2ccJ!Y=HSp)vqs(Vw8bS)u+~!Gjs)a&T}8ougBe<2eOTI` z_gZlOF|Uo-M%cOUbLUILSA>=n+twDIqvFK?kNU;OnXhbamj7II!XNpl)d)LLSh-KG zs4dhUo8v`s$pO^lwx_NWB}nq$CdEwmabyH3|oUrNN%d4!s7e&!4i+nQ6gL0 z*H*(>V%}4)GBZ~{Yjr{Fak!-M%_IfBt7{cCp*^qM5|sfJ6m|JS^Gfu*vOb}(5JH+~ zBZwH#0j`}RVp2G^YOvQ zjqk^c=IXpQ0#_PVSD$<~Z(w=w<&DVwPzhjuTCPG}1LPn>=jDY|w##hOURX`n{YW6Z z)Yt*`Q#WgNzgnnD9NzUv3`8q(Xo$am(Ltmm20C+o;AtR@B_5P>8B+H0@_wIQ8OrJ& zEciLBEOv-Ph0WmTSOy(m=Wt+(S|sE&p&?-VoL&y%i}LyUUs`~`%k!jz9&EQ;MLI0f zf60*V1hy(q_V*!vl)wAWanVaNh`ng=ly&{whaE>H&<{!23J_zfpyFe zFhKY+0|$q4vYPEZo^^4bZRWre*QYr$*p|B{K4MEh7R~@KynA}OcKaUpVQuJnlH@|M zNZ>JT3{&Wg$&cd^Ig2OII4y5a{sgSIm&)SKw6OqL?EaXl#yuUK$L4JpH&1iE4Vy#E z-V?aFRb`%>Gscz17XGh%}~$vuV0@BHQ)37wYB~1qb)c! z@j#~nQ!5-|Kp?2gayUx@jNd&__X zzpJ~K)05?8ZEBH+j|Ii+giwaAX{Idxlh)2IuHEii8^hTLxtD)Sv>i&xQbIwquL^>A zA|_b^;04#5`0_eq&p_h_%fO(Q3SnAyN^2)wFRi`3y;0+LH%JsGdUV-wRY=H?Ib(3; z+Y|*FTFAk6g&a)IxP&?c2pIi`UfCnmq=C&X!;Nk>*zuQ`I zPQ8!2?9Gz+=Vb{B3xgMH;o;xV!EM-rI)B>}f|Dh(&Cl%?fWn{a(=@-aAx_Uudb%zb zk+1}>&eYvQZIDt@4C(1p?jDX$f9XQ;P_5$Nat0gn3T~Zl({&=jVo(VPI>K)!CC+{QhqUc3M8z0Oy z$^b_fo|lIf_^1TMI{$GbY;;?C3?0w%j|K6s8@yI-d*J2GLBjeE4>>7;j+aqjenbG# zb`eGWH&TYmUzs#+nxIvmw<>esfJl~7~-otKC z;Ch_>x(f;kMV_>tj~{6k7hQzBH*rpXV@_4|ZOHNL#?Cc^E=(IrUm|N>G!a3g=f}vhSpO{#^ zIZ;N(dFMr}>%!yOhx+dqF5$AHUFL>uj<@$vJLT(QS?C@pe*73BrI*G+O#x?@T`Nc9 zS;maPb&@y^mD8JpYZAXd9zkir2HkFq{^qNYKid9*fgrY#eFu3yvCvoNL5vaPO#%DM zc=2aP0Bk3C5mA2t_Z`x&8ayMgBHajDF;LL6{(4~?O~inN z2J2W;MTchBvJi?@@N7VdDTqavAP?v^%y!7dWj8Z3BLTk%!11aV&Lg`T{RuxM+>es; zY%H*iW?ve1lIw4-Ug*uYJWJ5kcJesy$-!&7e%7P|ZhfVsRBSQInGPOO>sZ_;cReGV ze$KYgR1*V_1|&Uz5-AHae@w2%adQyqQ`DO5Z*(DDPiDx@B9x@Sk6 zoSvLi@<+V8B9*ZB8EsOSZobz>;%b`Vgt=tN-7}*SI%W~G%(Evy6d9J!yw`H9?iZoc zj{arW|Fx0pbTB9|h(K6`wjgfZ!q{$*ZBS-Me?8tR_XE`J?!XlqnVL?EoBqA++E!^M z!4yaJhyVc<-E~PD`ez=t?CWda*s7Jq~K?r*(k2l<5^f>y3J#=olCY zv+wjjo1cMA?OvJQ`j3Rc;TzX821%)?u%i^TUFULSf>yzGtq7e@yg}C!%jp{2W;tki!JDl$zAtefm z)dJA^B;7sP>Q6~XAiVc6a(ebw1fZOSU`YIH$`bg=vG0zrher>{z&%cWm_u8x?`WpF zoA-4sYvDd{4#9$6#esKPz;h+qid}T_BTZ9udGo^=2J1s=`{Aru@T=y*Cw+gTc0}z3 z*R%RQE+swvNmy8z$?XDE8V#tsD}L@Xa))r&$ne`3&kEaWLC*tzuklx;x7Q~J^mGAK z1)U5g3fBU}5`?~ZHdqGFW5Q<47A6119hSOo`Zy)f|95 z9-Bvjnd%f*TRX#JZ_x!5-uzZm-`;2f;}lUX+A2y;DuV|pLV!7rok@s`Lt+mOP!NzkM#VH^s8}`Z!bDAtO5_0-*y?9QFU{3gEK)GQP3*c*%dr| zsmKWICJY2hkVYdsphe)dg7lyl-xou4zboXB^!stY#0S!gS<5wYa&r7H&rgIx{&<56 zb_K=)z^7DwahMY)vmlN~C2FCn-^IwJQUi=cT1`z&5F1j1&BlN#n(g5cl%a?`v7XhlBGw~Ent)M@i6C=`?S#JtNFmN`( zJ>YC0Wpe{sJ0g{YK+3zB{;XVF_yCFbmFwe!3JB)das;NJP}#x(E9yWPTJ)E9u;AlR ziC-vZ@ZPp``ZoKrn$o0UTHYE&1PQs!-Xx&8aRuRi_R9q-LNqFY`%2){x^d%%H+r(K zuP=(P0qkfD9zTSNOm782BxmdA#wvkyzAur)vi_LBj5WHP5zaN98@Nh=iT+b&!wYg; zFnp1wj0rBQO@ujOGeD+RR9@k?OW3(=_LqBrG>QV49{RFau&SbT=ZG*6nk)~b@uJ}9 zB8`k>Ft35(a=AB_9DxuM6N6+{Py>30=jJj&UdoUVb8HMF8?;-1uaC{tu5e|E*D?9+ z^(o|kh$t!&$rN=7&yhJ>0TV$X$O(?k^-+H4z6*_#?7sdh4O(+BEM+n6ryp~4Ls<9H$(d7&sORfPmSYp_f`L2Pe@IcfEY$SS%2V-o=u)6B!{xHg&(F192lEc#g5*f+OS38NIkp`u3i_hSQRG|BUMauTky zdh>G$F@jl4j0#+D3Rt#!dKA@WEwCu!fSdU$sGT4PT>*T&uobJK7*kLW3RW;*zg1>C zTE`+RjM2OLry7p!L4C<+QoFN1R3h-H0X9hty18-&cfP@a0X)EgLE{GCZNy6;Fp$*A zhBfz|h_L#4Veoh-L6e&gj4VWx*o!%%-VF>YE^3TKrG)s-JN>{-f?tIK zHr6VSQSDcHJjzgKqRiOfkRS!;V{^!vC|HI))(0rfT0Aj9Yl%uvfRz^u0n!-MZNMH3 z+7)c?t#N!Pq|$s(;Mk^ren>wC>W3uzjtT*ijEGN%R{%p61~w$F(Ao6Gqak1^LhbFS z3x}MRg2F;l=!G{oA2`F2J_7B6xasKzu%A4J8-X2sd36=NOwXjM4#p#VJuHydf4qu% zPBN~~TcWsm_@2aGn-f)aO?WPJ4R{eAw&Mo1cw()LeZgDJjJDLVJ;aBACCGJLXp 
z$f@8Oh=!*hFaFHqnrGZSJtGtqb!<*qfE9(N8puBUeAkzP_f)lYejARN6itKb{v` zMecCHS{|PeA#rC&*}3gr)hPSQ3)9*mh1UtF>-$kvRQpy$IgpikL|bl5NMw!tmulM2 zu9B+8MElyeWs6495UK$7XXOVExKSj`!KyN($5Kp9O*QDvZ3~Nup*kfZ|jQ4sNf_*5irF&dw$Y^Y-?ROh~vJC2WJy{TS1N<)$|m0&tC` zFQe{~N#kGUq&7nLPqKV&X{TfR5ctq4O%dM4i2jyK=&?HY9qS9QB zFZlpoPRz=ppYI5wg1si%Xnwz*!S)6X{DK`nYWnc-5Ws;%lnUeZYn)pjptAul4Bi}w z>UxHUL0CYf(BzDbjcsJKFQulGkek|u>C7;cl3t1&iZf0&$wZD3tFCYOJ&NyjFE2~_i#$8k3e`UZb~k9i+k}aaAEzQi_qM2rgYWuvpU25C zazH5$wak-!5Z&m0NSxqIQ@66Q!7w#7&GvyFl!JqVp6OW(Z9?NhWlClqGMiVz-H}td z{#UUexp1wLyq*!(4YHpsDKBLYLomitOI^h1diy`5xW~Cr-AIgo9XwAo@`Sa7jw zmMI@?RVt0&K|_Mc%gYP#cbo0}8G{g}$8m*CtIQfB^E8*8s3xT+JH5%ab#?XJjpYdz%~9Xt{bX>zKQ66URHbcVpWcT>s) z<2j4T?=N{S#6ce_1I~WE*8WDhvR&sr-;oo6 zZYR`w)T9{qD?KYOtTc1Rm&GQgD<$qj>Xw4x!^`DNjzkifYX5$9k#9`S#E2O2oY*rl z&y<(1l|E0UyB|O6=@~*NdWFY@GI zorqM>JissYBApEjSJ!G1_D`TVwYM)G_JoW@2wx(=BI31ylu9QjrRygWj11->qnKW)X+I?)qr+J?8JPDS2wdQO6@1#5_xF^Z| zjzjx!`G@#UvsuO?{CI3BE_!}F7}t5~bvB;tuNwQ}r*Vu#uKv>G!%_NIPgl;~GMMMiseM<#`4NorFwr;ZEaXw96oH5-uJcC)q8*KnnI6GBOUw#5v70& zbtxAY71)sRAj(tT$|^ru0#+*H>hG(oKx7n0qFlcRV&WE`8{hvE{ zu}bK5Mg;H+YRVUxnaJMWE5*gdA4UvUdt(cm14Za@aZgw>Hqa0t{a9HUBe=L&7_G4o zWG8!9zXb9hI0lP2Lm;>R{OsVv&`^{TYj}M>6fEzI;-Zv=JIvTO-FR*6*|Z6Lwr%ja z{P6R4J%3t@y59d(z4uU?&5B?2*4YnH9BNx)I&6DUG8$wU8_nI%>16SlKN8!t1Da`4 zqR-&CJ3Gq!=H}O(6;qZkVwK(9t}72F8i1bUFm_wvdP=DS`lk^?@L%G(bxMFvXMXt? zaNL0Ls($#OHXR_t#YHC}@dZVgK-f>;*N<+N$jD^X^3PLMuYk}xS%35WW`DES1}6|A z2`ye-P==X<&<`J$GngeH=>ibBw}0vVpW}*wNpi&3L+Rf43xM}rhg2~#3@n1rbb_EX zMW(QwrQ}-3#sGX5{^=6|EcTXz>4e~Spho!}Tp%erhMbG&R+-*rdrn41Tzk72d`#bB z$7T0)6K_u7fT0l+l^;3q>D7l02I)~zn0$PE%qeQ#7qF6UUBMLzR~#D2^wB9hiXFh= z2-Mqc{Q?^o?|e&fXP<0L8YsrimL>~KI2&$6nd0NNpI44nX0MYUggq;*hCO+5{Tl}a zwGk|a8r1rTIvB8Nb3kodh+2||=8UKOL`uVjgmvZ4b;}OH%jJ}J@59Q@941&rZiRO0 zaM@fJ6{X$l5F^LLL_i;!q*{1YCN zd}Fv;qY37%cIYa9RYEczs|QheH62@c~h+Z4w~-MM+V* ze}3{a%5v?q)=7U1Y2k(8h&0TUxa+1Dy{O^*LG2y5cEGvP#?8%*Iy4Mo#q#ucK#%t- zCATJ0jU95HWX4(>i21Hp-P6-U5prf-p_l+0M49D8r*d6_?BnhR8#J3cLKfK?q=t^;m&ht>^ll-FSo$rvSjb|p^z!9Lm26A?{gMs?L!$B0qT@?O-s^ttz=2akg{MslkT55Sa-i4YVKQC#Xg z=b0DfF72u{`$Y|tz7Iw3ys_)LsNw$;;O~z&Xl&e?IxH?Bfj2cd$ znVpkjMmh0z?cR(Fg}5$O&I%2X#a)kFU$tJi%?JY$*U(UmokOO#Ot+tu><}H?7FQSA zu%X!(*c{B9Fqq4w`nQkO_7Or2!r`8tvE^1+nyh{giIo_%-0}HliOYjOYUkj9ZyX8s zM*pp%F@0GTs8ScF;l`stddLD3ALLB1)6jg<;~SheiwEiwEjRd(5v+Lq;6JDh{;dOw z6pF@1-~FHX@*_DNqa|7-m6Zcp6x#pC1pubEELo)UTg?sv_7(XJbxi4mRw z_giw|z$Xk)7igl?$plVLbXN1~pdx{VGGe{)!5Ep-lyff-OUE)nTi|ib!wg8r~ z*`2+cWH$Bpw*|;7zYUp!JNi-)mNcl&E9w`zE*ZP$TXOY`(v?_&_Fn_Vhd2-iYlrLj z;*T!)hO=-Lb#z$4YYK;2#K8Ep`F&`?LRJ>NT(vtA_wNfYZ54MUAQj=BU8sV{wuJkg zuwH;O6%8U7lkSJYG)!bXr$SgdIDiVrBqA?w*%JhY_qe}KRu&RM_*@c%+fti0en*}x zT@q-G*dxL|e82%xNq?rXQ4WqL`={UEw(d@ep{0N|6nU6HHw`~T&Gs>{v_Op0>*_pX zEXxA-hm{2P1No?zQ6eEsp`mCXQhJXZDbdOpzGMDBUKF8v5@Sc?ddQitVRIFDlia$h zl$3foV0kh4O$~aoDM^(FQFDE;d-WxB;R~B)FiKe%P33%St(=&!g=6RpPmP1px1T+; z&XS>fB6o30$fZeNR?Eo9IAripc4m)t~=fI%_!*;5ntY4u1Yr-I-cMgvC*f zSdHX7B5*c-4A0}~QmARH*izP0U~mn8Qv;6s*r+n6`P+!x^y*ZVQODfJ^#G&VW!_4o z1C09+@&T-bSV`gR34qj;fzd{QH545rUA{f>G?!!7)+Wl<0Pn&= z#M4UvW;%?NeEEqCa2u5K5*i3Nz~(+YVg=56R3a;*bWA^FE}$e>3WA4ehMW+X9jZW+ zn5cY>oDUrk%Vp&0h!w#e1G_nY2z(9vV2HsQ*1nkIE~2{I|+(W*-TY_sjaUU8`!Y3 zM*l0|aafm}dOzlcMvI^bd457JW#xBqG1`BgmA0}q|2i{o1A?*2_Q9;l!{;}Yp_k=Q zxfUCvMjVCCeGLs>3#HZkJ2ur}yoCtD7SHt?vS-c)MdWS&A-;ix+iLH$>@h^$7{aLK z&<~Es=$!8ao&x!latR#5c}N?uR42kYet7b)fBu2x4&3_DSZ!Q$b5fQ)kBfjo>=-`7 zp0K&iC{>)Z0^!}cV5EMCpv+CYe!WUC=vT((WQALcPtu=&moL$*J)*$r^=}2s+Dk@G z@aikp^tWe!s{cJhN;{%)XZp%~8;k16twbDZXN%uN`rQ-6tPhhVeGcFCB}kFbwqjUW z{X-%C(-E9rv7JO9ut;tz-O@|_FFMio0}dRfo#)J4TaMx)LB|uBE!DL8_jkRTnm?k& 
zj0n{!fvHv_qZ*)d_Dr#p@wP`b_e1Ke-_t@IB_k1Uvk#TRp%w8e2_m zkI3alE5~&zdms70>nkvCD~a^v)Vh#6+LI#zlCXEOVg*2RS*TIr+ugIXSisBX2c5_h zFkKKpOS8TE-_s7yUYtz3YTQ;_=Up#>>c7hfm#K@bxaTkV3%oc}IkF)<$= zK*{*u-zv0%CFj4s;-8@X`w^&b|Bt`)X}b+w>ByfvZl?|F-;GyEF9SZQz{KvM$&v{3 zzn@sp+RH!rpKmKE+1?}&|MzvkAo~Yj|L?vOa|Gf}JT-?cYA7+%k(FIjLVT4_ z#xYMbTAKX#EW3v{@7zt8Ce1{RuR!M)FJhU~Uocx36L{6(etOSI;;~Qi@A&D!!&kqT z$M;z6$_tIV2?qSAad9+L+bE{kEst#`{hUa*?pmPq8hVgn+@I=y5=%+7BXo5LvKF!< zPfN|-?zqO;%aRqQ4Yd=)532N+vBT9c6}IJEU;EKO&+RFZ`0t?8HukI8U|!E51*g{D`*)c|ew z)VOEtE&(~43N8&ysDggYXnE7-0AmD){Ar3F`;xxkCS<3>*A@93^B4_m-ISEqkE;qL zB(KaOXBX#7`$^KCKd;4vb0L$mvyK$#=l4lso6=A#vx`>H+k>X4-f@2nmVXcQ65%`$ zJ9J++qJ5*QUz_~Gx;u{l!J;OXu5gNW(mxYPrt!@dOi})nrCvyRbmkJ1ZYW`i8&!kE zFggvivntV#AR9g#CvQ+p`uW6cUe2}o(W7H9;}x!5_h!XoFDPonJagmVi8Wx;yZ@?L zF3QV5@Hpc(KLR1wu$bu7lsn-)R^wapl9-@}G_1?LLwei0dHT11&D^o^-gJ!sIs$Q; zxR|G3zg}zbO8f4{`S!2OY#&m3Gz0<>M$HS0F#;hUzl?+WH^h1V-}&EQ#0aIg(L!Qi RDeC9nR@PLiRE#2J>ONf+!ba#Vvcb7CscT0CKyYGYF-~XKV z969dl^V~akX70>qiclp5X)JV7bT~LTESXOqzrevEA;H1H$D*MC|I#7USp)p_!v2${ z6C50N@AD6QJQFq<92_;A%*PL^?rBFWZr=Eyxz6)7>r)G7aSZu9w~yAfO1+WPjEsgp z$H&SqU%n&@2_wi0)MpEs|GC=YhwJG#@j`|=?_{hMd~{;WxA1KOf7|==g|XSmpY-(f z_f98mVFebX;%GEb6>fQn(a|6xyLpuM5$cmaonF-e$UVM_Vu0FRRAADYNSCR z?{Gqc6cORgI|}*KnCR#auCBaIO-*;+BB~(Cm_FbLXf&)M&l`on;Mz+3#)Cc9J3~xb zO}T~zoI4M%H?KXfe;_;`70tmR#t&FkXFY)Lx}>DLqQg+9k=?Kfq^{EyTEqYlK^OZX zcQT}F9Y*@I1mXFtxI|Ph&;WwKekCPEx@`UMh_55ZtCln!MtL-t36ER_U0WzpM+;C< z@(w@z<7Rji64E!^O;IrrD9&P<9s1=y!mPTj%->OB|`AuP~oa`K>WT=9%-XO4&B3 z47pMU^KF8LO+o1b2KM~+0_pBTl*_gal(=YWME{=tKiZMyK%hTY&Lehh1P+ksi=Bl( z)o)!}gqf-J>$LKf|3`M(Ke7?_1tGiPs>?RkR}L+D&qXbwgOcR%$SNn)(gzblWxro1lJT z2N(u$dXw%Sr#p*E!s;yv3hdfL`F$m7O!GkH1RJsT4(Bys-}Il>!iYC!NmCXV2G z!r7#lx44ou(vm_(O?!46H_qM9+=UE;F+Q38qxl=$KlMPC1c7QbYuHDV@f;u}!>qKW zawIyeiL0(+?8MYIsLvFLhbaBiQ4BG5F0pG*4lH8aD!KBR-xt^vl?&GV=?Vbj03jMI zz{k)4+XMUv_>T@W5NNU;O1iaJ_i})(?SS!O*_IqT_}|jF*8k~|cnI}BGCUOQ1gZIm zE}1GmGRN+_*6jXfs#-b*)^0O{2ei}-)lc>L5(3m9|F1-LWuxAAVYa&MJ3~2(ze-wXY zXp#jaeHm9^;U_BC$?o{(yF~Eogw)I=!2DoX33i&+7*uIo9sMUj8uZxGk2K%-ql!zy zT<_Qm+*SdZfW3v5Qn|Y}?7xACDz(a{Mzj7A%nwhA|jUJrI0ZwTKXwr{BR0&<(UdPL3=Ptvlzc#4ArLyKFql=Z{MEoa|UYUQ&KPCYv zKlGZYz+(D!C{WGv*Tq2n`E~`Rv9Z!S~kd)c7 zA!c>#e;0v{kr5sEuwb1~{~uK!i-$nDz!B)L>Jhy~LcE{iiY?L7=;H=^w+1(&QbGFs8xq$o?J2ePIlwQVik!TR~YwkQ&0ukimnG57X?GTH-@!25ri zK|;rgEkW88SDlLJD6n`%j0@wJpH{j(>bjVeemQU|`-ysI>T{^0z_YHn05_9x_GcD( z%YU@0#wXEal~`8sK>@WP%K5}6AC@zSr*WNMTpReU=*=%#$%KF}45F;pRVW9*XqlbV$ zvJ>ksRwoO@(n~YfWa|poC%g-0qNkE>|6O(DEhilnn_OzZOVwk{o6Yd7!_S7Ja6q;0 z%loIG0t@0qHu1U?p?)oF4$V~9T<;L~>S+NK=D3%%iJkxmaHYPnbu+MO|6#G+x-yOYq%J*vPcL|wh zAIUokJTN#JRjC&@I-fnpdk6k>4d&g-dc=UsSN{${R1Q8|?LngHf&=ws6&LA(*{MZE zaWcnIQ$!`iq6Z+GqHo;g%*@OX^*K`@K3%8?hye823s`jpz{S&!|I1@p|L({?p;?S1O`b zw0I0nm;V%IL7; zOMaL}5VlB?-`?~3aK8V6?FC8*IBa~c=c;mXh3h!q4At+AsIWvD34JnynoiZWYMI4uXWWRa&aRyvfbbh6xtGH>OMa*SRL1_Sv%qz9cLqBhu%tGGqrkk!fIUGK}`O)a^-#2BET+7DE7*e%VUH zMT|*3P{tn|Cdg4*66Uh~NE}HS!SAcE_i==mOrcPvf)PNG;l`+g5$*pwy=wW6f@{au z<-1SA4CM_0U(C=LWV=3g!TIYXg<(jt|J(tzPw5RE^;qbpLOXzzp-=OoP=-zCcQYkb zxxPL^u7Z?NsETo@g-k+%V$GfZ%zk5Inr&iWev>+mu6BX|HLeA`qXQ!={;Sqkds;Hl z8X!bk`^hDBwv716Vq0}F ze1SeQayM2P>ce-~kQ~RPo)BNEHeCTjglpw+Mq6odx#v?q7d1Ml-YPPUC~TgGceh@A z`RY*c@9LoP5jUHnVo#wZW*bfYJSS?DM{+fDY@whe5ec7}6K{jJC;O(ek5=6=vT-u}k7JBTy z-Nuo51&*4jyxx!vFZ%Rd?}?h*W9!w{x7JBr`kgzD%1ku=OSXrShI^4_F3ElAf})T+ zmHrkiyd2kF0 z60HeV+9jrc2cJ#%;k6+r0kh@PcdR|ns?FDRUW({;I8^Xrr`mP}D?5pamd%;7CX?0m zqQv%RNr@Irocl@@clw=tr3z=0X_$*M)$#mm7Q>3{n^8Xi5B#+lO4du3B4D4?Z8xhu z!<=*)A)ih@9_gLU=kt~DoI~o# z{iU)0t9imCvH)cKiBNV>^!N0u4Pu^3yOXmHw! 
zAy$#W=isqaUnmrXVWO82VI!>VczK~T19sIT!9+K$d6R-p^U*<}@VA+Rig`W*o?1J; zzrQ|-$_BidwyiB|DxY1x?~^Co_V#wQ%VE7!H8Txo6qOkif()nQW76_PKK5{C-sjKf zwJ=j|d84g%Z#dFk?biuwvAbKzb}FCK%rexCb|0*zAkoU!$=j-Ibb;MPg1-~obD<9r z(OkHuHY0|MzvaGPGZ%K~0xUMe%k^mg`~hyWOmEubvBBAQ>E~C&{+`HV!RyP;@NrS; z3T?Z04rJrfD1rWbdSEQSR)`R<-D>yTn)hb~1y)fxSZHr=q?z}m?wyY2-8V98J;`bA zlLIf*TZMFT2N4ex#A)r?ZlkKZQgrtmz@p3_xysRTNd?09i2mu*7mql2@U{9=)I~CS zsB`Yw#;jKrW?;AKthUzzFr1KC<5O}Yr$JBPBdth#wTSiUY8$ggC9dBQJ*8mj;kjP4 z2c)q*s=PVJ8GZp{=DxNJ=FGmnrd0sA!%t(Db_hXxUq{Y1XS4H4koLH1$%x3L4<=Qi zS{({FXx4Iz-^TZ*2VR`peHMv{D(QMl@C5JIal5lo$?ztQLxtZ#dS*TQU2s!vK#)!F z1?|{rUFP(?y#mU;2&|UZX*=p*jP_nNoGXDClR?3?z29k#GXK=Xc9HI`=7rkn5aN19 zeR*Z|9Ix})jorSrz)y8UF>28ceXkO=&<<*{Y8Ct-*3fix6rH8?NQy-#w~ogN?g$)_ zhpS)0eHg++sF}uu*yzq*+Eg`e)qD3lSoQqdP!I{y6v!3m6%z3Zr$c89`;9pbxJj>V z7+YIo;?LR%xESep6M83}Tq(6$Y=uQ;q^C_s$86`a{aIbE*Ze{rX=~*ypzXxz5k3yt zN5hxpnA1E3s3DeZ*RjR-N0705 zjR`@u8Zt!Myx=Q+SH4#|L|ZS$^%mxe8ci3B8k?qPDU>ZPuND%e3%E`Ux1P_k7x}la~E0vRG?7RHG;=InZkhC-k(vwLEhh zsiI~2M$h}6r=sJY7jC*hZnKa?l*=sx+>*j))ejSfP{%WZ{tWAR$kR_vPg{UTL9Kj- zjUteqC{dFuGb8w<%e{);zW#LU+l)W*!d^8tORl6m)`O3ySX=r271dlG^c_$k3cp+n zX_EUc`d?pL5}r`8zce|%EkB%02To^Dg)c;Ld#@@5=KZc&vuA&I_A-UXW(1bum2ha= zn7q8K)9kuGqHTvMTy}iv+4h4(FykJBsLJONdvnvUsrswn!G%#}HxY=fi3mvrFVuj}}d*c~{z3cyqjFvyJ??{IzQdTL%z zR?`0c{s&bF(iB64KC!04*rvAl@LMmUUjtq>Q<@!oQ6A%JIfsvrv||lh`Mf3Z=iu6Y zHi7;V#Hz}}qQtj?j&SCrcKNnJv2#P@=h7(2O{9Sp5tG4RRpu1pHSkPU zHXPam{clh$6IguPZ&z=rnbgXs^hEE#QOdbeF20bH!!JIwge8%!YILi5bXx28N3&pHvGELLmZowmc^ zFZbA}um38O>S$$}W%vdNxL9U7XW|!H#P-+P&U~o=`<_kE{6_uKMg;8DE0bAc^N1Zu zC91GcZDPLq>)q4ov2aqM7?aO33aj!u*39y$ z+?!pfR9WI4Fkg*2Ppn(vGj=$MV9Wxm#Ic8~rG``e-bboRgPyfNWh$xklwNTS-`jc~ zu*e!W!(3ZM4T<3zZramrmfR*PKOpM*m{6a#^2J;q^657$08b2iTx|r~H}wR9!V{Sd z7Kzp>ZI*beot%o}ahF%Mdm_pF`oa?@ax|T+)iBJp<}X%#_T+VaO&Eq+Vv^6d-27n6 zoVJjUgv?qakraZV;o&=B^)~x+6>JXv)_$feY?mE&VwMLlQ|iof?ZEcS5qvu1o$%vM zut3=YcBXJTAb+=N=TSBq0+ji$>_H!xQLmwu_?cRX|}Dz}$<+HN*izS65+ z-urZWlj{>~)qQh6lmMElAA>q!j|;(G!%;>&0bQRzq(a9>oW_M@ zPIVg&hRM%YKNWb3U`Z+`aXfi3sxjyt7<~hmTK0M9uIKF+_ry07MymgY*##eG%BC`q_D`OFwr0!hl&|s2mSf!-!oRTm7b%dCoK(B zQ6d*g(H8xY6cuZgNF8bIPba7-I3y*B_s6%IeQF^7bQ3Ags|db@vsvq80dYuT- z{h*LvX?Bt2iE{cdqpC~J9`g2c;=V=IP>ZXRV#i}oHFv$u(pZR_bhZ74p~$`Y8`B!m zAZQhRhGAE?q-WDJX%;H4bnUjuR;GTBkMEuM-Uipbi;k&IEY|ZLI(qL!!e@$qejcw3 zil@)?jeK5wr@E%*eT?Tf0GeM8_TV_ed~UxKJDRYz)lvc|6(P&BuH;Vz*rICKPIpvR zJWX6SF2bgmRKoUM_=4e9^68WXuH9C@&TC2c0M`*g5VHkyukND@3u+VryXi9|#sCAKSkCC6b2<9F>Z z+k6ujruB+y`|WSC0Gfo}+Az*+$?2#R-rZmW$?TrVTJCzgHF&Xffx4zVg-IjcDYG(r zi5$36wF=XjgBb{PfVn?{Bxr%0@`py1caUx!qW(_FjJ5lb%)xANj~>llc>ZwW=p<`__JYMb9_4meC8C z1dp?5=_<{#t|`xjb#iPff!?-T;lW96aLWr45)$vHQMbIp{PSHh(Z0uJj!-kBq`39!qLle-eHy2OgL#USudm3{DL4rW45PSN zRd_tG+8s$B_^V8%0(fIBZoB?jo28+_!A@)5=|*J9HMKd@ynGa0EhDlu1fx3>PZwEk=Q}D=U&19xhovimfw_vQ>d9TX2<7V#* zxLL?`BFt@6&r^^zU#}zJSi9E!MJydSa(oYmGlSnDz-`rss%AlMT-!FsFkkjhsoRTG z-nZ~dWtIfbS`0AT>X!1P1xE8FigISJ0dQ5TUN8wl7Oig7(pX_lRw;P{Je-Ist^3e@C;^kW z)kLIc*gZe_a4Tu?Y=gd2zGPCbFqqnYcjwKRQZUtILD?Z~%@8TiU^yXVI<-NJ}Fx z=~bu6mb#8*Rzm>FY05EQ%$5n=M6eVp_F>=By}~iDMS>%1r`7p zBmE@`uF}_sT_VdM`a)Av6M%a57nhe7)6E2W@~Pde3mu1w`vX77sH#46S8dTxR7*N} zI-h=@-rw%*3dE$^u%i&pmU4Oa;6RAR85~Le-fH-V@nu4p(EVNR4>GQ>jD}^*JGLYd z$o4c4iI!V0H-_?9%{g9!)TgR)VebadUYvUOFrA$TpH{hd>13e)3H{n`_%>7V z3BgYf{3wBgQfs>qqnznIcqpOwmXD7r^K?XuJ44|3^Q_&x-=w+mL?=9R+AV#qw9mIb z!wrD2*7F3|JJ~N&rozM%E$WKC$>+4O^pUv+h~Te#99#Wc*Pc!7(r&txN)xYL)&%YL zBC;jIzWS0R`hVjzy=WtK8BVoOU+Ukfv(7Uu(E$yN@g&Y)7hY?6kmAU_7a#ECJuyFK zbj`?jx%w7m^q#Tlo-}h+`TKDN$e_>9-Lt{|e5M4iRfMZ;-eN61-hbF~J!5#KFX7&G z^aW{qb#O)|<@Tq7-$b{G^othE^$@`A^udJh{GboT15{X6`Yc6fW;Vxqhs1oVYw5ax 
z5hHopmzGQCjJ92Yl>LF#*$Qvb6P8J2rpHP&*oL+WLTm((GDdcQ2-Efnh0<%=V3mv` ztv>>?7t#|PRcZT_%=1yJ-ugr7X^Yz_y#C1D3%?G_X&@_ezA~rxw``6|z&XieSRG-5 zeg&{*t#{i-%!A_wo&NZqg%lg{Z7mjFCV~n_^D?`kcIaK_9`J|7&?L_i{Rd{q&v>tp6=s3deRC;phBYs4%;2G1c`SUIBvvYb$ zm0iRR5s=Z@#N5#6aL>LYhxx?kAKqZ3o#Dj2PY08Rg@X2X>=C4V>rZaMm)m$z8=;XX zk_Jo?4w(Pf3ox7JVZ68Pmvc7vfF+m2jf@|(=wf*}Bu$&l?Lhvu*%f$v<$Fi+)xD}u z25>K`U6x!~t(NKsFL>YDm>xH~=(x-fKnq*Lx{36BdcH@Haiqw#a<3!rLhs2ky@g(d z&;0i93HEqQcl)4Be7!HtZMlq#9|V+xqOTxKL(4Hia98Bd4mpe2Voh$8edAuQ6@OiZ^^_jbq zsHYeGhZV~0-tU$0eWBNUF}!x~kFS@SZ3Nv`Jp^gjsJhbKR?Bf<7Q1X&3|EcyWQe+V znIe!#!H2WOpIuoDt%4VbU#6#uRI3oEevo&yzOQkIR7r~|YgAeCOt(Xt=bwJYr;je3 zwtjA(`wRpMNdhLX!O(LI3c*@bNrUBH8SD8*4r@}-bdh!gt3+_CEDZW|wK1$#o~J#> z`osH_!c%8So=woMD@R33O=gMoLD1o1Pqjih(DrK!E)`XjAY+Q_F)N~^!OUVExu`EW ze$W#tPL)$4tcDaqo;JFs>14`feugU*2~nb|w}bhjU{Q8wbxF)OfICDWE4Nadz<-*K zCi~3JN%-!l`|jKtKF&}>)R`b&F(}S~$dl;1WR9hJ%U4~m-Uiw9kP--~|3E>+I-E1x zzu5Vueaja?#-|VS4+LZPL>R76@Kd|GE&FYS#?!$6KZ~neRkjFn*UD z6iyhMkkG%;gY2J{E*K*tyC^rDFiIkFB=351COUcJ=~LJSb}R7QpZo*6djFm$b6L!f z`i~z~WK2fccMugn9${=Z(QSOL>3fV9{s+jD3U`%?B)k+OMkC!utrKsB5aA#T@G1$E zdriIOX$hNK9>%#9G%cQt?^vWiV`Ia4-q_rqZAeHKcx-Q={!LE|_gR{K zy=4jH9-8;3hlWS<&9oRn;rsa;HH;wpc&5+sckVh0M6SVsO>2tMARbIQJlgLak~^}{ zr@KX_!1#I(Y?>D7h03$$zqlKP#lF)#h4zfV}euSR^{xp zyXSKYHc%K>_CHX~epp#a1kxW4SAUi9*B1&Y3$+c+o`i+m*WuQDMALVM)$PyLKLGGI z_>~4uaL-sv&sz7^IUFt--e@3>D0*k>Jw{`mZTgpr%r5x)f2UJ{BvHcCBM$r4Uh-92u41oG zhxz2IFHw0paEc-~XmwU|2?dShST*a0Zg>d1nV0L%4;3$T#Ln~Q`bu6t?y93&WCGagor|(UwTxz*?*^h zZ*Lnvooahthw^u9Z-15ij(qqN)n;k8f^^;S{k1;a9h>*qEGKQ#FZsi>xx~8XXay*= z-5kkeOGO;yr;INo{8R$qkl5W#D~~mj9Tu6gamv>gSHwyd`<|6r86#n0xQPOnVJSO=hbQm9*mBS#hPiH1^@mebi*nfa{@Y#pL1W-C}C1&LZMAr44iTY`F~GOXtO$g2I)3GLF)vRx6VwV6&ywQYtkoe_6c zzz&YO3q-{_;G@2>T3_|OojA>CL0lq%_$prsB4N$f8x=xLrgJ>Fo*s*V;yZAl89gi- z+v;l;(p0&o@ImFYzs%v`w?kyBCCWAj+5ngJqf zEz#QMhnonvd?9p?LJU3m^u0r{dho*abODbu>ddFrYL3-s-o|b9tFc}-!<*Apm9q>6 zbg5i_(ic2e9s=nKFb?%LM(jd7tjdLB1{7rOo!SHf3+=!|3i4HG%8g`X+3-?b-|@LU z zf&%2JJcZFFij*f05a2Wy79M~KGS4(3T6T$Q@fYKyQ9@n5hmZOqC`Na?$?}q9Ai)(A zsV{ex&dOUq-idDm8m<71h)dpl$$|4@DP}WMpPz!>LQMghvUC=GrgkWD~`y zn8Oh3L*EIFii*-0Gv)*ews(#P_`9Qeovj|1nytnp($kUE4qJ4*w#&WG@UucxabSnh zhvL$QM3;_j=;Nff(lj_%*tO}qWu>3tW_EG#2W5WtU!+zl5ErG#AMZ7cMS>$AL%cYX z$F3>l4)w|h@1~V!cVKD8d%>q(@_}q>#*QX7f;+;JqRpZe!`-$Zb3HzYM(Hb~?U*n^ z-ji{`Ef*$;CR6Aup=b+IkxU|fkBNf$%RLgU%Q=Ck(-QxF1S%hh1tF`}M>#pn+hrGe z8#b%J1?%01R8Oh2;I6E@@iytOQ;I$StK?=%pP1t68#J5{-<(Jf4;ierKQ*pU0&PL= zM(kRH`&!CM_<`UKbJ7w%s^ICNxX>-lO8b`>gv`eKQv~Gliq9nq>&4pSiuR|BOy4`g z0GDI}_9RSvXg~VOoF@t?vLaSZP4(3^CuxMip*e5zwPUs-F$T`o8x!ydcD(<~J*%4e?(bfgmuDVr3}QhSJ4HH8oY6`0{jcWo&jd1muNBr{2N5)Y z12MyLIvn9eF2x=vP8b!t9;di3M|U_~TwZjG;5-0e>!Gqh zKIJ1cBQi>K|FIa?(VL(KtFY$RW<6P_K@Fh?n8lPUU7&XFk;>P2KW4``eb8_w48$OI z+p9p|R2Owbo-Q!Hw1ZC5>p{C2s>HMbntX|T`!gC5#Q;9wCuTrM7yQ$#XluIP1c-&4 zn88y$`~H$XzQ#Gl?oTC6SWcyb(FY^1i17WrX29e>tV!sR$>oO;T+Iz|a!zEaT5Qae zf~E@>s<<@m@XZ!32qb@Jl$jtfYuBeGu%raH-mr}W9kgXBsDZD$3b#H;M-{|dl9)+E z9PnIqoVIScmY+_31^c{KLe+w2Cd2@){~iEtpG!(dN7hHKu+iO%oncxOMA;S}*VG8~ zo#852=LaC|F!Zx4@X7(`Dc;_`(_V3FT7Gl7IC{P{P^4CeUV-C7AV2*{c5!6;ok}vt zB@vMF?hio5*k9zS`tz7&s)-lq*;{F@O(1&FEWbR7X>I?yXilW8CgD^-)DdOG>J(n>sSQ z`oajtiavC5)d3}l1ZGWiu{O^{rFNb7H+gxL_R$?SKbGtc`v6G)qI22fl6I^I%nWDF zP`L2bQg-LJwS`h<(Fk+3ljER}143}m2`ugHn@Z?u#G6yujn3>I)n7`K$lwE5m>8lt z0b|_2hV!%UPEH;u)_R-K6{rV=;T#L74Hnk7@+T__9>kf}FDfcP1%!fuJmu|IdqI4W zjRA!Z4o?Z+?E3N{o#+xKuH z^N(76OI5`Qm~ON4+t5&o>$%V3r15e^nx)jkh@Tc)%-BlSq+&cdi5o2;DM`6oD@5l_Q-%$PMR4&7WTE!a5zurNEsoqD&_xN+dY4Z$54oml?O`4$!yLiZyujVeci4#YUbxQ*$uwGHkR}5%_`2(dcl&}vcE!sW*sM^X_b}@ 
zc?K)fopPP;Fv?Z;^$c#R*hGrGixl49%{_MOVAxXTsH4g8pyGZ-k%^IAbCe#RaWGb9 zY}{yl&wXvN@n{3!(- zRk5u++h_J#;Osc02RN!U1cJ)kCXn>^kofj=22mRibQ=(+Ezb{ll zJaIqOrWE^r-B19h`DI0V*DCTuiWWf4Eb46hAIwKC0~T^4Qkof#S^w?~%Ot*yB^vu^b_I(br~x`o%{ z4HFmK+~pQv9yst=@^C9M#?aWPfy$RxJ`bz%K#lo~xr`Ohg-<(*CKO?h7EAR!92`)mrL)`Bty@iSHLVBz7QMpeKqh1c87S|| z2BGhC-^i%VzR2)6K~AX9=>l|%nA@Cvef&)3X!$;FPEmc?i_DRnfkyF<+wnr6;SRGM{MYifjL6Ux;`7x(!&?)eg{%^_y>N}D_EfmQT^_W2Ld1&*X|poeh~84;$>%aP?jdoN#|&7#Lold&>iaMR%uqouQc{6P7E zS|ZC{ple~a(LloWL=G^;Ko1B#9o_eltz^mH@hsZk>8ZeRKtD~YC@d|PkDKsM-Dfi( zW3Fr5-2Gf{;9Ip9Uh{z=0yPMSwI?XFZusNh+k0ykJP6R&K}o4g)gkLGgtP2c0extu zT)Ec85Fr!9K3nUZEdj-{XovBty0S8ab$7zEO~9bR^Hp5Q+IB6p*51d9b?z53U_{k9 z*VFzVBwU(6Xctbz_8j>8b)bgUUU9cQ7H6p}vCRRTp1O7O*gr2}h2cyp_X@>2wxh`f zc5_`dxx#XOCg)c1^+>mc0pV`bk;48(3Un2{fO+`MPEiz-p{70YB{CIvRJl%x|V}k(2Lg<*|`$5nJNG34Noe3-xg zGol_mL{VB;SsD1YoB5i!4;xzg4tb6J!99Q74l9M*y7$<})(Ge}nNO~>_;6#Gh`KyiK^0EOYI>W2pW(v_N!xI2ao^cVWG9u+x*wm+ zTEBewbmU19sOJkbb}7(&W=I0Z|2}^>Gt2PtCo4bg!hm`4{!}Jvk0`9h?sI5a7J@C; zV3Knu{kXz>5=LXh)BaAB0WKf(0?KYeGN}z`gSm=9$5p7O9Z^8dudvD&S#gE7ts#5h zLmxT;uqTkLC{TEc3Bn*ezfM%~fG(Kbbx3DYzxUvCTXkW}2AU0qXrqMi6Uws3H3NYa zgdWZ$FunG^?v5poFTc-D7D7)Kbjk8EO#JBr>RxRScVgxiYK0Utmp0n4IAjL>oz2HE z>W{=x9aRC6I8D7<*&kuEv7STE^JrgG(0yJ=3Is74BwJ@I-tx4yio5UnSoha!;P{Qr!bKD^5*+EDjyRxh+&>s4 zV%qc2@jfO6fQIpZW9E~Aeuw?*^9U(KpsEm~DDBgK)O5-3_$GdLG=+UV2sU}WNEU{K zq+s%>!%DB`bN-$RERX^e>#NLeDK~%jP8PUW0eL+2j`yViTpXv5G6VET186G^#-Wm$ zh7|w(D&V$xBhtn}`lL85Oo_vW;~lLiN+GZ1DL}xan#WK2)3aMzoZ19@nC;Dt#aiZW z%5t7K``9)-k<%P+r=Gq(eLoQ5Lj2dUI6cv;_X`K3kRUbfXOo^)p=NG|MO*`=xSmcN z9MF1Qvc<2(^kK!r>Y`Ux+du_OD3e$+Zfp4slJIq}QLXhPkg)I>0y-83$HuZaoJN;CXbO6 z+s1oJTr~cr<;?-Q!ggKy+&?oiq^5RR{NrwNqK`ifBiaib%VqEld$ z5lIgdvi$Xu&J?4I1*f#Ttx7pwu+Bp32vcW(dA@Ikv-cOnOZHadsdrDens=&f4h~fh zciqpPTo$Ot9w%`+FdG2~1jv1n3wVEH1eHtxdF7Lq+Y_-mzq4Cvnrwk_;ahwlg{!}G zbNr`7Ggh}@b%aM4CsH^;I<++5)W0*c+~4sSjgZv}e9*OJ@8NPTsPWj#dS$ z3Vqa8>||!u`Kf#!rLZgziSEx=AvUZ!aw!xBz8)|fys&NG_bgGb_`N-pSz~hO+Pdlu zeCx;J(~+JVOGi*V zy$~B4@;ph06DG(+nD;=W$A7m%Akb*IP%6&KTC-xU?R&eCO))&VDXWL*MPh|kRZ!EXA{qDj_7kp_0*M9Fm==G)<`Kfi5PUU@u((hmatKM|b~Ei0zEGF$zyO;U84G5WV4 zoENU5MJ0@~T8o+(C~#2UEifedYX-5UYDA&oJ%7%!?T9IuhwugZzd@LgPt{JB{H{Xb z62%t$)@qwM!W$%rvU%T80~ZjGlM!G;eqy)fUz51pD8bP8!O2`;qur6UD+eGSppJ@@ zW4(}b{Nj0>I6446dJ!9Lotq4wSq5djmO*0_MG53B0yYZF)@1y5GVyQK+AQ;BgufF* zO>f(K$oU=P&`pdmI1v%}AW%wI^ad8C3G5HWhzQVr%qOD0k`hC9Vjv{5GV9i7XE&VJ z{-x0r_G}?xoxbF*sGw5SZ0RW5-u~WrBEatPI z$W<#{E`KKSO5Z5|G7Nz2UYBpk)egZ53+OWdo+HY8Jf5$h-*tQ3}HX zF~yqzH0Bjh(6aWcOjwJtGi2eTqbuR!^IM2fC$W+pZBldl)YnEFKWS^#n=1?xj*jnh zF4kH~rmB^V?Q@Q)XZxsU0xj?}QQ96Sj0)P82b!rf4u!EKPY=-~coOC+(XWvH4Q=5T zmRROjTQ9exs@sb2mcF+iuE3|lSAsjnFTpJGSDn=t9YG0%ubA_SDgMSU3QVGxltBIb zNYSkQxqmWu<0n$!Vt!*ZEB-C!=qaO9eWU#MDd+5ekJRFKq( zOC7<})L`+yL7E7|K}VWGQy|3+NjZ=05Gft{wqV?yMGPVOg@W9qLqLQn-&5qDQ+$Vz zj9{*n9SPXSUDc9iU>JCx83~E7&USR9bO}#|7dvV|!rGeI!2uW|5?bwBC2=KcX6h^k^pw%anEeX$tgNheq$LF?oA9I} z`1pa#E8!Ti1t=<=pb4>>MYt>0)x6S%aOpfU=&NC;hX{1au<)&LZLes}x0DUjK{$v6 zz?d8_Z^GZd=F7JKjeF%i58!40lO;b3srhf200e*f8%a*2KWZ$Kgltmhp4WvXxaM&z z?6G3iez-(w`ULRu(`TAx#(b84$2~cS-w@lse6OkYY2&<03WOy+t>B?~$krB@Y1{Zx zA`UfbT3*!||Gvb(+GO|@1x3X0&eRevP?&Wj|2Je7!iCou#fFCdO^pgoXNeK2|6vl- zUp9W|)Pcg|{vMcESRWm~ookTF&)8JlW>;9teULKGcnzAZK={K^W=elf%2c3s`-Ee zcX#>NQ-Xo}3SS9r2p0_t9r(onK2lP-9S%${&_iG`6LN5|lixV(Rp_o8Bn`6fk@8=l z9TjQC+W~|8b~+@$um}eS$45y?Jc_%ujmp?#L}0|5%ISZC2h2-=ka|)wExO3(v082L7rT` z`vITLhgdb6;uxjdABprI9Rd`60|&e+O>Xt|<#f_stI?y8n8_)!IXoaIe(i4^3)KK- zX@x1D=l>%jXc_5J7!>hcTM)x%0^V}X>B+yfhsgXli!YwW;+bGYjU`q=2S)=!~DrVdmhd0w&P_?CRQrS2`LtssCn2pHcqc?MEHd 
z=W#ML8ihGM8Np)RrqZ(EY@@>^&Tng)`Dclo7cj;~p%ap=3~d9GgL;t1VeSg>e^ z#J*b&$->QsL{#CE6Ajq4hW?$*?fy;9s+UpWwbp+w8-Ior@c)Q<3#cf+sC#$_K_msG zLj^=Sr3C~@>29REyFp62yBnmWyGt4*hwkofzRT}@-~Z>DwFK5M%-rWb=j^@DKIg)W zF^&Ojw|eqz&5FZnCN*JDCT5+l5zmhgKX}h?A?^hxyV0|b3__f*wvCzgUgH_!B16s*l zNR_XgNJU6|5i$=kaZX-t)|gj7Ee=&Rk+TcwM|aAb9#|CT7YTzO_Yd}E3dzsAU22N& z1AT1`uaDHySGRW^f>A%boNOE$*I79#VbwYkqb`GuxH_Vsq@-+jA2QL)jvOcA{NuR% zjs^{_j*ESHb(J4Zie7y|-P+c^b8PH27Z;I(gDRP*pK1yJ@2Vs}@TQ+16TJEhU-e_X zPYdDXlQ=VqnWWc@soaf9_QuDv_u?ayU5F96-WupqmZCYZLkq|H4EXQetjD%T_r6p1 z(+io`{P;&Oy0yocK$D=rWTap;sVdKu62#M%Ww?3Lakn&Ch$ffM+jfFQVRqO%Hy1n_ zfzbmTun%_^CF3_Hd*dT5KXw90l%RB?m@Pa!D0oc2UYu=GA?+XL2ovA2%{lJtoaP{L zpIm1<-roYMG0W&`wGu7f%c8M#vES9tHYasVOwfCbuqmIe_Ar~eCY!Ej?RVHOLH}5oxg3oZd%pV=3B&c*>l7<1Qxk4o}*-`S+ zHJ3&IyZZC272+G{sc)$b6^O{CYkME7?Odiu)^jxYauKSs~u%vfdQ6Qo2UctZ*N zN883;D-^zl{^%;87YxN7GzrPY784W4JtvRBCZMF6@agXIiym{kISlqJ&;;F;k&%6o z*<;*~UE^-&C;1n4z16LcljnBNco&%01GrqwGd-$}7YL=K&^I0owcMrlPlA@^G&Nz4 zXf%Wr9S){To{BgeA3p>yx}aBfcU+feD>K~jxLj=acngd*-CnxlhW5;922Ad;oh?YU zj#Y`05~py4ULL4cx^1X}mCK2_vERU-GM^VKBbE`bGb9TX?Q1$2{%7KoP|HM{!`w{LtH z@L$2Bt|Uggw8H!lG6((hSqiV5tekzLEEU_PSg=C8u2);9@1Xl}38;iWAlKJ`hnR;_usA3{;$ksi1 ztAH)`$NCKWu+>tvS+kZEyY)h-yCGU<=9D)1uI-$+@m1}hS@|caX-mC@Jac$N#MAAc z(Ecm2Jw;lh$t;eM)_Vj*G+}B|GK<~|iH}$WwPyKC3yzE{X_5UGLk7&?d&Bin?-=~h z{rSptnNCE;0#6bMzYm=AW%o3PJ8K&DkcE47R6j4#RE8IRsE|*#58S)=qEuqvzr&0g zO>FPmZ4(lXLZO+mg))T-1!QldeseztT{u>VD7^olfG{sKjf(MUPI2KAj0p)wK}lCM zS40)4p!s{ztn4rQ)&?MYw|KB~aLOe`3tN}J9+j5W$^0|pE4Jq)WXi7}8KGBsL8pgR zp$8KiGOQWw;{#{vp{cm#<&_g9U8LX@1RB}OuS*2ai5L<(_`mQzHKc$$9`<;kY~bc* zdPzi7w0n3s?ALMLVB(i(Ng^)D@mO#A=@`Yp_G~V*Zg0qDC4CDS2Z!I^W2-ce-KB5e zeCITks9H;7Yin`bZ_hXPEL%FwSgGg0Ql)#|d{(P9-#XnOgS~&)o~@=v-W+N#{P$-I zbo!hOT`ff?IN>3>A||ng>m8;fR+&I$DWe@WsDhWAjtH&O=W@K-ZQs36`$S8dDXNTXOPs)Ow~xeXIVUP9 zN&I?6BEfx0k1&<6@9*Gd-%F6#JdO5|=>R-A z3QSDGmu>gL=?3>Rk#>9kBB98{h|2+5iWOIF+C8Df!*fR}`61j-rvxb5bHRaYLSW35 zO1%NxSnulJp`m?5?x``X6Tp=M!!G0y#jNHvuRzd9+k@k%>?d<^s=Fh;3YI|o#XCZQlmN4mHbdYL&aV+$2`x8^b9}$Ow zeZn!LgRxm?_SCF5nRcYUw6_>(<|OpBCuf}RH5OahASydM`)EngMc-!k_jt&)dG}Eh zP@YqGJz)$DDF(5b1HuXQ+XbF0C)&36JC$2br#nwpJF>8Xf`VOLJ*R)yq-N|+i)?AP z@W;j&A$H-rd1g=3o#7|#|H9cF>4?$M9Wm)CP9D&Cne=BL`v2^Mrp)6B&$Nxf#LDYy zCta50nMy|zX2TKE_wQeO6&HhUoAj{q{4~JjxeUvmIdW#O5@DV z8=;P4&^boiTRqMy)1d|lwVRXaC78c0ah<`z*t$$fb)NlB{^&%1Pondk*A~#tIbHDZ zRQBg>y`Wc81|46r>k=aVf(X#=?IWDXX-(LPa7A(0MagPDXKedm>2dA%x)Xu%%G)?0 zL}>A5SuW1~hDZS6Tz_`mwXd&l?JnJ;NY=7Ms56R9t7B*=bgUEc>?mOCHQu7HVY(Zx zmnScGIEbil$76AW;6V?bVP}FZSt@W4^ECdOgd|lvoUx3MI^s=rxGbh)cBGW zhg2H1k%e_vO}H)B{`jYa@{XJ?P1}$!TpIq^FUSOONZr}9(iM)~YWrpHsw9K^u2Y^k z;Xri?p{C1&#<`1o^|Yg-3LGO%bEvEVl~bDQ^H-`yz| zo`p_)RF-aG;)^W*#nH;!IeRt-P5OgQAuLQtL^!%*H)DJtT{OvBq@?bM&8Yu>nYEd0 z!;@7{rIbas?YAZzY$7EE1X+Zsg6#cwD(OON8~g6@Y4qyhw27*s{Y6KwRM*SOqqf&f zmA2)!R#_9U*rEc3FHSE6*Kx3AO_)9mVCx|bR)naqX3KV#3vfMJX}G(&K|~~L9F$qj z7k^6T>s}Tk&eS-=HP)E-K%s#;KHhcgbC7Qyn#94H?ui0-j%Vl5PYCbG$z#DioooX> zVwJ_T=*{@up?3TMp8x*J=%%Fcu&6BsG*<2vR zA9-FE&N{GrKEi_jZ;N6_*l6MfsIWzBDr7w$m12^}-aF$_Q4pKk~xnqy_M z?r8p7F*s*QFXOMQWjS;!;pFUG@naZew%WLF`?bI}-ejzMKp5nY#hM}^;l8uoiu&`N zpdq87%x;Mt7#|(T6HxT#8l(2s{o2P~sL;Mw6E~7q zB}3k3QLQq0Yq*9sZCDL+eZH1U$nz(_!|@8V16DWP_kVbWTp~pmjEaz4@%r@&3jPd9 zs8Hvz_FI8cTN@h_`;x(8@r}Nfv29%l&@m|Gu<-r;8EoncMPP#SS4W3Tn?>5e!m%bc{KmnIQPZT(vANI=Oys z>+a6gNe%~Nd(3N#wMG7%&gRqPe_Q*5F{i8d9ca0sm0NVntNY?!>F9ngU%*4KyZal9 zIX}M+6QD3w9D#=#oY1PT^6_D8+(#QZZw9fE0b;6zF4haEMMcY*a~_tcvdC$gg22~= z|D$}uO)5+>jGNt-^Qiga75b37S51i;rsoMJiTmZ-k^3u!(mcbd-NpVF*LSbqY?n1T zT07(^42aN?e^jWyym<8rS>I?e_Vd1Q(v^{0y(^ooW=yS5?EECmC8MY68%llN(O(M6I# 
zn{+~;kZfiTr`FfFwwJiyzjN3fzti@BVK%7lgwwg_~X`>1Y<(;mLW4KK(h{44=;`!d~)vY#TStW0T__Gn3Vy zT8a-J2$htSKucf|;oiz&X_G_2=8j%{zvv5zkEE_whg*@o=|84RznIO{__X^VMmdn9 zdS$dhl68GxOD@D3b=;%AXm_wFL9%l}a&*7pwZ6%Qn_mHH{@XmA6dewMapappsHNRd0P`ePncWep9W1ZEddN(*0ah z!*I5$oF^}_3UvC@Cz|!l-n5d6k-k18pLRZDU~&;VK%Qc489Vg=ome7KzQL81Lk^CP zN_r|E>09}riqrbn!0bpB`$||Cakkc6@UEr=!ft;__?`DAio`>sFB{ol>3lJUv%WzQ zdbD!b4nzgE;$8qiZua*`%BPKQrhS-=r}I7-`WFQ=3r>m`j^g0VsUImS;uN^4J2Nbc z_XvJ)!#^AH?PlLgiQT655{rs~r`zzJp{j?=2|E2xWC8k+T8v?28C^UiHf|~}4GuOh z>VtfguNZk|+(ZHmlgDCDqeSt0StH_o=AOJHRe|E(_FOHxw>L~qULGZpwWsGE*7!sfHEVC4MtRHCS+%73 z@#*>M0UJ){^d8i6`5iEo>kQN!Jflpgq1N##^2pl3hA8M9R#AcWJD29=+inhW0F`5B zCeuBZ(}mRtqKx%7mN`*ePpjz8#(Bg3g@}Yi=g<(UR|kJHrt-@ zqS+cn&eQ68Hm`Y;pQg??&I+(!V~Kd3lgLRtgpSgt$bn*0GW3W*OpGh>v21g{bw$2+Hfcqx&#q+X-ZaT@aR(~*3Z zy{Y|uid=iDP&}uHkq!TU)Zsf7iKyD;W`ldIaOu=u3`{IdX2-2CZJ*I3RY)#E2R>_# zs50q9ls_c>-8v?m<}ej!br@Nb_RA>f&$4(nPO_?xcofw{k`9vW(8dp$xm8^us=0!) z#tgXP34^t)c4~3@&)EKmB0cj@21cr6Z}Zw=yAcbO)T$bbsXu^NU_b{*TnptT-qWV~eIB&rDZO#b7nYIE0WntgdeD{AfB}Ny|A> zhBN>!B2phUqRK+w{pa0Wf9x9qlxHKoi~eh566cW5rc(-P>2b6rc=Kyv>F+iZ>~A}^ zwA>U91t)G zVA2HSI1JbOoFR}2eHiE>`{&%2b?l7t?hkeL=!so1?_@sF68 z(JzsZeqS_u+gE*8RTVr$Xx1DFj5b6A9Ij+L!>WPE1}?_xtd@4ha=P2e*Og#gTWzYxMVm-rWO{cwmCAz~GD zyhiSxXOS;!7@=HSm1UlK9S>)*ycCkDCFT3A>F$D*N*tC#5_5Y3Uo$m1uI2;MOQ&s@ zes2^_QQNE58>|hX7a)3eJ7DZCmAWIwmYoTwd)^r;34_dXtrlf*8(8T!%hw!+F}h30 zxPx3QgMc?-5}2j)Dz|kI z%#XHW-Oy&#mouSiGh)eBG#)iMuyVL4kZXRPWbk{^w8$*odWprYKaM;2*UPQ>dZ#px zOJ|<@Yb@FE(jSI~!p6pwF7Dc~x|Xqd#d31$*cjUjdvp!Lg5{tA)PS2I0A!VarRv>! zOkI(3-ytzB4FnV z!$GE0>(?2mao=DS0u5*uB>lYk5%;ql1AWsa>oWlO(rEwwP8H3R)htg5-Bi;@+vnE$ zcvR+@EZ#0HAiK-FZhdHp^UrLrGM%fKwOtg&4^8Uo(ksm|Tt3>%4ng6-vDB!}k|^5r z_47aO;UZx$x&HCIu{^h50-;BDV$>TFg3B4fwy4s72PQ!PSl{=z-(cvLRv`~geyXiA zRr5>swR5+LSm?)}un!#gSi)xWqK_BLgK8}#fS6W^t|?zmUNzlrVsh44`UEs``T6BgTiiUI9;sIU|k}F~B;%g9m32>?a@`4mIPlVoy?} zwOHi&pB&fmL|pGiOpW=^ejFiYBuoFbj|=Kc_WGNs{E^x)zKG(p%`a~5qTfZ|ld+>n zS!&56;2qSHy{ar--rN$^;fKXboVNPo*duA)zBmk1;)5j{h@dNpWDxw>$I)8mHGVaA z$yC^A&ug|aanz>f=9j2Q+N-{au)y`M5B&`d*hIxG~vHol?axw}Q& zw|A&A84E$W3TZO_S;v*BmENda>46O8;CN1XPOQfnrummNTGes)9sE5ZxE;F4Tc>+L zpNmym6M^LVzWporZCSnV=PI7};V70J0RL3l3<>c(oO0bixI0@-u_!LA@2sz%=vvv@ zYmYqos5i!a=kp=~G%(A258HD)jnm#JIJ_3el+8kWEA2r~t8&vBxa;0uN#K={y^JB- z$-6=rrC~~8rA;n(crXC^qY)u?zfa|15#w@Xnl>s~$x>Cx>_dqfi$KvNlfgO$3;yS# zDf4eydKrRxa(R+<`HD+8rI&PWygOe$J{{AOe>y(sA{S|!Kt8pz{68-M3CX6}?TZg? z$;U&!j#kx<?{-V2n!5s)$9 z$=8w8VU5AW$Kx zCD1$aV;(MiN~!v3EGkNl9h)FPjGR$#^xcHXTcz=FJZ?L9gG#?i>iB8)K`jguQz*_- zCOzHe%1z~C%$^o+OsmZwzBv6;)EDB9D_SW7b>En^OeKRV@o;_}TMX7l$1VS^M4+Xj zcwMYKym9*$!93ovP#zyILLa$Lslpfw&%xE=lJk4K`%v1$)N5>$*jKjDeoQIe8ZU5l z6eypL47J{KOeXEyotTbbNylAf=N(*RW=M||2c$vT7KUQOm)uJhhuw?f%2Y>AD=6vL z0O3DB{#yHx^#9V*%{KF%ytn3;hSOy@xuMC^xy zood~)_vQ0x!&c`hpS*QeiHe-Syp{bT@l#D!)kLUhOhzZ_9MQTn01@gjMmEu^P46t(Y}15HmU6J8WQmiL558IXzOw;=|d<=>2!eia&!OK zA=XWXN2Bkjw=u&7EE6^?JxN+$OB9*wJV~Z3=3lk7caBXW!0*a=e0@KlRI(-8p)V_2 zwhX|jk;psU0Q5XSjKEhb+mq|(VrnUj~_dHSOtYME>PTAyH1ESVGU*wCw} zC|gRupkCX<@Jrrc&(CSR)ZZt$vWZbI%8ZUuo%ln1FK5uwH&>@>7KpA&4`dX z!*z^9Jbew-no@JO=IH#9lhfoQm?=sj+r#w1@Yd4R38+(*Dr)441W2gw9zA$idlSV! zIvj9yl{O>({W~b6B`*$#&W7&~?z^&yG1f>KLXwSUvRkptw3^NKhO|

QE0Y7l#v>e@+Yw|4+D` zxuQFXx$n=E`jNfxUx8vDXKt&1c*X^ne#TdZ!$*m~t@!AF|5A+-1c`oc{n;w@5*xYB z*~OZi(JuiG5eeB;>}#Q~*j7k#KK)I-P>2DufdHdJ7~Nl0SG{U{VPP(@@2@^lQs$}b zU9yB2X*};JQ(XKGB`un@PAQ&^i9Tk+P@bUHR^Cd(;pG-%xhKrdlNXshljmPFh~Fff zcJg~swL|LNZsIDYqrR!+c?CVZ-BJ6kV)3V;8_}XNp-v8%N8NtQfDhl>~MJ z$#kc=Jk%~Nk>!Ic-g1@Oe-SGDLI&8vx?#+gn=CFu3aLVRtjW&iCy3xVb%*Ndr|YC= zMoPw1gO37ZZui}_F>#9PT%wW^q#U0^zucjp%wr*+oSJ3q&sFH6Yd!s$|HxtNeAA2= zetcT_^pT;htjCq=)5O`PYr3?o?5nmEyy(~;ZJF7E!4xD!g@B z#4cs%WCu?r$579joimIAdsrdv{bw+t<#?70Y>EoX^bGnBD>h7A4o6$nPdvHBLbB7R zkfr;&l#67?nV9g_2)=ticr=pGjmR73*!6tuV87gf7VV{WQZZ=d2@X4fr}y6{?^tWF zYkZ&FV{vdlp`(dxkcbpsi>s9tp+Mk;E_*^=Fk{n1J-1&>S4bO|3}E0(d=wK4ea@mP z1K3Wt*Sa!0qwIA*;s%%;iWD-&bZW-mnNEQE9D~|mOw1c*r}>cE(o)+2^9&1Ilp;bk>4L5M?r*cT#-c~W3s!bk(w7f4Ce5dutr-zgt@mmFWJx(wiLiRY zezfo3#mJz8C3idCnHIYlEH-N?e1oz*R)Q7X6PlxMYbu$vssnfAd8!|JrB|cubk#Z1 zN24}%u{hSb4IRQCjnGd7FQ`d5mv6{sAd&vJKpW2bPc7o|Rduq6m}H4P!Bh>=`R#|6 zJ7@AAshCX5hm8qL9#s;5^oRUQVq#*l6pMC%%KCM_kGX%ZXa`SEUoAcv_4taMogrh` zT%7=ijyx*L!4K^~l{v$bKDSdtR<=KW>ES2T3}IP$UX&ac1YI4}f_dhjn~_tH5SoGF zf*39Rvlm?WuY=$ECM^pj2+SMn6$F^o$QyrWDeJGfoYsT!`L7mRCpu zJ;x8p(ic#T7?LIuBur?8O!wQRP1-sKut-KeRb1q5OJL#)vjx#~&$!;a%jTc@9cib^ zKpKS-^Q&Lbx_7K+p=Rr@mtJM}ZNsT` z#l~>R>rbq>><;&2KMo@7%q-ncXRCF&;__9~hl$xS&jO;Q6b+v;jW&?1oT7!eue$i< z^*akEO+KqR9(GI1^3Mm3pVw3W?mwwKN*7O0TY%IxDuLOp8?+EvYFf2=mj`SVa&m|` z9JNU=<|)S2eAk|FJy)8i-&Uv>r`0vmF1)y{R*}6O6I-cNQ z{h?Qj-F)o3`d3m}EeUsbsngTc8>tWd*k-dLn|124J_>VwsBK=`(%f*iepV(UtNI(B z+amXVgu3`U%=f4+Ec6lbPi^?hrjy)J!f75bmC(V4&^cTVJBWj7iNvVE;-K;Q_7svA zjN?+br>2`g-^PE&iCQ0;0g>N(I8CKdGLSk{7#OdQM7`RhTh`YrS-p=jXg}nm@)?G@e~M;?~EBur5MDRSS+=u#v0z%4GsLs{|u#omm^!T$XqDc?!YLO zPI{WP6qQ04Ox@hhZUQyXY|A5JF+@(VB`s397(528dcyJ0lsz3lIuaFa?0*yxj^)$b zC?w%*3?es=cfTMVE2l7;ZXh>TWnC*#N-L2KZ-`;T5ADAa_G&GAK5y&fGRIo^>)#YSu;92=gz~>C=-F`;Nu#7w9C+iS@>~G%wg2oR`&2av;dqdgA1{ zF`HYmCX^Z9(=BM+OvVm=|9ynHyHlTO= z-E*cN_hmyusx~g33~6Z(_BCyi-*}daQ|MN~JsBsMnG$WdYL3a|;>?<%bXB@P>xki1 zgqK*X*_sIaUAkrCV-5Pu2v<$-P>u)d@U( zcp(XJRXbOfhnXJiBvg-6q`+8(Fuq<~3Kx+&n&VrPHzSWIv`E;a5rtQ$WY5a}86E@OqPp+EZyM7bumI ziemmIaC>@#Ltu88-;jldc4==(_FU;I2<|TgeUz0cMuna}N6RyknGeIa z)ystVaDXiV_T50NB5r|A*4*A`q^_7vO)>{2Rz3=E8NEjfk61?o$VJI3>v)OKuDA_W zpy>jYbB!}yD~b$gM0H7N;GMfQnu*5d?Yt{YI})fgiAYN5_8vZ*(6!QK4#-SAoAM21^w zK5yP&e;8m&u01Hd@@lI|b&dVO2eNGfvu;p@WoKOKB=LBY3On8KFnAuO>ne+6*xT=j zap529V_8c>tBQ7jnLXK66V4)bSFmTWVd%;2d!h$*;PaR1o z;uR@bLQ$rajt|W9-jWal%UT++879)ZMs;7wt1vxXjPd zEUb`tW=p$DlOxHWCEnK^fQ+O_#C6gx#cR8|EkoCn#z*b5D9tbCf`)e^WFtntPe^D) zyeh1R#d~XK{&f$#T1uJ>)UCE=gb>zuIUm??MWt)M`PUjetr%_C1LOg$vI>;8yW-rS zWZT`15SGp@5KvRHdvF^YAZoc(9p2cWR6N!K$ZgHxJaXH;%D5#Z2$A@NZIkCHdMcY{ z_k!eH+`!ouP?unZ=Vg&PhXs5%qY->jawVyiLvxPI`uDhP4K8hlT2BRb|KeIS>{aGG z++9}J4!UO|1+LeFk09618>(3i4qfewX{c%re+-|r+mbR$zxO(PV>K7o8}F$yU5LWb z^2C7zt3L&V@!@HAFOtvdzkjCMNOTMTE^O?M;Dey>S_@}Z8%p8jFWvCa05gAXG zucbESynD&t-01Pgi)J9UqFQl!rFIyv4vQKEG_P6T7FRQa+ckVJUB#Slx#-AKe{Cui zLdZ=hR|eyqCX;&OejvoeRLSAV=pB#U{C$plJU5YLu?%p0f$H67$~?`X7_CL%2%ViAitCEQ%W(J@B|b(#!mBwi_1K-k zIyP%n2S90agsZ(!)$GmXNZl@C-WSTPp7RUpFx&E!*2{?LZldD*NQ8t$>{g#tRqFq9 z0a)Y*#UlJhMv>T!C<;P5`HN5yAa-Wc zsz@&hzc>(Pj};6qJ!S*6{H(&P=B8&(<5*wb3l1yxE0-x2CGxs9RGhY#7~$d%UNd&y zICtX%U~qrDcf@*^N=W2%8tKf=p%T!E_~M$`dS1li+Wsxz$jyjhGck^iw)%h#S~y9> zignk?bUGca(L`7O-c`tq3RmgqqU?9%=Z2jrViEb&GuA} zF_8T-<)|m*#5b*M2GJH>JUyd{WTqA;e^?odC{IIlD?Zi~#%Pv31bEBdyv5NiSFW?7 zjQkji&t%x6r|II3M4p=q7ChJPA9b?(6WRWL@zR<-?NaNJ1|hG#KZuZkplCyJ-P<$eSG=zIKN(npJNWqUC?1FXnK<3Rnesr!P`u)`wpG^rr$h)rRYod*93rm z!{APo;k^Eg(sy-k)|OYo1&$E|u^s<2%KOo!aN4=62(2Hf(L8^&=yS(4$1jtvom{Pz zMw5EOc%U3E1tgAB%+g3Hr>YqvK6@ytOe9@|KKl6yK5Hbl0X5uAp4c}p{8G3) 
zk-P>d3sFeat$9SFfFh_U#0owwlkt23mHls10woHdi!R)ibgl~s{8VOlM0mFQ+(PmcSWoQeBcs^SfC|Pk|BE8D+Oivq3<%k6)wb-K*U=A!zogD{dv_lQ9 z5C56k%a!%?RC;;`qBkQ~fa!!3_=*9!jLBx~g)#NYX*m;nc85Kg?Wok( zadJ_8_1aUyjp$ETj%|($LdkVBK&1h9GH?6EStvCkeCd)B2ay*he8k3{CN+JD>6F%0 zt-Qkr6%JD6{xVWErFyCQB#Gp|4Dc)I%j-L~P4}CwkY_bCpQOYY%JW~Ccuc6PM1sF@ zj(Y)LoNh*~__q+3_Fa5)(bGi+NCJ<-P$dQ&M|}PnW6meRC= zp`h@})2G1eCGP+JaZi5&542dm=$JU`{M`=#PV{Q<252={@PN+8SR91<;uJD(n)ni^ zYT7G4I<>CJhUR-(o11mWo~IZiU(L+$&U5;Ees{MqE}scR$tlx$Ba1;_qH?XulG@YM zH{TbJfeOH)UDs?u{w~9e9vyZ910#Z35JCkDTY)L{=daC;5V@_FS-I7Fj^@QfssC2* zD+ZM*;c7rmg(N}*)q|A6BS#$#pBGsk%Kuw2@HwMi{+~DSzmFa9-?tOB__FX5DV6_g zmBrYV3QgxKyGsFo?f(`VL=NZu5}yMyyT>XD)*)W|@}s+n{d{J2c~hkw z9!b*$Ca4Hn?97sr=ZdG_y#%R2JiRf?Vu#S)8UPMW^>ezi)@zi^_79@v&u`Q#?mvsbj!FL8I9a5;sJO%jWW_oI$YV{7JAiX*7j@=X}P$66%bbD&K1}sgRW5amk>*;Gf*8*K; z+6Zb&{(H@ibcuBgaJ8)OMa@knC<^#!h1aJdih_{O$2nRLyr~Ym4wRULX!cYJ7-N5 zINv=;akw9HUvd%+ktQ+eci@6>+25}$jKgA1XkcsRgtd1!AW8`sm+Zj}Xvt|UTj*MZ z69#j~x;-_gs&YB`zMbBj`mhU{bKc(VmWOpOuwb~qc zOP0m&Uxw`ek4ef`{ zL?#aw;zNt3lS5&#cLu{yg4#P5K<69u0uY+1lEnqZ?Y-!=%K_1TGOu+cl7rr|Q21 zWQCthJT(i+STjO6oSh*6GRzU~t?GTT)b#iox&^rP+hWz)#Ho@bX7_(zSb~lEkM>wk z=5|r-8*l(X&wo7nJlv)AYb2gAzkr~S=luyTSAoFZDnh56yeyOQ((^_tM zVq3T2s?8qEpzIo&Ud9%ENF?ahO271U%BAIT6)aJ&u*douFv9Erbe;WQ@AdRF)-O9QU6MM&ikK=z!$9-QPIg*O<7cmMK|mB!%AJ&*5(lCjdRqV#?sZ zpmXp1Kik_gV~RHE_2|g-M>r4AWZ=9pbfwblfed7S&;*YGadk}{D!)X#gv7+fO*uU# z%wPd6%O6h;9OD5r);e~s?6HZ1^wd~WwoG~|oxVN#Pg2p1jyr#|N1FZurQvz=JR_d> zUtzeP)`8|MN6^9Vb8}p2$?kkhSfa+dwnq{--k@Nc=63ycWByBn!)m#qrq0DJt#~Sf zzntM3V@wPKsBFRByTce^kHyf=IY!5)hX+?;0a4D%(H4|&Dr3&TX0*CB;|BNzxV^dO z)n!Df$oD)HFRM8hY$Z<5kgv@z5P%5Pd9J#4T>F1s0MRSRXseVIiwPis``Z8NWW-cb zQ;9~nYxs=O9odw4?)M3pm@0s9rhVj1`do3#H6&3ILOnm2Or>wJ)$UFT1=|N1s#ADz z0Z}YoY%6Om(|uNXLw1YJT2+!`r)B<2L0n~vPj43++$kiIYfDb^^&9EV4|#o`z~MyG zr5h7h`cH2H!oN{p)w&$q;@2u{1?H#A^&iaC&lxP;dw%)7KUZy7cF^J%P6DrnNxNZH^>)%yHFh z(SHQh#r}n6vv{l&Mtkn(RE2LY_$SwN3yt-{czFdaBP4;{V;6;IF%5(`R`Y*#+Aj?% z6OnPScI}#DO<7L|`m5QwIL2Bez9*l08vIWzvC?%k^W_s2MJg;bzOQh50osCBqr(@o z?FXH^ebJHfPBM+Nq>0Dlo~+sBQhG6+#}*rsDk&UrX>LAT z@9-eBQQfVhJ-%r!p2+f8Goc087=>jSg4m(gsq+BYC7U-(=tK44&kE(W z5e;yt2o+8Kt8j+D3L_R$HnJ+&i%yJ{s676tP!m5*OUd!JEzbiIMMgQ-f3!81u6RsK z{X30_N)MjJ`N?c*QiLmk>@fzrA6okd2hirDcV(@2 zz!^EtP7{*}ca*2Lg;w2CrWTo~Ph>pW*4b&$2*L}3!<6giSaVxP+vb(}f6NskBY#&< z47ExfJhf$B-*#jrYnIJ}BVXz*PuW`@PS{$W^vOXPsCfU5c~1PlO-#^O0^|MO2Z14@ z_4A3eiwi<>tpzFP$GGBsGfg+=*8`rnmkyeb^t4pJk)z1o4^&7%mce-sz{IDpNWv=< zNzUDx;T7{{FJK8AnwVJ>LqiP=-OYDsGwQkzJwwkDEdN{30|0F?B>_X&Dn$not!OE5 zc(p7cpal~Gl%B+h3`p)^x9`Bzg9p!N&CaoQZ%}Q1u4)3^fW!Kh29%!GbEo6~*jZns zN*BCjjWJz5l+^U_h_wORJWICl`0^C~_a-xtB-ZkWSC95W)l{~wZzxFeob|@>%TT9e zFiVnzby$-{#l^HoygTnJbWkh0JAh&YIvqTkT}R6t(LwzSkd?rT zhjnWdVBqESLDRiflA#X*OK=EedA%>ltF3Q*YI;SYgd_rpYPtJd?zhFK4Z<~zQrX1iBfPe`4 zIbZ9kAQTfF06PE%AueAhe%d+C?!}<{wK1sh!zgp517D=UU#~_=D!ROk3~4;7wCH3* z+kK#-Bwt0YgmPdz2HlGhJpzPte^u>x?Rh!O*-}`9+BPEb@1uRDkXi=qA!~8nfY%CK zkN-f#N{BfFE|c4g7o&H(f8+DN*ao#(va35%2_qxq?ppUPDEyA06%*keS{jX7r>!*MF#d+*}9)_l!?63 z78<1+4N$>BhlDRqw{&OMNnm~j2OEOZY*IMg<6N&umNzy;Bl%zKOgB=%z>IW!eQ36O z!5fkcX`7AxuF|TF<$8I5m@zM`k2hMz#>2DN^b#19gA`1as|~({6My-rkWZQ)PXXq^ z=c;Xm(N`tAzx}O7Ry#&nBY+7zphoQGO;OS?ZPdz7C`7hCsx`${HyMwSZ zg=>eOrVU77+gsl{))Gz>%J_krcBX`Ard9jBudjc(`?SaX*?N}b(C^Krk|g)RbB@-t zfuF6EYQNbHhc#CFtCP>4Doo=p=H{R)J1adwpOb%u1$I;9%-%F|kfgPFIgl`(QEc(r02jW+ZMQ{U$d zXfo4kgX)VcKjc5!dy+j8i9iY47~Q)_X?5~Tm|=$zodNYLg? 
zVdV@IDqEqvB3>2{Tq#%6bIW8?*DzZ5)g0VOqUvtkFHUc5A2x~5NO%Pgc4NzHD+P*% zX2*10cBc!60c*hJu%JTSooJf`LnGKMtgmM%Rawk`shWqXnm_!zOcORYmzW;m)n7fY zH=C)52A|ob(HGAK@e&Rv9zP7G z^Y!&jI$LOV4F{)3mD^rv=1CAs`MB6KPkeS6bP6WAq#$6cG#%lId-(sTdJCwmwssAa zmIeW71SF)pTafPV?v_qT0V$D|mhSHEE@|m5>2A1_|G($la~)$3hhxLH*0^R+;(I}#uGruCIt}1BN^D=I!;XlgqpM4$QsLK_UUB*b?8GQSflV$27#eTI-@Gx} z-1u8(yGwuo)78~&IrWT$BpdiqKEVB){5#NWR-e{;0Q>CY+5;ST7~RtcHsYt5w@=y4FxrZ+&se1Q6%r^{$c=|Crnl z8T<a_`xZUg(7^aPF1;N zG7Dd&TM|O^yHLHw>5OV``1Xhsu z=K?1!f1GQvKj7vn4XzvKb9tU4ySmn44IId?zE5MhWoC+At~3j&qfgmL=#B?C?U&0Q zOCW5qoUb93#W(eS(*{o)ALg)%W;$PlLdfltTu@-a$G^NkTS??%#do27!E+Q(qZlW( zJzV8;feJBcLd-qsf=e$3WA`%jnr9gFCB+qCySJYXm?S{*1h@q%B(a_XX z;dB58n$7fVFeN1=xVNgrQVCRJJEe=j%=R5bt(BVjb81r3wIBmSdK8Unw*iwvr7(9J zfk51}3K1YdvwJ-XI=B;;l`#M~nNpB!Ovi!n?x4!njh6$ebKH`A1@YKKIL|rqi#7VS5O=TNeh^Vhzuqa^u8EIIIps z@(?nFFVOz+G+!RBb??#Mi|Od_fqzx(Mcrv8;J^#8jSFs&OAeju1UpMYX>#~*5*gQw zCz$?>{uEq8_H$I6B=A#$Z0JCqr)kn+lrWX%fS_^rOa4HWjEoK857g@ykMfHbdEP#`N(e+tSU>$KVEG(!;(WJ71K z{X}=NXh|3q%p6bThXC^pA_fLJ#7D%$1PdH1Ky*$Ywu{UDB{yg&L;dz{tXgVwN>t7t z|FX7b=***=cUIxTL!?ZJPox)5uZsv~E^woY#u~pT#K#UUE|w0iDi4c~fA?-E-t?uj zTc^!2{`-Igc;K5`Ti?pd(I~wrs4Nd8mt?#O zd(mHiu`B^k1B}$?yOx_H1kD5lv?vG?`Qs(LG4*1tUOHrmhBRz6f`N)y-Q4r)llqgT zZI5)f7WLe!vu3asu6K(kU61$nCRwS{IK6a=1b^j?K~%L;YI8)9OXa$%%525XW7ci+ zo*}gT?arJnQf?%Ql&wJ3;Jr`4pw?8g^z5CZ!`uZhwTij6m zk#HD+f(~*ZmNz@Zihw}2?m(*C!ro%=;jQt+IdAW+J?3zy zkRN;xdL+Jme<#JGhV!A#dBpJ6U!NEt!LKmBU2)t955h2%XYd%k6mPb6^Ee@ z2#PRz!z+P_>5uoEh%lgfPR}I{yiA6(p0n)%rxft}z||;O9q*o2G1rj<1cFj9Gr#Q+ zMrk)WGXl8>(ciwo7_T_5!<{TQkJlb>LRj2bTK7fVk7cdoY?%zH7X45wHa)mbb3Xn{ zDzcbAp2}XI^8U@6H&C>yOyX7p``f)G`>|&?!+~J=<>Om|%qE5r7dJ$s{og?a^TM%G zC}eOkdt?^kw(l?;9jHhaY~GyCoZXgf8#4ASD2UbNo-R)=!&JcB)^_yo1lgBCa%aW1 z7RZPUuHFyy;B{Q?&yWq*6{!|RM2d-kNnAWzVhmsih^?XR;cW3JNcTtbfe(kPDN4DH z6l!I9NJ6OU>PR!s8@*vkQK~dxUGOz|PJdpQEmP$eY30C2iKj_0fAJSdZT}Me@ss@7 z*_dUt`SIf_>yh%hFzkGTD=dGrTjpv7rNKU|DW;J5^$suw+(8qNkjTUE-vpjoqH$xGOWE&fs)0^S$ zew2f1ha*J%>0!S2=is|qtk$8daJQb0?Ta2H0D}!H1n_n>>$W#HM;|?^sq{+V>+0&T zXtlmoW{aP0{!-lGL&ABAfATjE>=J5D}E< z@icMcHOH4)xQnhWO-`MjF4h+}8SRSgdPLq|zXEkV*aa1uR1osy@W<7^dbT>I{m)G5 ze8Hq-g#_N0dgbEfbH+@#-|qa@0?7tnDtS*&a%pc3mtY_`O11CQ+|FS^Ssk@Y?69AE zs>%O6vL6GJhf}+cYkaGw35SPgBEbta?+wF(Yce1A-KI!Aq4A08TTIa7>OU^n|6J^m zm3fp_Ccy9V0u2<0!kJ9=sED?Zohgp%_x$JE=P;6B<0J35j1rES{PC&F{c)um^?zp* zRObtH6&;y%?Lc>M=H+$Y!W@prUnUGPGT%mt_bv;cJY|5@QcL80B( z(m>3O(K}rzVQ46^zW#S4JyN#XtcTBCNLUyO_^kkWvDUA*@GvLgZvRwHMbOdL#KU)F zA-_0ctepJ!w!x=$hIAS*{l2{zN8abims?-Sf{pThKsoS(0GJ36JrtXo=;yXa_rZR2b@46KvYKP0=mpXcH3?J^lT$si_g* zi~GRBoWPQz|IdJh2em@DbR%> zJDY{!Vg6^y0!K!AW2Gzgg}>+mZVZ{FTBb?rNy$>tZ^Z{?yt0 z`TzfGz<>cU=(XktJ2S`i_2C2%oFf|P#m;Af92*&XD=M5x=rY0qu5!^Ux>ZF9Ll@>hJ&oG~+@olMu>Ic$K=gCP`E`1Ow^Cir%4RpxXgMWOSwCJ?(qCzO>vAA= z`1H`z2SViIOxD$fxCpRXP`(NMpIvo@lu{oGKN8N%%L|+}6xGYef*PG9o!!Q(QZHtT zVVn;omS&23dU}5T`XymyMFX&bz)pzQ2g140iU<&$UM#BmD2s=;Rdvvs01<2^OqAmDSbVGc)h%znBnUun=Ky zYB?L?ya}~~$RvZ^V^9zzBz6cEoXI|Z{0PPk7=j*&y8o?uUn`{mw=iqhwyi{%@kv|D<3#-47 zkrC9wZkNS?_L-BT+(C9C(@>2G-U$WljHQ zH$5SYRp`i}HjrYqMET}(Lo~?ksPJXIYlQZv>A8Hy*i!{lRtPF=$LuV}QJmsmk}eFy zs$t%@@Hu&V3VS6LiW-_Z1ne-#7FZm6TPhgHXA)ErY(23w`3Bh);vWf!j7=K+U4BbX zWLEh!>oRC((o{M)j68V2xmeA(7rV66Kcjs0#*s;9M&s4izQ5dq@7ONpJpJO<-Oc|7 zVj&KzOs`oo6Y8#_a-v01yV^(7PtV@KAj8gn=|r7^??Vj0B%HPvWItqOf#H&CkqK3E zpYxWxm2X9wxJ;+H^L~`K@nMY=_@zx!gq>Rs=W+cN#OzAr`kEf8?RL(_h2`6>V5zo@Jh$%FKKez4FxHwiywjzRf^I=)S8Cn&RIY6u+(DM3ms_~q&OKU ziF^|Tms@E`4S!x))VN|f(8sTbklotH2$r`8y?k$MXUvoA=3y(*#`N3U)1Vyv)t|$> z8uKx6JmlXF!CL~H@te(jL*wjB@IwvTUnuxk!T|Pkc7zst>o7fcqC@p%djW;=ku4EJ 
z6ltQ^=}lHvwq~sl?I(E=+&dRv;A68CG(7U_f!D=KCDSsy>5$(&PEk;9GoQHqV@UBZ z?@`8oSD8-V=u2V$OMECw%z%RHBkq$!nNVh91*_Vm+4a`1ylRSyFtQmx=!U^q1e}wh zA;fs8s}XU3bY%FdwNfM)XKJ2Vv6r#j9L&Eohjg$|2D9+DL!06>3$qks=#i~mq2Yu5 zzcBdFBfBVLSuiAn$D(BMuPc?Sp7@J^oFVZ_XPDa<9y4WlKbXEfk$F2c7>ZI+tCU*VAFqWs2jJ3H=Q ze8%tVkC*a=1bjJ8{)&Ve1?}I9YC}jRQuTG%`xwl%iPYH6U&cSYK__-hKh-Aqj>a9; zYEFH~FTI}d5<6EU{omuv+N#9;^5cjMd=>%cPD?maZ`(7zi;hm7OG5^iziJ}GQVKhg zh!BJy_dh?WT$Tq~uog!?7Y&Kekwc>XHNTOHgVy^UN3zqpJH9l$21%av1}pFFcCOo$o{mV;#< z0Y+MHx7rB+Nm&BUEC60;Xlq0NjEU)bT*U^Sn`-HBA!`6P>5zK2zj`Ch#~<0;2#!}H z>8u$<%{S-(76lFV?V$w2Q$L^EdwjQQ!RMTJoc7CRTWA*36JLo#C-0z-#-K)ovo zSiXJy^aiw2F*P#jz-wd@B*DPIxU;^~V$g5)S4$CI@#FKJD5F@>ONTh*b&r!>bW<)@ zW1qEK0EdHIaFhd(QO{sC&fg{{#^~q?Eb6!nfn{DJbMqSWfBbvA_?!jZXUgX8r>6)& zJj!ON5#pFDeR_}~-RN+D%E7@=V?7f$^lliE$@aP&z~7$EcM;>wH*X9kCT9q_yi;9E zQXtugawx`gXZ8U z@68tn$1J%>nie{#a3Ht>Z7i~WdCa$XK|lgdpUjaXGibhU=!tg!!z}W^>E8$}gKq1O z(nu7Fuy=Xd{dIpAO^;tbkkb*yO9j=&*NQfYmFBrz^jT!Y!Vz;hrV=%yosv`Gy4?`6 z*(s#ptI}vb+9rS1$$<4O70RF6Ok7L^5NMWjXqM)WBL@X0Rp?=Dc&78+t2ZQ6T(}vj z={k+BvFW-zzsA9-STWCp`A0g}lNOZ!V)L8R+e?{?n^Wxr$ch^((aMB_KU{bJUv2)P zCbIy}9hVDG%&NtXK4XMoDDNY5H8k?J)5`Vpg}}H@Vtvyn@fIona9k!k>{UsA zCMPGml50Pjhf*Ki@iEc_az}wcm7ot`7j{0LO#uVg5L8O}`ER0#1=nwa!hxHqAsAcc zTE{oA;lJ2icw)#ED};RPmfN~tVb;5IRi6xd9Y48bmNbwK`}(`>`tRTKdG{2t9{>pj z6v13s$IwvN+wA@-%L@*2$*%tX6cF~uVZS_9y6E<|#V1}g#cB6R%tQqXpop(n4_@^y z#w8mtfot;Zj*>~eo4 zy5bc6S+6Z2ldL}$oV3d_>bI|#ALz7O{poqH|Ky2cIz33Mm1S#g-g_@(A%Mn>X{SCd zXp`|q5=>|6m0FdoRK)9J(`DN1V^~Z+eJxd?*%?nAn9HVAC^u96H(L}ODK`4|t;j28 zP8RGV3T6+)NNT+wLH+TcJP+g4G{X7T7bfG6VyLz6^AYa%|WVtM@N#kmyB{Bc%~KE7a|nb@ja>0cM6 z8qJ}vTj$d$N9 zn&JUvmqz2Ey|;JE{&XX+`FMt}Wiw`9)b^%WlC@CneGNaS{fcm|l47Bf=*~#;b6EEM zt|NfpwCe1nqq~A`ttSH1X&w$vkZwK3_`qjzIet0e{kMLQJ+*(^Ajygm8#7;Ji1I^D zj<|$TyY9pcm?DGjt3r+fBmlM%5D+$pY6*J!dM^h1uPi4E(z#zc$@(>K=N!dQi6#8> z-#dAF3-e*-Kg*anBX`4H_A$zrp)nKsuf3qgF0^+-z+n}X%vjhQNmA0+YFGT zo~(}7lq_%dW9?0c4lDb9DD?IKbnVL8+RgdISUrA$aDF-+LAk*A$*xPaW64UxUj;tv&@hNP9#Kqn3hcM znaRb=Tj{kZmYSOS>tzuNz&X7Wn;%)|F);~-$1i+f*Ag?#6+ojAtP-Dr_2_H#=%Jwd zFZ`LJ3fq1L2%p<$ZNLGRQ&C|X)vhO_q)Z^-w2v0l*8{2mE9 zQ(84vv&YZx`8`Lv#iP#TZj+>v?0y&lLAD$jK=kPWKRig-tKmuRLz| z5MukirH|QC9>J`dbQ^#NhOMJGM_N!~F1OFE&1(l~n101`_Kq8u9&lGjKeDr-MUimU zdiI_8Jn(HMd>UCOESC?BA%Mpix7Rw$I{_NmY)`Qah$=c(%RauYF0z5`OK zy3t4L7@W`$DEECI97&k5?eZfc%xt_(#CxTT0(9$`<_Iux`ioGs>0zLbxpnrZTFJml zo@3xsw%Rfp7!p-g(hRDyZ^5D!pb09M(fmEF6h##A-Ea5k0F|QCXpa&tTSQg z(jwYSO0np(V$A}UTIs40Hhj)MXXoQT!S}Vr(5H$?7!ib=@$3Zkrqt-CU+TRL_`NT` ztzy%Y>H84}A#H)VkOJ{H-_q+>MRmPsf@w@lEd2QDJGvl9${6G0i`z@&$D)Wa zwX^_V%F0Ths$YN{JSjo(;^j5w9V_4MH`*=ZH#3Cjh$P@_d|4y)@o-F;0x6G!qbc=( zAd$}Bq}XbuYGZ55<#>rJIQmOeM~5&rU(R^v(lUbh26}LCyl*kn7zb1}1uB(L-NDE# z`CgtTM>t!9u_O>YE|00@_t!q94o}KSPZv7i$hkk+I2Cd{q%EGSbmkE<{)_MGBR1WA zIHdm76Uh_T*J~*7+VhMWS)30~3xmG!16;aSTCK`;Q>}%MpGv(>CE<#kl9JhUhw-zR zD6r#TBt=GtUm>F$g_wei-q*-f_&tyq6)rfQ`G>RntihRj>O8rCO6vNkJ01b^dzm68 zz}^6$XKlTCvDW^Z+Q6QEqBbmBH2IV3dh=EEqbE~Bu&Ax20^I}YfeTy`alP24S-!Wa zf5*`KlYYqC=B;hdn{oi&`C7imgPui1(Kb$GVt0mj>TWZ)+C!i1ee{p-;!!^Go3qRZ ztri^tw*XyjPRHXe3ZI8fd4Qk3!PO5-ayVK*0gnqX4|JM+0vj)U&4=rV8XEC%WB?Ek zMdFrhegz0SXEPm{hs(Zx@1(IEF4yeB-q8nWmF_GJlBII{pj%thKI=6FShAX>EPAv( znE`BFe+2>@Ylh$Jf@Mpze9me6UoFm$$}gD#-b)n%H1T0O8GMVOQ4tYCY%k!QbK8q2#MQwx37{hdNvln`ox+2;1mBVU@e?Ra6iU8)y7;@Xfk~ypL<~mu=bVR> zH)^*7S*NcZ zevs{=D+I_>G_98WJEq%Af3DN$_>Rd6?ecgg5X8uBo}7q>j4w=FPt8K;G0G+k- z9TX;CnOJUsR)srzg+xHrfLJzG(0{KMS_iy6ne=;12;JtJtk3(7$nSZUvSbeGG8D6% zxy63mL;4}a<|$-3H_8ZpeL(v_Nl7_;K_Qj6Y1QO5R%gq*H=X;&U)5Tn{1eDk%#d(* zZ@7Cre^)%Ry|KB0$7BJILSTLbO}=Nz)gjBP}P7*5q<*K#B}eF=4ScyU*Ifx2#!br5pU`4`ocPmQON&#?Lhc 
zz@(Ma^KExq_3#%m@V8_rR4R|Ivj$IT{#B=%MYl<^P0FjfD%IW#pNi|nO z%EJ@(?c3*?1rui;BF|QVVYicZydW4sHiynB#ojpG3F0Q+7n+qOH2=g<+z3UuI6j%y zNK(-#hj`L#q0n04$cTtI!|iBEUdB)5a~GrECv5)Kz=Guiv^~oh82G&7uI!Gc3^sq% z0Xs#I?Fg6`Vt6>Z-65z(U0ar^{BHiXysnjC2PT)yGQE($eQqwVtgJLVi!qf+yw8YEu} zc{*-=8Le6Y^&>bfoHa_egok5R(P0u{hHGniczG?36b)d+UI+pb!H1x8DQw{qmzV>EC+DA>t5&L<@u4k=X1t@6m?0Mt4-G{Vy`{;||Q6$-OuUX)DD zo>%yvKFQOwA?XDx71sPopE|h0`$jQtJ(ru4WAY0tdHMnp=#CJO(9CE4u@iE8#39s!e>8RQ}t1A{2aiX|vJ-+qxpO+*G?lr37fL82s&r;-Iww0h)K)Al@^AfH%1> z5+AC<^$AYwg$b2O59~J`^B23^UeA1F?F4cl(&M$es~x5HEw9y_Lh=KclLQ>L>s`Il zCAzI20;yHzj`=+vZxIZp|7h3T+Jn~C-g5C9(3czod>H?3@gEhwtC08{B`*t`-bBYM z&GIVF$jiKL1CxcNN`+gS!7g{359KbjFPEMNrW-dj~P}gWAgqJO3V~;Wy<>GOqA4uz4G;rQgW>k6i9*=o~I@I=NO;*Qt;38(JRD`Qex@wMg>{N^%kE3~rS!UlRMQ(ESBd0%!i z6GFAKxo@ao)993k`jg*A>fTa~hu1EzezaE=A>oz4?Y6ccCpg4H1#nuWG8b+((f}h4 zoJk@IL45^a8KkDAffgakgR5Ppb@!m z`mjGhA2`}HX~uf4`s=RUYx#PMpbR`~%&sG-Q@L8A>_gB7tCm0RyOKHcq zrfh8O3F+vxh-wKNVW`_L3Fl9(1E*>k8?lpIkch?QE95nBxUskASMDa4z?d4le(^9K z3z>~h!NQY0%_JZoAd-xLW4_t-a%YW$i5WsJiM;?<-jupO|3UzC773)w!`*372Xh}t zNuvRgrgDf2rsv7f?V>BPgmOvW4d~6YoA&;p|I&p6Me#Gkvgv|h0 zQYj;pXl-(VXz;kezByjz+G zKc8)2QT@#L^M}djVcBB!<>|Zqe7oBlZI06Zncn!{zw<{Pwr`M+-t#!d+3E6Ln3mgV zUXSJ&s^>ZHtK&~y>^bv*maIm-8AY)=9Yx0~8kmoV^G+iRfyL_Ouo20`{iwuiz~fe> z*lbDi!E4_U;`&%n$if0wDmNg0_<85B&^Y2fI~0=@>Qbv0qsZd(^K*%Ibxfn+IFmK% z@rv>O_-_^JY=n6bXSHgPTHYN?vY8jGUsl4t0`ce_xtk$?QYj%XJ9BnXj@0P7iJdEr zo%^M;gp=i!ekEF{lBGgZS*K>4Jz;RN=GUB$)7jm=E|plOi4$jvIC_VdC>i_xM;N() zfdAGO`D?r8;A#gw=!N2ZiwV(*SPUO_qqEYpNDg&lNraxii053F%-kpjg)IfM%U@eE zZnV^F4kCN!Vy$Vc+12yU`Sa0601}e7*S; zd@pmmeJxt3@a1t@GyKvxe=KHrZjmq3hvU3kszgU9V($;T&0Q)z5apnCu5_EzO2Z4BG_b^hPNJ>o1r&#HPPq}g5B`2%b zb&yjSdaCw?>5ApVV;5(V6HIjq)Tg6&a?_~yat%k5%spn)!b$n`qT-c0FR;1b0!gK3 zc5y0Qn$Nx}lvl@3uKUoFwNCCsbS4D~h%$_m3AUwkG%9f%Y;S8i5?>YXYD~CW#LcXvRB5LME(g zxp;UctAftl_R^-p^JF8Q_2n|SV>3$!WBV>tS64tV1k3QVj{FqMaK?3+H>Q{mXKD-N z^XnWh55WsMk`EMV_-Q5&`en}FJU4e_XUnw3ZFKlHeQwjYrLkEZ9yK6_5~#b&FLo;| z)=#i#RDOa6Zop*g!-s6v*%|stPwNkB__U=V0{w;oob;X>Fa?eFey=9t;v;;3n(cb*-f~SGC#h+D|hL@>5 zkhk}1DYuJ$ipS=-!=5Y4nHIFuLgPev0w(l7F6J9nH+-y0QgRM2t+V)T9a;&!}3R*~$m`%yr)mkFd_PHCh9 zZslu|0r!PZu1NkdV8HxZtUtlPB#<7&=q=R9Ol=$iP;!A%_4~VvZBSIERse3EjZJ}i zJ>VL+0z2*+EqWIM4vG+M)UL@^ch<#7yFU%wsHmttIkF~z{T?u-y%KMKHQ4X{&`oYO z_7Dg#{I~Dl4lc+p7U;Gj0+^&w2k(_3yM@<&Y8g&x!Ddx=_*cZSDj^T$plznD1(hZ~ zrupVQ1{PsJePX_c`tib*HrL<9S1|wle%J7DE2+lrni<>+A&+MaNbjEd7b^zAR-haR zfE`w|a~Ul*%kOjq8nr&wd3hL=fQzlrEcWhMuklBm3Q?nXD>o3Q0s6VgfI9!Gg-k}R zW#2bTC*1R8u7A@70r?>W)P+0@x{ZAknC5%)cdXTo!rh*=x5j6VfXIJ39ve-hi?zz;a3+*3d-l!PzZ+h;A0_{cF`h|L(i|e-V z&>a?w_Dr}6G#UKHrg&H}-_$-j34VZ`yzH#u;iqt#SNN+fViX2RU=8cmU31v_J|x~_ zS63`IPJt>1nN%1B4*FjGlJpDkdv}`lYjTM=0r6&DP|&paqj0V#ndVXE&CBu7(1nD! 
diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/development/index.md b/i18n/jp/docusaurus-plugin-content-docs/current/development/index.md
deleted file mode 100644
index d4e1e51bdcb..00000000000
--- a/i18n/jp/docusaurus-plugin-content-docs/current/development/index.md
+++ /dev/null
@@ -1,27 +0,0 @@
----
-description: 'Index page for Development and Contributing'
-slug: '/development/'
-title: 'Development and Contributing'
----
-
-
-In this section of the documentation you will find the following pages:
-
-| Page | Description |
-|-----|-----|
-| [Developer Prerequisites](/development/developer-instruction) | Prerequisites and setup instructions for ClickHouse development |
-| [Architecture Overview](/development/architecture) | A comprehensive overview of the ClickHouse architecture and its column-oriented design |
-| [Build on macOS for macOS](/development/build-osx) | Guide for building ClickHouse from source on macOS systems |
-| [Integrating Rust Libraries](/development/integrating_rust_libraries) | Guide for integrating Rust libraries into ClickHouse |
-| [Testing ClickHouse](/development/tests) | Guide for testing ClickHouse and running its test suites |
-| [Third-Party Libraries](/development/contrib) | Page on ClickHouse's use of third-party libraries and on how to add and maintain them |
-| [C++ Style Guide](/development/style) | Coding style guidelines for ClickHouse C++ development |
-| [How to Build ClickHouse on Linux for RISC-V 64](/development/build-cross-riscv) | Guide for building ClickHouse from source for the RISC-V 64 architecture |
-| [Build on Linux for s390x (zLinux)](/development/build-cross-s390x) | Guide for building ClickHouse from source for the s390x architecture |
-| [How to Build ClickHouse on Linux](/development/build) | Step-by-step guide for building ClickHouse from source on Linux systems |
-| [Build on Linux for macOS](/development/build-cross-osx) | Guide for cross-compiling ClickHouse from Linux for macOS systems |
-| [Continuous Integration (CI)](/development/continuous-integration) | An overview of the ClickHouse continuous integration system |
-| [Build Clickhouse with DEFLATE_QPL](/development/building_and_benchmarking_deflate_qpl) | How to build ClickHouse and run benchmarks with the DEFLATE_QPL codec |
-| [How to Build ClickHouse on Linux for AARCH64](/development/build-cross-arm) | Guide for building ClickHouse from source for the AARCH64 architecture |
-| [Build on Linux for LoongArch64](/development/build-cross-loongarch) | Guide for building ClickHouse from source for the LoongArch64 architecture |
diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/development/index.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/development/index.md.hash
deleted file mode 100644
index aff40ebd896..00000000000
--- a/i18n/jp/docusaurus-plugin-content-docs/current/development/index.md.hash
+++ /dev/null
@@ -1 +0,0 @@
-4e194f1411af93cd
a/i18n/jp/docusaurus-plugin-content-docs/current/development/integrating_rust_libraries.md b/i18n/jp/docusaurus-plugin-content-docs/current/development/integrating_rust_libraries.md deleted file mode 100644 index da43bf283af..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/development/integrating_rust_libraries.md +++ /dev/null @@ -1,87 +0,0 @@ ---- -description: 'Guide for integrating Rust libraries into ClickHouse' -sidebar_label: 'Rust Libraries' -slug: '/development/integrating_rust_libraries' -title: 'Integrating Rust Libraries' ---- - - - - -# Rustライブラリ - -Rustライブラリの統合は、BLAKE3ハッシュ関数の統合に基づいて説明されます。 - -統合の最初のステップは、ライブラリを/rustフォルダーに追加することです。これを行うには、空のRustプロジェクトを作成し、Cargo.tomlに必要なライブラリを含める必要があります。また、`crate-type = ["staticlib"]`をCargo.tomlに追加することで、新しいライブラリのコンパイルを静的に設定する必要があります。 - -次に、Corrosionライブラリを使用してCMakeにライブラリをリンクする必要があります。最初のステップは、/rustフォルダー内のCMakeLists.txtにライブラリフォルダーを追加することです。その後、ライブラリディレクトリにCMakeLists.txtファイルを追加する必要があります。そこでは、Corrosionインポート関数を呼び出す必要があります。以下の行はBLAKE3をインポートするために使用されました: - -```CMake -corrosion_import_crate(MANIFEST_PATH Cargo.toml NO_STD) - -target_include_directories(_ch_rust_blake3 INTERFACE include) -add_library(ch_rust::blake3 ALIAS _ch_rust_blake3) -``` - -このようにして、私たちはCorrosionを使用して正しいCMakeターゲットを作成し、そしてより便利な名前にリネームします。名前`_ch_rust_blake3`はCargo.tomlから来ており、ここでプロジェクト名として使用されています(`name = "_ch_rust_blake3"`)。 - -Rustのデータ型はC/C++のデータ型と互換性がないため、空のライブラリプロジェクトを使用して、C/C++から受け取ったデータの変換、ライブラリメソッドの呼び出し、出力データの逆変換のためのシムメソッドを作成します。たとえば、このメソッドはBLAKE3のために記述されました: - -```rust -#[no_mangle] -pub unsafe extern "C" fn blake3_apply_shim( - begin: *const c_char, - _size: u32, - out_char_data: *mut u8, -) -> *mut c_char { - if begin.is_null() { - let err_str = CString::new("input was a null pointer").unwrap(); - return err_str.into_raw(); - } - let mut hasher = blake3::Hasher::new(); - let input_bytes = CStr::from_ptr(begin); - let input_res = input_bytes.to_bytes(); - hasher.update(input_res); - let mut reader = hasher.finalize_xof(); - reader.fill(std::slice::from_raw_parts_mut(out_char_data, blake3::OUT_LEN)); - std::ptr::null_mut() -} -``` - -このメソッドは、C互換文字列、そのサイズ、および出力文字列ポインタを入力として受け取ります。次に、C互換の入力を実際のライブラリメソッドで使用される型に変換し、それを呼び出します。その後、ライブラリメソッドの出力をC互換の型に戻す必要があります。この特定のケースでは、ライブラリがfill()メソッドによってポインタへの直接書き込みをサポートしているため、変換は不要でした。ここでの主なアドバイスは、メソッドを少なく作成することです。そうすれば、各メソッド呼び出し時の変換を減らし、オーバーヘッドが大きくならないようにします。 - -`#[no_mangle]`属性と`extern "C"`は、すべてのそのようなメソッドにとって必須であることに注意してください。これがないと、正しいC/C++互換のコンパイルが行えません。さらに、統合の次のステップに必要です。 - -シムメソッド用のコードを書いた後、ライブラリのヘッダーファイルを準備する必要があります。これは手動で行うこともできますし、cbindgenライブラリを使用して自動生成することもできます。cbindgenを使用する場合は、build.rsビルドスクリプトを書き、cbindgenをビルド依存関係として含める必要があります。 - -ヘッダーファイルを自動生成できるビルドスクリプトの例: - -```rust -    let crate_dir = env::var("CARGO_MANIFEST_DIR").unwrap(); - -    let package_name = env::var("CARGO_PKG_NAME").unwrap(); -    let output_file = ("include/".to_owned() + &format!("{}.h", package_name)).to_string(); - -    match cbindgen::generate(&crate_dir) { -        Ok(header) => { -            header.write_to_file(&output_file); -        } -        Err(err) => { -            panic!("{}", err) -        } -    } -``` - -また、すべてのC互換属性に対して`#[no_mangle]`および`extern "C"`属性を使用する必要があります。これがないと、ライブラリが正しくコンパイルされず、cbindgenはヘッダーの自動生成を実行できません。 - 
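参考までに、ここまでで述べた要件(`crate-type = ["staticlib"]` の指定、クレート名 `_ch_rust_blake3`、cbindgen をビルド依存関係として追加すること)をまとめた、シム用クレートの `Cargo.toml` の最小スケッチを示します。説明用の例であり、各バージョン番号や `edition` は仮定の値です。

```toml
[package]
# Corrosion はこの名前を CMake ターゲット名(_ch_rust_blake3)として使用します
name = "_ch_rust_blake3"
version = "0.1.0"        # 説明用の仮の値
edition = "2021"         # 説明用の仮の値

[lib]
# C/C++ から静的リンクできるようにするための設定(本文で言及)
crate-type = ["staticlib"]

[dependencies]
# ラップ対象のライブラリ。バージョン指定は一例です
blake3 = "1"

[build-dependencies]
# build.rs からヘッダーを自動生成するために使用。バージョン指定は一例です
cbindgen = "0.26"
```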
-これらのステップをすべて完了した後、互換性やヘッダー生成に関する問題を見つけるために、小さなプロジェクトでライブラリをテストできます。ヘッダー生成中に問題が発生した場合は、cbindgen.tomlファイルで構成を試みることができます(テンプレートはこちらで見つけることができます:[https://github.com/eqrion/cbindgen/blob/master/template.toml](https://github.com/eqrion/cbindgen/blob/master/template.toml))。 - -BLAKE3の統合時に発生した問題に注意する価値があります: -MemorySanitizerは、Rustの一部の変数が初期化されているかどうかを見ることができないため、誤検出を引き起こす可能性があります。これは、一部の変数に対してより明示的な定義を持つメソッドを書くことで解決されましたが、このメソッドの実装は遅く、MemorySanitizerビルドを修正するためだけに使用されます。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/development/integrating_rust_libraries.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/development/integrating_rust_libraries.md.hash deleted file mode 100644 index 8ba87948795..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/development/integrating_rust_libraries.md.hash +++ /dev/null @@ -1 +0,0 @@ -41e87956ccbc1a66 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/development/style.md b/i18n/jp/docusaurus-plugin-content-docs/current/development/style.md deleted file mode 100644 index feb16068fad..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/development/style.md +++ /dev/null @@ -1,809 +0,0 @@ ---- -description: 'ClickHouse C++ 開発のコーディングスタイルガイド' -sidebar_label: 'C++スタイルガイド' -sidebar_position: 70 -slug: '/development/style' -title: 'C++スタイルガイド' ---- - -# C++ スタイルガイド - -## 一般的な推奨事項 {#general-recommendations} - -以下は推奨事項であり、要件ではありません。 -コードを編集する場合は、既存のコードのフォーマットに従うことが理にかなっています。 -コードスタイルは一貫性のために必要です。一貫性があると、コードを読みやすくし、検索もしやすくなります。 -多くのルールには論理的な理由がない場合があります; それらは確立された慣行に従っています。 - -## フォーマット {#formatting} - -**1.** フォーマットのほとんどは `clang-format` によって自動的に行われます。 - -**2.** インデントは4スペースです。タブが4スペースを追加するように開発環境を設定してください。 - -**3.** 開始および終了の波括弧は別の行に置かなければなりません。 - -```cpp -inline void readBoolText(bool & x, ReadBuffer & buf) -{ - char tmp = '0'; - readChar(tmp, buf); - x = tmp != '0'; -} -``` - -**4.** 関数本体が単一の `statement` の場合、それを1行に配置することができます。波括弧の周りにスペースを置きます(行の終わりのスペースを除く)。 - -```cpp -inline size_t mask() const { return buf_size() - 1; } -inline size_t place(HashValue x) const { return x & mask(); } -``` - -**5.** 関数の場合。括弧の周りにスペースを入れないでください。 - -```cpp -void reinsert(const Value & x) -``` - -```cpp -memcpy(&buf[place_value], &x, sizeof(x)); -``` - -**6.** `if`、`for`、`while` およびその他の式では、開括弧の前にスペースを挿入します(関数呼び出しとは対照的に)。 - -```cpp -for (size_t i = 0; i < rows; i += storage.index_granularity) -``` - -**7.** バイナリ演算子(`+`、`-`、`*`、`/`、`%` など)および三項演算子 `?:` の周りにスペースを追加します。 - -```cpp -UInt16 year = (s[0] - '0') * 1000 + (s[1] - '0') * 100 + (s[2] - '0') * 10 + (s[3] - '0'); -UInt8 month = (s[5] - '0') * 10 + (s[6] - '0'); -UInt8 day = (s[8] - '0') * 10 + (s[9] - '0'); -``` - -**8.** 行終止が入力される場合は、演算子を新しい行に入れ、その前にインデントを増やします。 - -```cpp -if (elapsed_ns) - message << " (" - << rows_read_on_server * 1000000000 / elapsed_ns << " rows/s., " - << bytes_read_on_server * 1000.0 / elapsed_ns << " MB/s.) 
"; -``` - -**9.** 必要に応じて、行内の整列のためにスペースを使用できます。 - -```cpp -dst.ClickLogID = click.LogID; -dst.ClickEventID = click.EventID; -dst.ClickGoodEvent = click.GoodEvent; -``` - -**10.** 演算子 `.`、`->` の周りにスペースを使わないでください。 - -必要な場合、演算子は次の行にラップされることがあります。この場合、その前のオフセットは増加します。 - -**11.** 単項演算子(`--`、`++`、`*`、`&` など)と引数との間にスペースを使用しないでください。 - -**12.** カンマの後にスペースを置きますが、その前には置きません。同じルールは `for` 式内のセミコロンにも適用されます。 - -**13.** `[]` 演算子の間にスペースを使用しないでください。 - -**14.** `template <...>` 式では、`template` と `<` の間にスペースを置き、`<` の後や `>` の前にはスペースを置かないでください。 - -```cpp -template -struct AggregatedStatElement -{} -``` - -**15.** クラスおよび構造体内では、`public`、`private`、`protected` を `class/struct` と同じレベルに書き、残りのコードをインデントします。 - -```cpp -template -class MultiVersion -{ -public: - /// 使用のためのオブジェクトのバージョン。shared_ptr がバージョンのライフタイムを管理します。 - using Version = std::shared_ptr; - ... -} -``` - -**16.** ファイル全体で同じ `namespace` が使用され、他に重要なことがない場合、`namespace` 内にオフセットを必要としません。 - -**17.** `if`、`for`、`while`、または他の式のブロックが単一の `statement` で構成されている場合、波括弧はオプションです。代わりに `statement` を別の行に置きます。このルールは、ネストされた `if`、`for`、`while` でも有効です。 - -ただし、内部の `statement` に波括弧または `else` が含まれる場合、外部ブロックは波括弧で書かれるべきです。 - -```cpp -/// 書き込みを終了します。 -for (auto & stream : streams) - stream.second->finalize(); -``` - -**18.** 行の末尾にスペースが存在しないようにします。 - -**19.** ソースファイルはUTF-8でエンコードされています。 - -**20.** 文字列リテラル内で非ASCII文字を使用できます。 - -```cpp -<< ", " << (timer.elapsed() / chunks_stats.hits) << " μsec/hit."; -``` - -**21.** 単一の行に複数の式を書かないでください。 - -**22.** 関数内のコードのセクションをグループ分けし、1つの空行を挟んで区切ります。 - -**23.** 関数、クラスなどを1つまたは2つの空行で分けます。 - -**24.** `const`(値に関連する)は型名の前に書かなければなりません。 - -```cpp -//正しい -const char * pos -const std::string & s -//間違い -char const * pos -``` - -**25.** ポインタまたは参照を宣言する場合、`*` と `&` シンボルの両側にスペースを挿入する必要があります。 - -```cpp -//正しい -const char * pos -//間違い -const char* pos -const char *pos -``` - -**26.** テンプレート型を使用する場合、最も単純なケースを除いて、`using` キーワードでエイリアスを作成します。 - -言い換えれば、テンプレートパラメータは `using` のみで指定され、コード内で繰り返されることはありません。 - -`using` は、関数内などローカルに宣言できます。 - -```cpp -//正しい -using FileStreams = std::map>; -FileStreams streams; -//間違い -std::map> streams; -``` - -**27.** 異なる型の複数の変数を1つの文で宣言しないでください。 - -```cpp -//間違い -int x, *y; -``` - -**28.** Cスタイルのキャストを使用しないでください。 - -```cpp -//間違い -std::cerr << (int)c <<; std::endl; -//正しい -std::cerr << static_cast(c) << std::endl; -``` - -**29.** クラスおよび構造体内では、メンバーと関数をそれぞれ可視性スコープ内でグループ化します。 - -**30.** 小さなクラスや構造体の場合、メソッド宣言を実装から分ける必要はありません。 - -他のクラスや構造体の小さなメソッドにも同様が適用されます。 - -テンプレートクラスや構造体の場合、メソッド宣言を実装から分けないでください(さもなければ同じ翻訳単位内で定義する必要があります)。 - -**31.** 140文字で行をラップできますが、80文字ではありません。 - -**32.** 後置インクリメント/デクリメント演算子が必要でない場合は、常に前置演算子を使用してください。 - -```cpp -for (Names::const_iterator it = column_names.begin(); it != column_names.end(); ++it) -``` - -## コメント {#comments} - -**1.** 非自明なコードの部分には必ずコメントを追加してください。 - -これは非常に重要です。コメントを書くことで、コードが不要であることや、設計が間違っていることに気付くかもしれません。 - -```cpp -/** 使用できるメモリの一部。 - * たとえば、internal_buffer が1MBで、ファイルから読み取るためにバッファに読み込まれたのがわずか10バイトの場合、 - * working_buffer のサイズはわずか10バイトになります - * (working_buffer.end() は、読み取れる10バイトのすぐ後の位置を指します)。 - */ -``` - -**2.** コメントは必要に応じて詳細にできます。 - -**3.** コメントは、その説明するコードの前に置いてください。まれに、コメントはコードの後、同じ行に置くこともできます。 - -```cpp -/** クエリを解析し、実行します。 -*/ -void executeQuery( - ReadBuffer & istr, /// クエリを読み取る場所(および、INSERTの場合はデータ) - WriteBuffer & ostr, /// 結果を書き込む場所 - Context & context, /// DB、テーブル、データ型、エンジン、関数、集約関数... 
- BlockInputStreamPtr & query_plan, /// クエリがどのように実行されたかの説明がここに書かれる可能性があります - QueryProcessingStage::Enum stage = QueryProcessingStage::Complete /// SELECTクエリを処理する段階まで - ) -``` - -**4.** コメントは英語のみで書くべきです。 - -**5.** ライブラリを書く場合、主要なヘッダーファイル内に詳細なコメントを含めて説明してください。 - -**6.** 追加情報を提供しないコメントは避けてください。特に、次のようなエンプティコメントを残さないでください: - -```cpp -/* -* 手続き名: -* 元の手続き名: -* 著者: -* 作成日: -* 修正日: -* 修正著者: -* 元のファイル名: -* 目的: -* 意図: -* 指名: -* 使用されるクラス: -* 定数: -* ローカル変数: -* パラメータ: -* 作成日: -* 目的: -*/ -``` - -この例は http://home.tamk.fi/~jaalto/course/coding-style/doc/unmaintainable-code/ から借用したものです。 - -**7.** 各ファイルの先頭に無意味なコメント(著者、作成日など)を書く必要はありません。 - -**8.** 単一行コメントは3つのスラッシュ `///` で始まり、複数行コメントは `/**` で始まります。これらのコメントは「ドキュメンテーション」と見なされます。 - -注:これらのコメントからドキュメントを生成するためにDoxygenを使用できます。しかし、Doxygenは一般的に使用されておらず、IDEでコードをナビゲートする方が便利です。 - -**9.** 複数行コメントの始まりと終わりには空行を含めるべきではありません(複数行コメントを閉じる行を除く)。 - -**10.** コードをコメントアウトする場合、基本的なコメントを使用し、「ドキュメンテーション」コメントは使用しないでください。 - -**11.** コミットする前にコメントアウトされた部分のコードは削除してください。 - -**12.** コメントやコードに不適切な言葉を使用しないでください。 - -**13.** 大文字を使用しないでください。過剰な句読点を使用しないでください。 - -```cpp -/// WHAT THE FAIL??? -``` - -**14.** コメントを区切りとして使用しないでください。 - -```cpp -///****************************************************** -``` - -**15.** コメント内で議論を開始する必要はありません。 - -```cpp -/// なぜこのことをしたのですか? -``` - -**16.** ブロックの最後に、それが何だったかを説明するコメントを書く必要はありません。 - -```cpp -/// for -``` - -## 名前 {#names} - -**1.** 変数名とクラスメンバー名には小文字とアンダースコアを使用します。 - -```cpp -size_t max_block_size; -``` - -**2.** 関数(メソッド)の名前には小文字で始まるキャメルケースを使用します。 - -```cpp -std::string getName() const override { return "Memory"; } -``` - -**3.** クラス(構造体)の名前には大文字で始まるキャメルケースを使用します。インターフェースのプレフィックスはIを使用しません。 - -```cpp -class StorageMemory : public IStorage -``` - -**4.** `using` もクラスと同様に命名されます。 - -**5.** テンプレートタイプの名前:単純な場合は `T` を使用します; `T`、`U`; `T1`、`T2` を使用します。 - -より複雑な場合は、クラス名のルールに従うか、プレフィックス `T` を追加します。 - -```cpp -template -struct AggregatedStatElement -``` - -**6.** テンプレート定数引数の名前:変数名のルールに従うか、単純なケースでは `N` を使用します。 - -```cpp -template -struct ExtractDomain -``` - -**7.** 抽象クラス(インターフェース)の場合、`I` プレフィックスを追加できます。 - -```cpp -class IProcessor -``` - -**8.** 変数をローカルに使用する場合は、短い名前を使用できます。 - -他のすべてのケースでは、意味を説明する名前を使用してください。 - -```cpp -bool info_successfully_loaded = false; -``` - -**9.** `define` およびグローバル定数の名前は、アンダースコアを使用したALL_CAPSになります。 - -```cpp -#define MAX_SRC_TABLE_NAMES_TO_STORE 1000 -``` - -**10.** ファイル名はその内容と同じスタイルを使用します。 - -ファイルが単一のクラスを含む場合、ファイルもクラスと同じ名前(CamelCase)にします。 - -ファイルが単一の関数を含む場合、ファイルも関数と同じ名前(camelCase)にします。 - -**11.** 名前に略語が含まれている場合は: - -- 変数名の場合、略語は小文字を使用するべきです `mysql_connection` (ではなく `mySQL_connection`)。 -- クラスおよび関数の名前の場合、略語の大文字を保持します `MySQLConnection` (ではなく `MySqlConnection`)。 - -**12.** クラスメンバーを初期化するためだけに使用されるコンストラクタ引数は、クラスメンバーと同じ名前を使用しましたが、末尾にアンダースコアを追加します。 - -```cpp -FileQueueProcessor( - const std::string & path_, - const std::string & prefix_, - std::shared_ptr handler_) - : path(path_), - prefix(prefix_), - handler(handler_), - log(&Logger::get("FileQueueProcessor")) -{ -} -``` - -アンダースコアのサフィックスは、引数がコンストラクタ本体で使用されない場合は省略できます。 - -**13.** ローカル変数とクラスメンバーの名前に違いはありません(プレフィックスは不要)。 - -```cpp -timer (not m_timer) -``` - -**14.** `enum` 内の定数には、先頭が大文字のキャメルケースを使用します。ALL_CAPSも許可されています。`enum` がローカルでない場合は、`enum class` を使用します。 - -```cpp -enum class CompressionMethod -{ - QuickLZ = 0, - LZ4 = 1, -}; -``` - -**15.** すべての名前は英語で書かれなければなりません。ヘブライ語の単語の音訳を許可することはできません。 - - not T_PAAMAYIM_NEKUDOTAYIM - -**16.** 略語は、よく知られている場合(略語の意味をWikipediaや検索エンジンで簡単に見つけられる場合)は、許可されます。 - - `AST`, `SQL`。 - - 許可されない略語: `NVDH`(いくつかのランダムな文字)。 - -不完全な単語は、短縮版が一般的に使用されている場合は許可されます。 - 
-略語の横に完全な名前がコメントに記載されている場合、略語を使用することもできます。 - -**17.** C++のソースコードを含むファイルは `.cpp` 拡張子を持つ必要があります。ヘッダーファイルは `.h` 拡張子を持つ必要があります。 - -## コードの書き方 {#how-to-write-code} - -**1.** メモリ管理。 - -手動メモリ解放(`delete`)はライブラリコードでのみ使用できます。 - -ライブラリコードでは、`delete` 演算子はデストラクタ内でのみ使用できます。 - -アプリケーションコードでは、メモリはそれを所有するオブジェクトによって解放される必要があります。 - -例: - -- 最も簡単な方法は、オブジェクトをスタックに置くか、別のクラスのメンバーにすることです。 -- 小さなオブジェクトが大量にある場合は、コンテナを使用します。 -- ヒープ内の少数のオブジェクトの自動解放には `shared_ptr/unique_ptr` を使用します。 - -**2.** リソース管理。 - -`RAII` を使用し、上記を参照してください。 - -**3.** エラーハンドリング。 - -例外を使用します。ほとんどの場合、例外をスローするだけで捕捉する必要はありません(`RAII` のため)。 - -オフラインデータ処理アプリケーションでは、例外をキャッチしないことが許可されていることがよくあります。 - -ユーザーリクエストを処理するサーバーでは、接続ハンドラーのトップレベルで例外をキャッチすることが通常十分です。 - -スレッド関数では、すべての例外をキャッチして保持し、`join` の後にメインスレッドで再スローする必要があります。 - -```cpp -/// まだ計算が行われていない場合、最初のブロックを同期的に計算します -if (!started) -{ - calculate(); - started = true; -} -else /// 計算が既に進行中の場合、結果を待ちます - pool.wait(); - -if (exception) - exception->rethrow(); -``` - -例外を処理せずに隠すべきではありません。すべての例外を無視するだけでログに記録することは避けてください。 - -```cpp -//正しくない -catch (...) {} -``` - -一部の例外を無視する必要がある場合は、特定の例外に対してのみ行い、残りを再スローしてください。 - -```cpp -catch (const DB::Exception & e) -{ - if (e.code() == ErrorCodes::UNKNOWN_AGGREGATE_FUNCTION) - return nullptr; - else - throw; -} -``` - -応答コードや `errno` を持つ関数を使用する場合は、常に結果を確認し、エラーが発生した場合は例外をスローしてください。 - -```cpp -if (0 != close(fd)) - throw ErrnoException(ErrorCodes::CANNOT_CLOSE_FILE, "Cannot close file {}", file_name); -``` - -コード内の不変条件をチェックするためにアサートを使用できます。 - -**4.** 例外タイプ。 - -アプリケーションコードでは、複雑な例外階層を使用する必要はありません。例外テキストはシステム管理者に理解可能でなければなりません。 - -**5.** デストラクタからの例外スロー。 - -これは推奨されていませんが、許可されています。 - -次のオプションを使用します: - -- 例外が発生する可能性があるすべての作業を事前に行う関数(`done()` または `finalize()`)を作成します。その関数が呼び出された場合、後でデストラクタに例外がないようにします。 -- ネットワークを介してメッセージを送信するようなあまりにも複雑なタスクは、クラスユーザーが破棄前に呼び出さなければならない別のメソッドに置くことができます。 -- デストラクタに例外が発生した場合、隠すよりもログを記録する方が良いです(ロガーが使用可能な場合)。 -- 簡単なアプリケーションでは、例外を処理するために `std::terminate`(C++11でデフォルトで `noexcept` の場合)に依存することが許可されます。 - -**6.** 匿名コードブロック。 - -特定の変数をローカルにするために、単一の関数内に別のコードブロックを作成し、そのブロックを出る際にデストラクタが呼び出されるようにできます。 - -```cpp -Block block = data.in->read(); - -{ - std::lock_guard lock(mutex); - data.ready = true; - data.block = block; -} - -ready_any.set(); -``` - -**7.** マルチスレッド。 - -オフラインデータ処理プログラムでは: - -- 単一のCPUコアで可能な限り最高のパフォーマンスを得るようにしてください。必要に応じてコードを並列化できます。 - -サーバーアプリケーションでは: - -- 要求を処理するためにスレッドプールを使用します。この時点で、ユーザー空間のコンテキストスイッチを必要とするタスクはありませんでした。 - -フォークは並列化には使われません。 - -**8.** スレッドの同期。 - -異なるスレッドが異なるメモリセル(さらに良いのは異なるキャッシュライン)を使用し、スレッド同期を使用しないことが可能な場合がよくあります(`joinAll`を除く)。 - -同期が必要な場合は、ほとんどの場合、`lock_guard` の下でミューテックスを使用するのが十分です。 - -他のケースでは、システム同期原始を使用します。ビジーウェイトは使用しないでください。 - -原子的な操作は、最も単純なケースでのみ使用するべきです。 - -ロックフリーのデータ構造を実装しようとしないでください、専門分野でない限り。 - -**9.** ポインタ対参照。 - -ほとんどの場合、参照の方を好みます。 - -**10.** `const`。 - -定数参照、定数へのポインタ、`const_iterator`、および `const` メソッドを使用します。 - -`const` をデフォルトと見なし、必要な場合にのみ非 `const` を使用します。 - -値を渡す場合、`const` を使用するのは通常意味がありません。 - -**11.** unsigned。 - -必要な場合は `unsigned` を使用してください。 - -**12.** 数値型。 - -`UInt8`、`UInt16`、`UInt32`、`UInt64`、`Int8`、`Int16`、`Int32`、および `Int64`、さらに `size_t`、`ssize_t`、`ptrdiff_t` 型を使用します。 - -これらの型を `signed/unsigned long`、`long long`、`short`、`signed/unsigned char`、`char` の数字に使用しないでください。 - -**13.** 引数を渡す。 - -複雑な値は、移動される場合は値で渡し、`std::move`を使用します。ループ内で値を更新する場合は参照で渡します。 - -ヒープ内で作成されたオブジェクトの所有権を関数が取得する場合、引数の型を `shared_ptr` または `unique_ptr` にします。 - -**14.** 戻り値。 - -ほとんどの場合、単に `return` を使用します。`return std::move(res)` と書かないでください。 - -関数がヒープにオブジェクトを割り当ててそれを返す場合、`shared_ptr` または `unique_ptr` を使用します。 - -まれに(ループ内での値の更新)、引数を介して値を返す必要がある場合、この引数は参照であるべきです。 - -```cpp 
-using AggregateFunctionPtr = std::shared_ptr; - -/** 名前から集約関数を作成します。 - */ -class AggregateFunctionFactory -{ -public: - AggregateFunctionFactory(); - AggregateFunctionPtr get(const String & name, const DataTypes & argument_types) const; -``` - -**15.** `namespace`。 - -アプリケーションコードには、別々の `namespace` を使う必要はありません。 - -小さなライブラリにもこれを必要としません。 - -中程度から大規模なライブラリでは、すべてを `namespace` に配置してください。 - -ライブラリの `.h` ファイルでは、実装の詳細を隠すために `namespace detail` を使用できます。 - -`.cpp` ファイル内では、シンボルを隠すために `static` または匿名 `namespace` を使用できます。 - -また、`enum`のために `namespace` を使用して、対応する名前が外部の `namespace` に落ちないようにすることができます(ただし、`enum class` を使用する方が良いです)。 - -**16.** 遅延初期化。 - -初期化に引数が必要な場合、通常、デフォルトコンストラクタを書くべきではありません。 - -後で初期化を遅らせる必要がある場合は、無効なオブジェクトを作成するデフォルトコンストラクタを追加できます。あるいは、少数のオブジェクトの場合、`shared_ptr/unique_ptr` を使用できます。 - -```cpp -Loader(DB::Connection * connection_, const std::string & query, size_t max_block_size_); - -/// 遅延初期化用 -Loader() {} -``` - -**17.** 仮想関数。 - -クラスが多態的に使用されることを意図していない場合、関数を仮想にする必要はありません。これはデストラクタにも当てはまります。 - -**18.** エンコーディング。 - -どこでもUTF-8を使用します。`std::string` と `char *` を使用します。`std::wstring` と `wchar_t` を使用しないでください。 - -**19.** ロギング。 - -コード全体の例を参照してください。 - -コミットする前に、意味のないログとデバッグログ、およびその他のデバッグ出力をすべて削除してください。 - -サイクル内でのログは、Traceレベルでさえ避けるべきです。 - -ログは、どのログレベルでも読みやすくなければなりません。 - -ログは、基本的にアプリケーションコードでのみ使用されるべきです。 - -ログメッセージは英語で書く必要があります。 - -ログは、システム管理者にも理解できるようにpreferablyであるべきです。 - -ログに不適切な言葉を使用しないでください。 - -ログにはUTF-8エンコーディングを使用します。稀な場合には、ログに非ASCII文字を使用できます。 - -**20.** 入出力。 - -アプリケーションのパフォーマンスに重要な内部ループで `iostreams` を使用しないでください(`stringstream` は決して使用しないでください)。 - -代わりに `DB/IO` ライブラリを使用してください。 - -**21.** 日付と時刻。 - -`DateLUT` ライブラリを参照してください。 - -**22.** インクルード。 - -常にインクルードガードの代わりに `#pragma once` を使用してください。 - -**23.** using。 - -`using namespace` は使用しません。特定のもので `using` を使用できますが、クラスや関数内にローカルにしてください。 - -**24.** 不要な場合を除いて、関数の `trailing return type` を使用しないでください。 - -```cpp -auto f() -> void -``` - -**25.** 変数の宣言と初期化。 - -```cpp -//正しい方法 -std::string s = "Hello"; -std::string s{"Hello"}; - -//間違った方法 -auto s = std::string{"Hello"}; -``` - -**26.** 仮想関数については、基底クラスには `virtual` を書きますが、子孫クラスには `virtual` の代わりに `override` を書きます。 - -## C++の未使用機能 {#unused-features-of-c} - -**1.** 仮想継承は使用されません。 - -**2.** 現代C++の便利な構文シュガーを持つ構文(例えば: - -```cpp -//構文シュガーなしでの従来の方法 -template ::value, void>> // std::enable_ifによるSFINAE、::valueの使用 -std::pair func(const E & e) // 明示的に指定された戻り値の型 -{ - if (elements.count(e)) // .count() メンバーシップテスト - { - // ... - } - - elements.erase( - std::remove_if( - elements.begin(), elements.end(), - [&](const auto x){ - return x == 1; - }), - elements.end()); // remove-eraseイディオム - - return std::make_pair(1, 2); // make_pair()を使用してペアを作成する -} - -//構文シュガーあり(C++14/17/20) -template -requires std::same_v // C++20コンセプトによるSFINAE、C++14テンプレートエイリアスの使用 -auto func(const E & e) // auto戻り値の型(C++14) -{ - if (elements.contains(e)) // C++20 .contains メンバーシップテスト - { - // ... 
- } - - elements.erase_if( - elements, - [&](const auto x){ - return x == 1; - }); // C++20 std::erase_if - - return {1, 2}; // または: std::pair(1, 2)を返します; 初期化リストまたは値初期化(C++17)でペアを作成します。 -} -``` - -## プラットフォーム {#platform} - -**1.** 特定のプラットフォーム向けにコードを書きます。 - -ただし、他の条件が同じであれば、クロスプラットフォームまたはポータブルコードが優先されます。 - -**2.** 言語:C++20(利用可能な [C++20 機能](https://en.cppreference.com/w/cpp/compiler_support#C.2B.2B20_features) のリストを参照してください)。 - -**3.** コンパイラ: `clang`。執筆時点(2025年3月)では、コードはclangバージョン>=19でコンパイルされます。 - -標準ライブラリが使用されます(`libc++`)。 - -**4.**OS:Linux Ubuntu、Preciseより古くはありません。 - -**5.**x86_64 CPUアーキテクチャ向けにコードが書かれています。 - -CPU命令セットは我々のサーバー間で最小限のサポートされているセットです。現在のところ、SSE 4.2です。 - -**6.** いくつかの例外を除いて `-Wall -Wextra -Werror -Weverything` コンパイルフラグを使用します。 - -**7.** スタティックリンクをすべてのライブラリで使用しますが、静的に接続するのが難しいライブラリはあります(`ldd` コマンドの出力を参照)。 - -**8.** コードはリリース設定で開発およびデバッグされます。 - -## ツール {#tools} - -**1.** KDevelop は良いIDEです。 - -**2.** デバッグには `gdb`、`valgrind`(`memcheck`)、`strace`、`-fsanitize=...`、または `tcmalloc_minimal_debug` を使用してください。 - -**3.** プロファイリングには `Linux Perf`、`valgrind`(`callgrind`)、または `strace -cf` を使用します。 - -**4.** ソースはGitで管理されています。 - -**5.** アセンブリは `CMake` を使用します。 - -**6.** プログラムは `deb` パッケージを使用してリリースされます。 - -**7.** master へのコミットはビルドを壊してはいけません。 - -ただし、有効と見なされるのは選択されたリビジョンのみです。 - -**8.** コードがまだ部分的にしか準備されていなくても、できるだけ頻繁にコミットを行います。 - -この目的のためにブランチを使用してください。 - -`master` ブランチ内のコードがまだビルド可能でない場合は、`push` の前にビルドから除外します。数日以内に完成させるか、削除する必要があります。 - -**9.** 重要でない変更の場合、ブランチを利用してサーバーで公開します。 - -**10.** 未使用のコードはリポジトリから削除されます。 - -## ライブラリ {#libraries} - -**1.** C++20標準ライブラリが使用されており(実験的な拡張も許可されます)、`boost` と `Poco` フレームワークも使用されています。 - -**2.** OSパッケージからのライブラリの使用は許可されていません。事前にインストールされたライブラリの使用も禁止されています。すべてのライブラリは、`contrib` ディレクトリ内のソースコードの形で配置され、ClickHouseで構築される必要があります。詳細については、[新しいサードパーティライブラリを追加するためのガイドライン](/development/contrib#adding-and-maintaining-third-party-libraries)を参照してください。 - -**3.** 既存で使用されているライブラリに優先権を与えます。 - -## 一般的な推奨事項 {#general-recommendations-1} - -**1.** できるだけ少ないコードを書くようにしてください。 - -**2.** 最も単純な解決策を試してください。 - -**3.** コードがどのように機能するか、内部ループがどのように機能するかがわかるまでコードを記述しないでください。 - -**4.** 単純な場合は、クラスや構造体の代わりに `using` を利用してください。 - -**5.** 可能であれば、コピーコンストラクタ、代入演算子、デストラクタ(少なくとも1つの仮想関数が含まれている場合の仮想関数を除く)、ムーブコンストラクタやムーブ代入演算子を記述しないでください。言い換えれば、コンパイラが生成する関数は正しく機能する必要があります。`default` を使用できます。 - -**6.** コードの簡素化が奨励されています。可能な限りコードのサイズを削減してください。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/development/style.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/development/style.md.hash deleted file mode 100644 index 9e14df51dcd..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/development/style.md.hash +++ /dev/null @@ -1 +0,0 @@ -fb3b0b8dca031287 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/development/tests.md b/i18n/jp/docusaurus-plugin-content-docs/current/development/tests.md deleted file mode 100644 index 55f9da84d90..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/development/tests.md +++ /dev/null @@ -1,504 +0,0 @@ ---- -description: 'ClickHouseのテストおよびテストスイートの実行方法ガイド' -sidebar_label: 'テスト' -sidebar_position: 40 -slug: '/development/tests' -title: 'ClickHouseのテスト' ---- - -# Testing ClickHouse - -## Functional Tests {#functional-tests} - -Functional testsは最もシンプルで使いやすいテストです。 -ほとんどのClickHouseの機能はfunctional testsを使用してテストでき、テスト可能なClickHouseコードの変更に対しては必須です。 - -各functional testは、実行中のClickHouseサーバーに1つ以上のクエリを送り、結果をリファレンスと比較します。 - -テストは`queries`ディレクトリにあります。 -サブディレクトリは2つあり、`stateless`と`stateful`です。 -- Stateless testsは事前にロードされたテストデータなしでクエリを実行します - 
テスト自体内で小さな合成データセットをその場で作成することがよくあります。 -- Stateful testsはClickHouseからの事前にロードされたテストデータを必要とし、一般公開されています。[stateful test in continuous integration](continuous-integration.md#functional-stateful-tests)を参照してください。 - -各テストは、`.sql`と`.sh`の2つのタイプのいずれかです。 -- `.sql`テストは、`clickhouse-client`にパイプされるシンプルなSQLスクリプトです。 -- `.sh`テストは、自身で実行されるスクリプトです。 - -SQLテストは一般的に`.sh`テストよりも望まれます。 -純粋なSQLからは動作しない機能をテストする必要がある場合のみ、`.sh`テストを使用するべきです。例えば、`clickhouse-client`に入力データをパイプする場合や、`clickhouse-local`をテストする場合です。 - -:::note -`DateTime`型と`DateTime64`型をテストする際の一般的な間違いは、サーバーが特定のタイムゾーン(例:"UTC")を使用していると仮定することです。これは事実ではなく、CIテストの実行中のタイムゾーンは故意にランダム化されています。テスト値に対してタイムゾーンを明示的に指定するのが最も簡単な回避策です。例:`toDateTime64(val, 3, 'Europe/Amsterdam')`。 -::: - -### Running a Test Locally {#running-a-test-locally} - -ClickHouseサーバーをローカルで開始し、デフォルトポート(9000)でリッスンします。 -例えば、テスト`01428_hash_set_nan_key`を実行するには、リポジトリフォルダーに移動し、次のコマンドを実行します。 - -```sh -PATH=:$PATH tests/clickhouse-test 01428_hash_set_nan_key -``` - -テスト結果(`stderr`および`stdout`)は、テスト自体の隣にあるファイル`01428_hash_set_nan_key.[stderr|stdout]`に書き込まれます(`queries/0_stateless/foo.sql`の場合、出力は`queries/0_stateless/foo.stdout`にあります)。 - -`tests/clickhouse-test --help`を参照して、`clickhouse-test`のすべてのオプションを確認してください。 -すべてのテストを実行するか、テスト名のフィルタを提供することでテストのサブセットを実行できます:`./clickhouse-test substring`。 -テストを並行して実行するためのオプションや、ランダム順序で実行するオプションもあります。 - -### Adding a New Test {#adding-a-new-test} - -新しいテストを追加するには、まず`queries/0_stateless`ディレクトリに`.sql`または`.sh`ファイルを作成します。 -次に、`clickhouse-client < 12345_test.sql > 12345_test.reference`または`./12345_test.sh > ./12345_test.reference`を使用して、対応する`.reference`ファイルを生成します。 - -テストは、事前に自動で作成されるデータベース`test`内で、テーブルを作成、削除、選択するのみとしてください。 -一時テーブルを使用することは問題ありません。 - -CIと同じ環境をローカルにセットアップするには、テスト設定をインストールします(これによりZookeeperのモック実装が使用され、一部の設定が調整されます)。 - -```sh -cd /tests/config -sudo ./install.sh -``` - -:::note -テストは次の条件を満たさなければなりません: -- 最小限であること:必要最小限のテーブル、カラム、および複雑さを作成するのみ -- 迅速であること:数秒を超えないこと(できればサブセカンドで) -- 正確かつ決定論的であること:テスト機能が正しく動作しない場合にのみ失敗する -- 隔離されている/ステートレスであること:環境やタイミングに依存しない -- 包括的であること:ゼロ、null、空のセット、例外(負のテスト、構文`-- { serverError xyz }`や`-- { clientError xyz }`を使用)などのコーナーケースをカバーする -- テストの最後にテーブルをクリーンアップすること(残り物があれば) -- 他のテストが同じものをテストしないことを確認すること(つまり、最初にgrepすること)。 -::: - -### Restricting test runs {#restricting-test-runs} - -テストには、CIでの実行コンテキストを制限するための0個以上の _tags_ を持たせることができます。 - -`.sql`テストの場合、タグは最初の行にSQLコメントとして置かれます。 - -```sql --- Tags: no-fasttest, no-replicated-database --- no-fasttest: --- no-replicated-database: - -SELECT 1 -``` - -`.sh`テストの場合、タグは2行目のコメントとして書かれます。 - -```bash -#!/usr/bin/env bash - -# Tags: no-fasttest, no-replicated-database - -# - no-fasttest: - -# - no-replicated-database: -``` - -利用可能なタグのリスト: - -|タグ名 | 説明 | 使用例 | -|---|---|---| -| `disabled`| テストは実行されません || -| `long` | テストの実行時間は1分から10分に延長されます || -| `deadlock` | テストは長時間ループで実行されます || -| `race` | `deadlock`と同じ。`deadlock`を優先してください || -| `shard` | サーバーは`127.0.0.*`をリッスンする必要があります || -| `distributed` | `shard`と同じ。`shard`を優先してください || -| `global` | `shard`と同じ。`shard`を優先してください || -| `zookeeper` | テストを実行するためにZookeeperまたはClickHouse Keeperが必要です | テストは`ReplicatedMergeTree`を使用します | -| `replica` | `zookeeper`と同じ。`zookeeper`を優先してください || -| `no-fasttest`| [Fast test](continuous-integration.md#fast-test)の下でテストは実行されません | テストはFast testで無効にされている`MySQL`テーブルエンジンを使用します | -| `no-[asan, tsan, msan, ubsan]` | [sanitizers](#sanitizers)を使用したビルドでテストを無効にします | テストはQEMUで実行されますが、sanitizersでは動作しません | -| `no-replicated-database` ||| -| `no-ordinary-database` ||| -| `no-parallel` | このテストと並行して他のテストを無効にします | テストは`system`テーブルから読み取るため、無変則が崩れる可能性があります | -| 
`no-parallel-replicas` ||| -| `no-debug` ||| -| `no-stress` ||| -| `no-polymorphic-parts` ||| -| `no-random-settings` ||| -| `no-random-merge-tree-settings` ||| -| `no-backward-compatibility-check` ||| -| `no-cpu-x86_64` ||| -| `no-cpu-aarch64` ||| -| `no-cpu-ppc64le` ||| -| `no-s3-storage` ||| - -上記の設定に加えて、特定のClickHouse機能の使用を定義するために、`system.build_options`から`USE_*`フラグを使用できます。 -例えば、テストがMySQLテーブルを使用する場合、`use-mysql`タグを追加するべきです。 - -### Specifying limits for random settings {#specifying-limits-for-random-settings} - -テストは、テスト実行中にランダム化可能な設定の最小および最大許容値を指定できます。 - -`.sh`テストの場合、制限はタグの隣の行や、タグが指定されていない場合の2行目にコメントとして書かれます。 - -```bash -#!/usr/bin/env bash - -# Tags: no-fasttest - -# Random settings limits: max_block_size=(1000, 10000); index_granularity=(100, None) -``` - -`.sql`テストの場合、タグはタグの隣の行や最初の行にSQLコメントとして置かれます。 - -```sql --- Tags: no-fasttest --- Random settings limits: max_block_size=(1000, 10000); index_granularity=(100, None) -SELECT 1 -``` - -制限が一つのみを指定する必要がある場合、もう一つには`None`を使用できます。 - -### Choosing the Test Name {#choosing-the-test-name} - -テスト名は、5桁のプレフィックスで始まり、続いて説明的な名前が付きます。例えば`00422_hash_function_constexpr.sql`のように。 -プレフィックスを選択するには、ディレクトリに既に存在する最大のプレフィックスを見つけ、それを1増やします。 - -```sh -ls tests/queries/0_stateless/[0-9]*.reference | tail -n 1 -``` - -その間に、同じ数値のプレフィックスを持つ他のテストが追加される場合がありますが、これは問題なく、後で変更する必要はありません。 - -### Checking for an Error that Must Occur {#checking-for-an-error-that-must-occur} - -時には、誤ったクエリに対してサーバーエラーが発生することをテストしたいことがあります。このための特別な注釈をSQLテストでサポートしています。次の形式です。 - -```sql -select x; -- { serverError 49 } -``` - -このテストは、サーバーが未知のカラム`x`についてエラーコード49を返すことを保証します。 -エラーがない場合や、エラーが異なる場合、テストは失敗します。 -クライアント側でエラーが発生することを確認するには、`clientError`注釈を使用してください。 - -エラーメッセージの特定の表現を確認しないでください。それは将来的に変更される可能性があり、テストが不必要に壊れることになります。 -エラーコードのみを確認します。 -既存のエラーコードがあなたのニーズに合わない場合は、新しいものを追加することを検討してください。 - -### Testing a Distributed Query {#testing-a-distributed-query} - -functional testsで分散クエリを使用する場合は、サーバーが自身をクエリするために`127.0.0.{1..2}`アドレスを持つ`remote`テーブル関数を活用できます。または、`test_shard_localhost`のようなサーバー構成ファイル内の事前定義されたテストクラスタを使用することもできます。 -テスト名に`shard`または`distributed`という言葉を追加して、CIで正しい構成で実行されるようにしてください。サーバーは分散クエリをサポートするように構成されています。 - -### Working with Temporary Files {#working-with-temporary-files} - -時にはシェルテストで作業するためにその場でファイルを作成する必要があります。 -いくつかのCIチェックがテストを並行して実行するため、スクリプト内でユニークな名前なしで一時ファイルを作成または削除すると、FlakyなどのCIチェックが失敗する可能性があります。 -これを回避するために、環境変数`$CLICKHOUSE_TEST_UNIQUE_NAME`を使用して、一時ファイルにそのテストにユニークな名前を付けるべきです。 -これにより、セットアップ中に作成したりクリーンアップ中に削除するファイルが、そのテストでのみ使用されているものであり、並行して実行されている他のテストによるものではないことが保証されます。 - -## Known Bugs {#known-bugs} - -再現可能なバグが知られている場合、準備されたfunctional testsを`tests/queries/bugs`ディレクトリに配置します。 -これらのテストは、バグが修正されたときに`tests/queries/0_stateless`に移動されます。 - -## Integration Tests {#integration-tests} - -Integration testsは、クラスタ構成でClickHouseをテストし、MySQL、Postgres、MongoDBなどの他のサーバーとの相互作用をテストすることを可能にします。 -ネットワーク分割、パケット破損などをエミュレートするのに便利です。 -これらのテストはDockerの下で実行され、さまざまなソフトウェアを持つ複数のコンテナを作成します。 - -これらのテストを実行する方法については、`tests/integration/README.md`を参照してください。 - -ClickHouseとサードパーティのドライバとの統合はテストされていないことに注意してください。 -また、現在のところ、JDBCおよびODBCドライバとの統合テストもありません。 - -## Unit Tests {#unit-tests} - -Unit testsは、ClickHouse全体をテストしたいのではなく、単一の孤立したライブラリまたはクラスをテストしたいときに便利です。 -テストのビルドを有効または無効にするには、`ENABLE_TESTS` CMakeオプションを使用します。 -Unit tests(および他のテストプログラム)は、コード全体の`tests`サブディレクトリにあります。 -Unit testsを実行するには、`ninja test`と入力します。 -一部のテストは`gtest`を使用しますが、テスト失敗時に非ゼロの終了コードを返す単なるプログラムもあります。 - -コードがすでにfunctional testsでカバーされている場合、unit testsを持つ必要はありません(functional testsは通常、はるかにシンプルに使用できます)。 - -個々のgtestチェックを直接実行可能ファイルを呼び出して実行できます。例えば: - -```bash -$ 
./src/unit_tests_dbms --gtest_filter=LocalAddress* -``` - -## Performance Tests {#performance-tests} - -Performance testsは、合成クエリに対してClickHouseのいくつかの孤立した部分のパフォーマンスを測定および比較することを可能にします。 -Performance testsは`tests/performance/`にあります。 -各テストは、テストケースの説明を含む`.xml`ファイルによって表されます。 -テストは`docker/test/performance-comparison`ツールを使用して実行されます。呼び出しについてはREADMEファイルを参照してください。 - -各テストは、一度に1つ以上のクエリ(パラメータの組み合わせを含む可能性があります)をループ内で実行します。 - -特定のシナリオでClickHouseのパフォーマンスを向上させることを望んでおり、改善が単純なクエリで観察可能な場合は、パフォーマンステストを書くことが強く推奨されます。 -また、比較的孤立していてあまり obscure でないSQL関数を追加および変更するときも、パフォーマンステストを書くことが推奨されます。 -テスト中に`perf top`や他の`perf`ツールを使用することが常に意味を持ちます。 - -## Test Tools and Scripts {#test-tools-and-scripts} - -`tests`ディレクトリ内の一部のプログラムは準備されたテストではなく、テストツールです。 -例えば、`Lexer`のためのツール`src/Parsers/tests/lexer`は、標準入力のトークン化を行い、色付きの結果を標準出力に書き込みます。 -これらの種のツールをコードの例や探求、手動テストのために使用できます。 - -## Miscellaneous Tests {#miscellaneous-tests} - -`tests/external_models`には機械学習モデルのテストがあります。 -これらのテストは更新されず、統合テストに移動する必要があります。 - -クオラム挿入に対する別のテストがあります。 -このテストは、Separate serversでClickHouseクラスターを実行し、ネットワーク分割、パケット破損(ClickHouseノード間、ClickHouseとZookeeper間、ClickHouseサーバーとクライアント間など)、`kill -9`、`kill -STOP`、`kill -CONT`などのさまざまな障害ケースをエミュレートします。この後、すべての確認された挿入が書き込まれ、拒否された挿入がされなかったことをチェックします。 - -クオラムテストは、ClickHouseがオープンソースとされる前に、別のチームによって書かれました。 -このチームはもはやClickHouseのメンテナンスを行っていません。 -テストは偶然にもJavaで書かれました。 -これらの理由から、クオラムテストは再記述され、統合テストに移動する必要があります。 - -## Manual Testing {#manual-testing} - -新しい機能を開発しているときは、手動でテストすることも理にかなっています。 -以下の手順で行うことができます: - -ClickHouseをビルドします。ターミナルからClickHouseを実行します:ディレクトリを`programs/clickhouse-server`に変更し、`./clickhouse-server`を実行します。これにより、デフォルトで現在のディレクトリから設定(`config.xml`、`users.xml`および`config.d`および`users.d`ディレクトリ内のファイル)が使用されます。ClickHouseサーバーに接続するには、`programs/clickhouse-client/clickhouse-client`を実行します。 - -すべてのclickhouseツール(サーバー、クライアントなど)は、`clickhouse`という単一のバイナリへのシンボリックリンクに過ぎません。 -このバイナリは`programs/clickhouse`にあります。 -すべてのツールも、`clickhouse tool`のように呼び出すことができます。 - -もしくは、ClickHouseパッケージをインストールすることもできます:ClickHouseリポジトリからの安定版リリース、もしくはClickHouseソースのルートで`./release`を使って自身用のパッケージをビルドできます。 -その後、`sudo clickhouse start`(またはサーバーを停止するには`sudo clickhouse stop`)でサーバーを開始します。 -ログは`/etc/clickhouse-server/clickhouse-server.log`にあります。 - -システムにClickHouseがすでにインストールされている場合、新しい`clickhouse`バイナリをビルドし、既存のバイナリを置き換えることができます。 - -```bash -$ sudo clickhouse stop -$ sudo cp ./clickhouse /usr/bin/ -$ sudo clickhouse start -``` - -また、システムのclickhouse-serverを停止し、同じ設定でログがターミナルに出力されるように独自のClickHouseサーバーを実行できます。 - -```bash -$ sudo clickhouse stop -$ sudo -u clickhouse /usr/bin/clickhouse server --config-file /etc/clickhouse-server/config.xml -``` - -gdbを使った例: - -```bash -$ sudo -u clickhouse gdb --args /usr/bin/clickhouse server --config-file /etc/clickhouse-server/config.xml -``` - -システムのclickhouse-serverがすでに実行中で停止したくない場合は、`config.xml`内のポート番号を変更するか(または`config.d`ディレクトリ内のファイルで上書きし)、適切なデータパスを提供して実行できます。 - -`clickhouse`バイナリはほとんど依存関係がなく、さまざまなLinuxディストリビューションで動作します。 -サーバーで変更を迅速かつ簡単にテストするには、単に新しくビルドされた`clickhouse`バイナリをサーバーに`scp`し、上記の例のように実行できます。 - -## Build Tests {#build-tests} - -Build testsは、さまざまな代替構成といくつかの外国システムでビルドが壊れていないことを確認するために使用されます。 -これらのテストは自動化されています。 - -例: -- Darwin x86_64のためのクロスコンパイル(macOS) -- FreeBSD x86_64のためのクロスコンパイル -- Linux AArch64のためのクロスコンパイル -- システムパッケージからのライブラリを使用してUbuntuでビルド(推奨されません) -- ライブラリの共有リンクを使用してビルド(推奨されません) - -例えば、システムパッケージを使用したビルドは悪いプラクティスです。なぜなら、システムが持っているパッケージの正確なバージョンを保証できないからです。 -しかし、これはDebianのメンテナンスにとって非常に必要です。 -このため、少なくともこのビルドバリアントをサポートする必要があります。 -別の例:共有リンクは、一般的に問題の源ですが、一部の愛好家には必要です。 - -すべてのビルドバリアントですべてのテストを実行できるわけではありませんが、さまざまなビルドバリアントが壊れていないことを少なくとも確認したいと考えています。 
-この目的で、ビルドテストを使用します。 - -また、コンパイルに時間がかかりすぎるか、RAMを過剰に必要とする翻訳単位がないこともテストしています。 - -さらに、大きすぎるスタックフレームがないこともテストしています。 - -## Testing for Protocol Compatibility {#testing-for-protocol-compatibility} - -ClickHouseのネットワークプロトコルを拡張する際に、古いclickhouse-clientが新しいclickhouse-serverと動作すること、新しいclickhouse-clientが古いclickhouse-serverと動作することを手動でテストします(対応するパッケージのバイナリを実行することで)。 - -私たちはまた、統合テストで自動的にいくつかのケースをテストします: -- 古いバージョンのClickHouseによって書き込まれたデータが新しいバージョンによって正常に読み込めるかどうか。 -- 異なるClickHouseバージョンでのクラスター内で分散クエリが正常に動作するかどうか。 - -## Help from the Compiler {#help-from-the-compiler} - -主要なClickHouseコード(`src`ディレクトリにあります)は、`-Wall -Wextra -Werror`でビルドされ、いくつかの追加の警告が有効化されています。 -ただし、これらのオプションはサードパーティのライブラリには有効化されていません。 - -Clangにはさらに役立つ警告が多数あり、これらを`-Weverything`で検索し、デフォルトビルド用に選択できます。 - -私たちは常にClangを使用してClickHouseをビルドしており、開発や生産のために使用します。 -あなた自身のマシンでデバッグモードでビルドができるが(ノートパソコンのバッテリーを節約するため)、コンパイラは`-O3`でのビルドにおいてより多くの警告を生成できることに注意してください。理由は、制御フローと手続き間解析がより良く行われるからです。 -デバッグモードでClangでビルドする際には、デバッグバージョンの`libc++`が使用され、実行時のエラーをより多くキャッチできるようになります。 - -## Sanitizers {#sanitizers} - -:::note -ローカルで実行する際に、ClickHouseサーバーまたはクライアントが起動時にクラッシュする場合、アドレス空間配置のランダマイズを無効にする必要があるかもしれません:`sudo sysctl kernel.randomize_va_space=0` -::: - -### Address sanitizer {#address-sanitizer} - -私たちは、ASan下で機能テスト、統合テスト、ストレステスト、ユニットテストをコミットごとに実行しています。 - -### Thread sanitizer {#thread-sanitizer} - -私たちは、TSan下で機能テスト、統合テスト、ストレステスト、ユニットテストをコミットごとに実行しています。 - -### Memory sanitizer {#memory-sanitizer} - -私たちは、MSan下で機能テスト、統合テスト、ストレステスト、ユニットテストをコミットごとに実行しています。 - -### Undefined behaviour sanitizer {#undefined-behaviour-sanitizer} - -私たちは、UBSan下で機能テスト、統合テスト、ストレステスト、ユニットテストをコミットごとに実行しています。 -いくつかのサードパーティライブラリのコードはUBに対してsanitizeされていません。 - -### Valgrind (Memcheck) {#valgrind-memcheck} - -以前はValgrind下で夜間に機能テストを実行していましたが、現在はこれを行っていません。 -複数の時間がかかります。 -現在、`re2`ライブラリに1つの既知の偽陽性があります。詳細は[この記事](https://research.swtch.com/sparse)を参照してください。 - -## Fuzzing {#fuzzing} - -ClickHouseのファジングは、[libFuzzer](https://llvm.org/docs/LibFuzzer.html)とランダムSQLクエリの両方を使用して実装されています。 -すべてのファジングテストはサニタイザー(AddressとUndefined)で実行する必要があります。 - -LibFuzzerはライブラリコードの孤立したファジングテストに使用されます。 -ファジングプログラムはテストの一部として実装され、"_fuzzer"という名前の接尾辞が付けられます。 -ファジングの例は`src/Parsers/fuzzers/lexer_fuzzer.cpp`にあります。 -LibFuzzer固有の構成、辞書、およびコーパスは`tests/fuzz`に保存されています。 -ユーザー入力を処理するすべての機能に対してファジングテストを書くことを推奨します。 - -ファジングプログラムはデフォルトではビルドされません。 -ファジングプログラムをビルドするには、`-DENABLE_FUZZING=1`および`-DENABLE_TESTS=1`の両方のオプションを設定する必要があります。 -ファジングプログラムをビルド中にJemallocを無効にすることを推奨します。 -ClickHouseファジングをGoogle OSS-Fuzzに統合するために使用される構成は、`docker/fuzz`にあります。 - -また、ランダムなSQLクエリを生成し、サーバーがそれを実行中にクラッシュしないことを確認するための単純なファジングテストも使用します。 -このテストは`00746_sql_fuzzy.pl`にあります。 -このテストは継続的に(夜間およびそれ以降)実行するべきです。 - -さらに、ASTに基づく高度なクエリファジングプログラムを使用して、大量のコーナーケースを発見できるようにしています。 -それは、クエリAST内でのランダムな順列と置換を行います。 -それは、前のテストからのASTノードを覚えて次のテストのファジングに使用します。処理中のランダム順序で。 -このファジングプログラムについての詳細は、[このブログ記事](https://clickhouse.com/blog/fuzzing-click-house)で学ぶことができます。 - -## Stress test {#stress-test} - -ストレステストは、ファジングの別のケースです。 -各functional testを単一のサーバーでランダムな順序で並行実行します。 -テストの結果はチェックされません。 - -次のことが確認されます: -- サーバーがクラッシュせず、デバッグまたはサニタイザーのトラップがトリガーされないこと; -- デッドロックがないこと; -- データベース構造が一貫していること; -- テスト後、サーバーは正常に停止し、例外なしで再起動できること。 - -5つの異なるバリエーションがあります(Debug、ASan、TSan、MSan、UBSan)。 - -## Thread Fuzzer {#thread-fuzzer} - -Thread Fuzzer(Thread Sanitizerと混同しないでください)は、スレッドの実行順序をランダム化する別の種類のファジングで、さらに特殊なケースを見つけるのに役立ちます。 - -## Security Audit {#security-audit} - -私たちのセキュリティチームは、セキュリティの観点からClickHouseの能力を基本的にレビューしました。 - -## Static Analyzers {#static-analyzers} - -私たちは、コミットごとに`clang-tidy`を実行しています。 -`clang-static-analyzer`のチェックも有効です。 
-`clang-tidy`は、一部のスタイルチェックにも使用されます。 - -私たちは`clang-tidy`、`Coverity`、`cppcheck`、`PVS-Studio`、`tscancode`、`CodeQL`を評価しました。 -使用のための指示は`tests/instructions/`ディレクトリにあります。 - -`CLion`をIDEとして使用する場合、すぐに利用できる`clang-tidy`のチェックを活用できます。 - -また、シェルスクリプトの静的分析には`shellcheck`を使用しています。 - -## Hardening {#hardening} - -デバッグビルドでは、ユーザーレベルの割り当てのASLRを行うカスタムアロケータを使用しています。 - -さらに、割り当て後に読み取り専用であることが期待されるメモリ領域も手動で保護しています。 - -デバッグビルドでは、呼び出される危険な(時代遅れ、不安全、スレッドセーフでない)関数が呼び出されないように、libcのカスタマイズも含めています。 - -デバッグアサーションは広範に使用されています。 - -デバッグビルドでは、「論理エラー」コードの例外がスローされると、プログラムが早期に終了します。 -これにより、リリースビルドで例外を使用できますが、デバッグビルドではアサーションとして扱われます。 - -デバッグビルドにはjemallocのデバッグバージョンが使用されます。 -デバッグビルドにはlibc++のデバッグバージョンが使用されます。 - -## Runtime Integrity Checks {#runtime-integrity-checks} - -ディスク上に保存されるデータはチェックサムが付与されています。 -MergeTreeテーブルのデータは、三つの方法で同時にチェックサムが付与されています(圧縮データブロック、非圧縮データブロック、ブロック全体の合計チェックサム)。 -クライアントとサーバー間またはサーバー間でネットワークを通じて転送されるデータにもチェックサムが付与されています。 -レプリケーションはレプリカ上のビット同一のデータを保証します。 - -これはハードウェアの故障(ストレージ媒体のビット劣化、サーバーのRAMのビット反転、ネットワークコントローラのRAMのビット反転、ネットワークスイッチのRAMのビット反転、クライアントのRAMのビット反転、回線上のビット反転)から保護するために必要です。 -ビット反転は一般的であり、ECC RAMやTCPチェックサムがある場合でも発生する可能性が高いことに注意してください(ペタバイトのデータを処理している何千ものサーバーを実行している場合)。 -[このビデオ(ロシア語)](https://www.youtube.com/watch?v=ooBAQIe0KlQ)。 - -ClickHouseは、運用エンジニアが故障したハードウェアを見つけるのに役立つ診断を提供します。 - -\* そしてそれは遅くありません。 - -## Code Style {#code-style} - -コードスタイルルールは[こちら](style.md)に記載されています。 - -一般的なスタイル違反をチェックするために、`utils/check-style`スクリプトを使用できます。 - -コードのスタイルを強制するために、`clang-format`を使用できます。 -ファイル`.clang-format`はソースのルートにあります。 -それはほとんど私たちの実際のコードスタイルに対応しています。 -しかし、既存のファイルに対して`clang-format`を適用することは推奨されません。なぜならフォーマットが悪化するからです。 -代わりに、clangのソースリポジトリ内にある`clang-format-diff`ツールを使用できます。 - -また、コードを再フォーマットするために`uncrustify`ツールを試すこともできます。 -設定はソースのルートにある`uncrustify.cfg`にあります。 -これは`clang-format`よりもテストされていません。 - -`CLion`には独自のコードフォーマッタがあり、私たちのコードスタイルのために調整する必要があります。 - -私たちはまた、コード内のタイプミスを見つけるために`codespell`を使用しています。 -これも自動化されています。 - -## Test Coverage {#test-coverage} - -私たちはテストカバレッジを追跡していますが、functional testsのみに対して、かつclickhouse-serverのみに対して行います。 -これは日次で実行されます。 - -## Tests for Tests {#tests-for-tests} - -フレークテストのチェックが自動化されています。 -すべての新しいテストを100回(functional testsの場合)または10回(integration testsの場合)実行します。 -少なくとも1回でもテストが失敗した場合、それはフレークと見なされます。 - -## Test Automation {#test-automation} - -私たちは[GitHub Actions](https://github.com/features/actions)を使用してテストを実行します。 - -ビルドジョブとテストは、コミットごとにSandboxで実行されます。 -結果として得られるパッケージとテスト結果はGitHubに公開され、直接リンクでダウンロードできます。 -アーティファクトは数ヶ月保存されます。 -GitHubでプルリクエストを送信すると、「テスト可能」とタグ付けされ、私たちのCIシステムがClickHouseパッケージ(リリース、デバッグ、アドレスサニタイザー付きなど)をあなたのためにビルドします。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/development/tests.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/development/tests.md.hash deleted file mode 100644 index c1f2b78eb0d..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/development/tests.md.hash +++ /dev/null @@ -1 +0,0 @@ -a781c03d60105b25 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/dictionary/index.md b/i18n/jp/docusaurus-plugin-content-docs/current/dictionary/index.md deleted file mode 100644 index c4149a2d186..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/dictionary/index.md +++ /dev/null @@ -1,336 +0,0 @@ ---- -slug: '/dictionary' -title: 'Dictionary' -keywords: -- 'dictionary' -- 'dictionaries' -description: 'A dictionary provides a key-value representation of data for fast - lookups.' 
---- - -import dictionaryUseCases from '@site/static/images/dictionary/dictionary-use-cases.png'; -import dictionaryLeftAnyJoin from '@site/static/images/dictionary/dictionary-left-any-join.png'; -import Image from '@theme/IdealImage'; - - -# Dictionary - -ClickHouseの辞書は、さまざまな [内部および外部ソース](/sql-reference/dictionaries#dictionary-sources) からのデータのインメモリ [key-value](https://en.wikipedia.org/wiki/Key%E2%80%93value_database) 表現を提供し、超低レイテンシのルックアップクエリを最適化します。 - -辞書は次のために便利です: -- 特に `JOIN` と共に使用することで、クエリのパフォーマンスを向上させる -- データ取り込みプロセスを遅延させずに、取得したデータをその場で豊かにする - -ClickHouseにおける辞書の利用ケース - -## 辞書を使ったJOINの高速化 {#speeding-up-joins-using-a-dictionary} - -辞書は特定のタイプの `JOIN` を高速化するために使用できます: [`LEFT ANY` タイプ](/sql-reference/statements/select/join#supported-types-of-join) で、結合キーが基礎となるキーバリューストレージのキー属性に一致する必要があります。 - -LEFT ANY JOINでの辞書の使用 - -この場合、ClickHouseは辞書を活用して [Direct Join](https://clickhouse.com/blog/clickhouse-fully-supports-joins-direct-join-part4#direct-join) を実行できます。これはClickHouseの最も高速な結合アルゴリズムであり、右側のテーブルの基礎となる [テーブルエンジン](/engines/table-engines) が低レイテンシのキーバリューリクエストをサポートしている場合に適用可能です。ClickHouseにはこれを提供する3つのテーブルエンジンがあります:[Join](/engines/table-engines/special/join)(基本的には事前計算されたハッシュテーブル)、[EmbeddedRocksDB](/engines/table-engines/integrations/embedded-rocksdb) および [Dictionary](/engines/table-engines/special/dictionary)。辞書ベースのアプローチについて説明しますが、メカニズムは3つのエンジンで同じです。 - -ダイレクトジョインアルゴリズムでは、右側のテーブルが辞書でバックアップされている必要があります。そのため、結合されるデータはすでにメモリ内に低レイテンシのキーバリューデータ構造の形で存在している必要があります。 - -### 例 {#example} - -Stack Overflowのデータセットを使用して、次の質問に答えましょう: -*Hacker NewsでSQLに関する最も物議を醸す投稿は何ですか?* - -物議を醸すとは、投稿が似たような数のアップ票とダウン票を持つ場合と定義します。この絶対的な差を計算し、値が0に近いほど物議を醸すものとします。投稿には最低10のアップ票とダウン票が必要であると仮定します - 票を投じられない投稿はあまり物議を醸しません。 - -データを正規化すると、現在このクエリは `posts` テーブルと `votes` テーブルを使用した `JOIN` を必要とします: - -```sql -WITH PostIds AS -( - SELECT Id - FROM posts - WHERE Title ILIKE '%SQL%' -) -SELECT - Id, - Title, - UpVotes, - DownVotes, - abs(UpVotes - DownVotes) AS Controversial_ratio -FROM posts -INNER JOIN -( - SELECT - PostId, - countIf(VoteTypeId = 2) AS UpVotes, - countIf(VoteTypeId = 3) AS DownVotes - FROM votes - WHERE PostId IN (PostIds) - GROUP BY PostId - HAVING (UpVotes > 10) AND (DownVotes > 10) -) AS votes ON posts.Id = votes.PostId -WHERE Id IN (PostIds) -ORDER BY Controversial_ratio ASC -LIMIT 1 - -Row 1: -────── -Id: 25372161 -Title: How to add exception handling to SqlDataSource.UpdateCommand -UpVotes: 13 -DownVotes: 13 -Controversial_ratio: 0 - -1 rows in set. Elapsed: 1.283 sec. Processed 418.44 million rows, 7.23 GB (326.07 million rows/s., 5.63 GB/s.) -Peak memory usage: 3.18 GiB. 
-``` - ->**`JOIN`の右側に小さいデータセットを使用する**:このクエリは、`PostId` に対するフィルタリングが外部および内部両方のクエリで行われているため、必要以上に冗長に見えるかもしれません。これは、クエリ応答時間を速くするためのパフォーマンス最適化です。最適なパフォーマンスのためには、常に `JOIN` の右側がより小さいセットであることを確認し、できるだけ小さくします。 `JOIN` のパフォーマンスを最適化し、利用可能なアルゴリズムを理解するためのヒントについては、[このブログ記事のシリーズ](https://clickhouse.com/blog/clickhouse-fully-supports-joins-part1) をお勧めします。 - -このクエリは速いですが、良好なパフォーマンスを達成するために `JOIN` を慎重に書く必要があります。理想的には、`UpVote` と `DownVote` のカウントを確認する前に、"SQL" を含む投稿にフィルターをかけたいところです。 - -#### 辞書の適用 {#applying-a-dictionary} - -これらの概念を示すために、私たちは投票データのために辞書を使用します。辞書は通常、メモリ内に保持されます([ssd_cache](/sql-reference/dictionaries#ssd_cache) は例外です)、ユーザーはデータのサイズに注意する必要があります。`votes` テーブルのサイズを確認します: - -```sql -SELECT table, - formatReadableSize(sum(data_compressed_bytes)) AS compressed_size, - formatReadableSize(sum(data_uncompressed_bytes)) AS uncompressed_size, - round(sum(data_uncompressed_bytes) / sum(data_compressed_bytes), 2) AS ratio -FROM system.columns -WHERE table IN ('votes') -GROUP BY table - -┌─table───────────┬─compressed_size─┬─uncompressed_size─┬─ratio─┐ -│ votes │ 1.25 GiB │ 3.79 GiB │ 3.04 │ -└─────────────────┴─────────────────┴───────────────────┴───────┘ -``` - -データは辞書に未圧縮で保存されるため、すべてのカラムを辞書に保存する場合は、少なくとも4GBのメモリが必要です(実際には保存しません)。辞書はクラスタ全体にレプリケートされるため、このメモリ量は*ノードごと*に予約する必要があります。 - -> 下記の例では、私たちの辞書のデータはClickHouseテーブルに由来しています。これは辞書の最も一般的なソースですが、[ファイル](https://clickhouse.com/blog/faster-queries-dictionaries-clickhouse#choosing-a-layout)、http、および [Postgres](/sql-reference/dictionaries#postgresql) を含むデータベースを含む多くのソースがサポートされています。辞書は自動的に更新されることができ、頻繁に変更される小さなデータセットが直接結合に利用できるようにする理想的な方法です。 - -私たちの辞書は、ルックアップが行われる主キーを必要とします。これは、トランザクショナルデータベースの主キーと概念的に同じで、一意である必要があります。上記のクエリでは、結合キー `PostId` に対してルックアップが必要です。辞書は、その結果、`votes` テーブルからの `PostId` ごとのアップ票とダウン票の合計で埋め込まれるべきです。以下は、この辞書データを取得するためのクエリです: - -```sql -SELECT PostId, - countIf(VoteTypeId = 2) AS UpVotes, - countIf(VoteTypeId = 3) AS DownVotes -FROM votes -GROUP BY PostId -``` - -辞書を作成するには、次のDDLが必要です - 上述のクエリを使用していることに注意してください: - -```sql -CREATE DICTIONARY votes_dict -( - `PostId` UInt64, - `UpVotes` UInt32, - `DownVotes` UInt32 -) -PRIMARY KEY PostId -SOURCE(CLICKHOUSE(QUERY 'SELECT PostId, countIf(VoteTypeId = 2) AS UpVotes, countIf(VoteTypeId = 3) AS DownVotes FROM votes GROUP BY PostId')) -LIFETIME(MIN 600 MAX 900) -LAYOUT(HASHED()) - -0 rows in set. Elapsed: 36.063 sec. -``` - -> セルフマネージドOSSでは、上記のコマンドはすべてのノードで実行する必要があります。ClickHouse Cloudでは、辞書は自動的にすべてのノードにレプリケートされます。上記は64GBのRAMを持つClickHouse Cloudノードで実行され、読み込みに36秒かかりました。 - -辞書によって消費されるメモリを確認するには: - -```sql -SELECT formatReadableSize(bytes_allocated) AS size -FROM system.dictionaries -WHERE name = 'votes_dict' - -┌─size─────┐ -│ 4.00 GiB │ -└──────────┘ -``` - -特定の `PostId` に対してアップ票とダウン票を取得するのは、単純な `dictGet` 関数を使用して実行できます。以下に、投稿 `11227902` の値を取得します: - -```sql -SELECT dictGet('votes_dict', ('UpVotes', 'DownVotes'), '11227902') AS votes - -┌─votes──────┐ -│ (34999,32) │ -└────────────┘ - -これを以前のクエリに利用することで、JOINを削除できます: - -WITH PostIds AS -( - SELECT Id - FROM posts - WHERE Title ILIKE '%SQL%' -) -SELECT Id, Title, - dictGet('votes_dict', 'UpVotes', Id) AS UpVotes, - dictGet('votes_dict', 'DownVotes', Id) AS DownVotes, - abs(UpVotes - DownVotes) AS Controversial_ratio -FROM posts -WHERE (Id IN (PostIds)) AND (UpVotes > 10) AND (DownVotes > 10) -ORDER BY Controversial_ratio ASC -LIMIT 3 - -3 rows in set. Elapsed: 0.551 sec. Processed 119.64 million rows, 3.29 GB (216.96 million rows/s., 5.97 GB/s.) -Peak memory usage: 552.26 MiB. 
-``` - -このクエリははるかにシンプルで、速度も2倍以上向上しています!これはさらに最適化でき、10以上のアップ票とダウン票を持つ投稿のみを辞書に読み込むこと及び事前計算された物議の値を保存することも可能です。 - -## クエリ時の補強 {#query-time-enrichment} - -辞書はクエリ時に値をルックアップするためにも使用できます。これらの値は結果に返されるか、集計に使用されます。ユーザーIDを場所にマッピングする辞書を作成しましょう: - -```sql -CREATE DICTIONARY users_dict -( - `Id` Int32, - `Location` String -) -PRIMARY KEY Id -SOURCE(CLICKHOUSE(QUERY 'SELECT Id, Location FROM stackoverflow.users')) -LIFETIME(MIN 600 MAX 900) -LAYOUT(HASHED()) -``` - -この辞書を使用して投稿結果を補強できます: - -```sql -SELECT - Id, - Title, - dictGet('users_dict', 'Location', CAST(OwnerUserId, 'UInt64')) AS location -FROM posts -WHERE Title ILIKE '%clickhouse%' -LIMIT 5 -FORMAT PrettyCompactMonoBlock - -┌───────Id─┬─Title─────────────────────────────────────────────────────────┬─Location──────────────┐ -│ 52296928 │ Comparision between two Strings in ClickHouse │ Spain │ -│ 52345137 │ How to use a file to migrate data from mysql to a clickhouse? │ 中国江苏省Nanjing Shi │ -│ 61452077 │ How to change PARTITION in clickhouse │ Guangzhou, 广东省中国 │ -│ 55608325 │ Clickhouse select last record without max() on all table │ Moscow, Russia │ -│ 55758594 │ ClickHouse create temporary table │ Perm', Russia │ -└──────────┴───────────────────────────────────────────────────────────────┴───────────────────────┘ - -5 rows in set. Elapsed: 0.033 sec. Processed 4.25 million rows, 82.84 MB (130.62 million rows/s., 2.55 GB/s.) -Peak memory usage: 249.32 MiB. -``` - -上記のJOINの例と同様に、この辞書を使って、最も多くの投稿がどこから来ているかを効率的に特定することもできます: - -```sql -SELECT - dictGet('users_dict', 'Location', CAST(OwnerUserId, 'UInt64')) AS location, - count() AS c -FROM posts -WHERE location != '' -GROUP BY location -ORDER BY c DESC -LIMIT 5 - -┌─location───────────────┬──────c─┐ -│ India │ 787814 │ -│ Germany │ 685347 │ -│ United States │ 595818 │ -│ London, United Kingdom │ 538738 │ -│ United Kingdom │ 537699 │ -└────────────────────────┴────────┘ - -5 rows in set. Elapsed: 0.763 sec. Processed 59.82 million rows, 239.28 MB (78.40 million rows/s., 313.60 MB/s.) -Peak memory usage: 248.84 MiB. -``` - -## インデックス時の補強 {#index-time-enrichment} - -上記の例では、JOINを削除するためにクエリ時に辞書を使用しました。辞書は挿入時に行を補強するためにも使用できます。これは、補強値が変更されず、辞書を埋め込むために使用できる外部ソースに存在する場合に一般的に適切です。この場合、挿入時に行を補強することで、辞書へのクエリ時のルックアップを回避できます。 - -もしStack Overflowのユーザーの `Location` が決して変更されないと仮定しましょう(実際には変更されますが) - 明確には `users` テーブルの `Location` 列です。ポストテーブルに対してロケーション別の分析クエリを行いたいとします。ここには `UserId` が含まれています。 - -辞書はユーザーIDからロケーションへのマッピングを提供し、`users` テーブルでバックアップされます: - -```sql -CREATE DICTIONARY users_dict -( - `Id` UInt64, - `Location` String -) -PRIMARY KEY Id -SOURCE(CLICKHOUSE(QUERY 'SELECT Id, Location FROM users WHERE Id >= 0')) -LIFETIME(MIN 600 MAX 900) -LAYOUT(HASHED()) -``` - -> `Id < 0` のユーザーを省略し、`Hashed` 辞書タイプを使用できるようにします。 `Id < 0` のユーザーはシステムユーザーです。 - -この辞書を投稿テーブルの挿入時に利用するには、スキーマを変更する必要があります: - -```sql -CREATE TABLE posts_with_location -( - `Id` UInt32, - `PostTypeId` Enum8('Question' = 1, 'Answer' = 2, 'Wiki' = 3, 'TagWikiExcerpt' = 4, 'TagWiki' = 5, 'ModeratorNomination' = 6, 'WikiPlaceholder' = 7, 'PrivilegeWiki' = 8), - ... 
- `Location` MATERIALIZED dictGet(users_dict, 'Location', OwnerUserId::'UInt64') -) -ENGINE = MergeTree -ORDER BY (PostTypeId, toDate(CreationDate), CommentCount) -``` - -上記の例では、`Location` が `MATERIALIZED` カラムとして宣言されています。これは、値を `INSERT` クエリの一部として提供でき、常に計算されることを意味します。 - -> ClickHouseは [`DEFAULT` カラム](/sql-reference/statements/create/table#default_values) もサポートしています(値は提供されない場合に挿入または計算できます)。 - -テーブルを埋めるために、通常の `INSERT INTO SELECT` をS3から使用できます: - -```sql -INSERT INTO posts_with_location SELECT Id, PostTypeId::UInt8, AcceptedAnswerId, CreationDate, Score, ViewCount, Body, OwnerUserId, OwnerDisplayName, LastEditorUserId, LastEditorDisplayName, LastEditDate, LastActivityDate, Title, Tags, AnswerCount, CommentCount, FavoriteCount, ContentLicense, ParentId, CommunityOwnedDate, ClosedDate FROM s3('https://datasets-documentation.s3.eu-west-3.amazonaws.com/stackoverflow/parquet/posts/*.parquet') - -0 rows in set. Elapsed: 36.830 sec. Processed 238.98 million rows, 2.64 GB (6.49 million rows/s., 71.79 MB/s.) -``` - -今や、最も多くの投稿がどこから来ているのかを得ることができます: - -```sql -SELECT Location, count() AS c -FROM posts_with_location -WHERE Location != '' -GROUP BY Location -ORDER BY c DESC -LIMIT 4 - -┌─Location───────────────┬──────c─┐ -│ India │ 787814 │ -│ Germany │ 685347 │ -│ United States │ 595818 │ -│ London, United Kingdom │ 538738 │ -└────────────────────────┴────────┘ - -4 rows in set. Elapsed: 0.142 sec. Processed 59.82 million rows, 1.08 GB (420.73 million rows/s., 7.60 GB/s.) -Peak memory usage: 666.82 MiB. -``` - -## 高度な辞書トピック {#advanced-dictionary-topics} - -### 辞書の `LAYOUT` の選択 {#choosing-the-dictionary-layout} - -`LAYOUT` 句は、辞書の内部データ構造を制御します。いくつかのオプションが存在し、[こちらで文書化されています](/sql-reference/dictionaries#ways-to-store-dictionaries-in-memory)。正しいレイアウトを選択するためのヒントは[こちら](https://clickhouse.com/blog/faster-queries-dictionaries-clickhouse#choosing-a-layout)にあります。 - -### 辞書の更新 {#refreshing-dictionaries} - -辞書に対して `LIFETIME` を `MIN 600 MAX 900` と指定しました。LIFETIMEは、辞書の更新間隔で、ここでの値は600秒から900秒の間のランダムな間隔での定期的な再読み込みを引き起こします。このランダムな間隔は、大規模なサーバーで更新する際に辞書ソースへの負荷を分散するために必要です。更新中は、古いバージョンの辞書もクエリ可能で、初期読み込みのみがクエリをブロックします。`(LIFETIME(0))`を設定すると、辞書の更新が防止されます。 -ClickHouseやPostgresなどのデータベースソースでは、クエリの応答が実際に変わった場合にのみ辞書を更新するクエリを設定できます(定期的な間隔ではなく)。詳細は[こちら](https://sql-reference/dictionaries#refreshing-dictionary-data-using-lifetime)で確認できます。 - -### その他の辞書タイプ {#other-dictionary-types} - -ClickHouseは、[階層的](/sql-reference/dictionaries#hierarchical-dictionaries)、[多角形](/sql-reference/dictionaries#polygon-dictionaries)、および [正規表現](/sql-reference/dictionaries#regexp-tree-dictionary) 辞書もサポートしています。 - -### さらに読む {#more-reading} - -- [辞書を使用してクエリを加速する](https://clickhouse.com/blog/faster-queries-dictionaries-clickhouse) -- [辞書の高度な構成](/sql-reference/dictionaries) diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/dictionary/index.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/dictionary/index.md.hash deleted file mode 100644 index a5676f6741b..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/dictionary/index.md.hash +++ /dev/null @@ -1 +0,0 @@ -d09e32ee182ee986 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/engines/_category_.yml b/i18n/jp/docusaurus-plugin-content-docs/current/engines/_category_.yml deleted file mode 100644 index 7675c2d5107..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/engines/_category_.yml +++ /dev/null @@ -1,7 +0,0 @@ -position: 30 -label: 'Database & Table Engines' -collapsible: true -collapsed: true -link: - type: generated-index - slug: /engines diff --git 
a/i18n/jp/docusaurus-plugin-content-docs/current/engines/database-engines/atomic.md b/i18n/jp/docusaurus-plugin-content-docs/current/engines/database-engines/atomic.md deleted file mode 100644 index 0487d878702..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/engines/database-engines/atomic.md +++ /dev/null @@ -1,79 +0,0 @@ ---- -description: 'The `Atomic` engine supports non-blocking `DROP TABLE` and `RENAME - TABLE` queries, and atomic `EXCHANGE TABLES`queries. The `Atomic` database engine - is used by default.' -sidebar_label: 'Atomic' -sidebar_position: 10 -slug: '/engines/database-engines/atomic' -title: 'Atomic' ---- - - - - -# Atomic - -`Atomic` エンジンは、非ブロッキングの [`DROP TABLE`](#drop-detach-table) および [`RENAME TABLE`](#rename-table) クエリ、及び原子性のある [`EXCHANGE TABLES`](#exchange-tables) クエリをサポートしています。 `Atomic` データベースエンジンはデフォルトで使用されます。 - -:::note -ClickHouse Cloud では、デフォルトで `Replicated` データベースエンジンが使用されます。 -::: - -## データベースの作成 {#creating-a-database} - -```sql -CREATE DATABASE test [ENGINE = Atomic]; -``` - -## 特徴と推奨事項 {#specifics-and-recommendations} - -### テーブルUUID {#table-uuid} - -`Atomic` データベース内の各テーブルは、永続的な [UUID](../../sql-reference/data-types/uuid.md) を持ち、以下のディレクトリにデータを保存します: - -```text -/clickhouse_path/store/xxx/xxxyyyyy-yyyy-yyyy-yyyy-yyyyyyyyyyyy/ -``` - -ここで、`xxxyyyyy-yyyy-yyyy-yyyy-yyyyyyyyyyyy` はテーブルのUUIDです。 - -デフォルトでは、UUIDは自動的に生成されます。ただし、ユーザーはテーブルを作成する際にUUIDを明示的に指定することもできますが、これは推奨されません。 - -例えば: - -```sql -CREATE TABLE name UUID '28f1c61c-2970-457a-bffe-454156ddcfef' (n UInt64) ENGINE = ...; -``` - -:::note -[show_table_uuid_in_table_create_query_if_not_nil](../../operations/settings/settings.md#show_table_uuid_in_table_create_query_if_not_nil) 設定を使用すると、`SHOW CREATE` クエリでUUIDを表示できます。 -::: - -### RENAME TABLE {#rename-table} - -[`RENAME`](../../sql-reference/statements/rename.md) クエリは、UUIDを変更したりテーブルデータを移動したりしません。これらのクエリは即座に実行され、テーブルを使用している他のクエリが完了するのを待ちません。 - -### DROP/DETACH TABLE {#drop-detach-table} - -`DROP TABLE` を使用する際に、データは削除されません。 `Atomic` エンジンは、メタデータを `/clickhouse_path/metadata_dropped/` に移動させることでテーブルを削除済みとしてマークし、バックグラウンドスレッドに通知します。最終的なテーブルデータの削除までの遅延は、[`database_atomic_delay_before_drop_table_sec`](../../operations/server-configuration-parameters/settings.md#database_atomic_delay_before_drop_table_sec) 設定で指定されます。 `SYNC` 修飾子を使用して同期モードを指定することができます。これを行うためには、[`database_atomic_wait_for_drop_and_detach_synchronously`](../../operations/settings/settings.md#database_atomic_wait_for_drop_and_detach_synchronously) 設定を使用してください。この場合、`DROP` はテーブルを使用している実行中の `SELECT`、`INSERT`、その他のクエリが完了するのを待ちます。テーブルは使用中でない時に削除されます。 - -### EXCHANGE TABLES/DICTIONARIES {#exchange-tables} - -[`EXCHANGE`](../../sql-reference/statements/exchange.md) クエリは、テーブルまたはディクショナリを原子に入れ替えます。例えば、次の非原子的な操作の代わりに: - -```sql title="Non-atomic" -RENAME TABLE new_table TO tmp, old_table TO new_table, tmp TO old_table; -``` -原子性のあるものを使用できます: - -```sql title="Atomic" -EXCHANGE TABLES new_table AND old_table; -``` - -### Atomic データベースにおける ReplicatedMergeTree {#replicatedmergetree-in-atomic-database} - -[`ReplicatedMergeTree`](/engines/table-engines/mergetree-family/replication) テーブルの場合、ZooKeeper内のパスとレプリカ名のためのエンジンパラメータを指定しないことが推奨されます。この場合、設定パラメータ [`default_replica_path`](../../operations/server-configuration-parameters/settings.md#default_replica_path) および [`default_replica_name`](../../operations/server-configuration-parameters/settings.md#default_replica_name) が使用されます。エンジンパラメータを明示的に指定したい場合は、`{uuid}` マクロを使用することが推奨されます。これにより、ZooKeeper内の各テーブルに対してユニークなパスが自動的に生成されます。 - -## 参照 {#see-also} - -- 
[system.databases](../../operations/system-tables/databases.md) システムテーブル diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/engines/database-engines/atomic.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/engines/database-engines/atomic.md.hash deleted file mode 100644 index 84eda628352..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/engines/database-engines/atomic.md.hash +++ /dev/null @@ -1 +0,0 @@ -62bf021f2ce93332 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/engines/database-engines/backup.md b/i18n/jp/docusaurus-plugin-content-docs/current/engines/database-engines/backup.md deleted file mode 100644 index f2cbb08f69b..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/engines/database-engines/backup.md +++ /dev/null @@ -1,114 +0,0 @@ ---- -description: 'Allows to instantly attach table/database from backups in read-only - mode.' -sidebar_label: 'バックアップ' -sidebar_position: 60 -slug: '/engines/database-engines/backup' -title: 'Backup' ---- - - - - -# バックアップ - -データベースバックアップでは、[バックアップ](../../operations/backup)からテーブル/データベースを読み取り専用モードで瞬時にアタッチできます。 - -データベースバックアップは、増分バックアップと非増分バックアップの両方で機能します。 - -## データベースの作成 {#creating-a-database} - -```sql -CREATE DATABASE backup_database -ENGINE = Backup('database_name_inside_backup', 'backup_destination') -``` - -バックアップ先は、`Disk`、`S3`、`File`など、すべての有効なバックアップ[宛先](../../operations/backup#configure-a-backup-destination)にすることができます。 - -`Disk`バックアップ先を使用した場合、バックアップからデータベースを作成するクエリは次のようになります: - -```sql -CREATE DATABASE backup_database -ENGINE = Backup('database_name_inside_backup', Disk('disk_name', 'backup_name')) -``` - -**エンジンパラメータ** - -- `database_name_inside_backup` — バックアップ内のデータベース名。 -- `backup_destination` — バックアップ先。 - -## 使用例 {#usage-example} - -`Disk`バックアップ先を使用した例を見てみましょう。まず、`storage.xml`でバックアップディスクを設定しましょう: - -```xml - - - - local - /home/ubuntu/ClickHouseWorkDir/backups/ - - - - - backups - /home/ubuntu/ClickHouseWorkDir/backups/ - -``` - -使用の例です。テストデータベースを作成し、テーブルを作成し、いくつかのデータを挿入し、最後にバックアップを作成しましょう: - -```sql -CREATE DATABASE test_database; - -CREATE TABLE test_database.test_table_1 (id UInt64, value String) ENGINE=MergeTree ORDER BY id; -INSERT INTO test_database.test_table_1 VALUES (0, 'test_database.test_table_1'); - -CREATE TABLE test_database.test_table_2 (id UInt64, value String) ENGINE=MergeTree ORDER BY id; -INSERT INTO test_database.test_table_2 VALUES (0, 'test_database.test_table_2'); - -CREATE TABLE test_database.test_table_3 (id UInt64, value String) ENGINE=MergeTree ORDER BY id; -INSERT INTO test_database.test_table_3 VALUES (0, 'test_database.test_table_3'); - -BACKUP DATABASE test_database TO Disk('backups', 'test_database_backup'); -``` - -これで`test_database_backup`バックアップができました。次に、バックアップを使用してデータベースを作成しましょう: - -```sql -CREATE DATABASE test_database_backup ENGINE = Backup('test_database', Disk('backups', 'test_database_backup')); -``` - -これで、データベースから任意のテーブルをクエリすることができます: - -```sql -SELECT id, value FROM test_database_backup.test_table_1; - -┌─id─┬─value──────────────────────┐ -│ 0 │ test_database.test_table_1 │ -└────┴────────────────────────────┘ - -SELECT id, value FROM test_database_backup.test_table_2; - -┌─id─┬─value──────────────────────┐ -│ 0 │ test_database.test_table_2 │ -└────┴────────────────────────────┘ - -SELECT id, value FROM test_database_backup.test_table_3; - -┌─id─┬─value──────────────────────┐ -│ 0 │ test_database.test_table_3 │ -└────┴────────────────────────────┘ -``` - -このバックアップデータベースを普通のデータベースと同様に操作することも可能です。例えば、テーブルをクエリすることもできます: - -```sql -SELECT 
database, name FROM system.tables WHERE database = 'test_database_backup'; - -┌─database─────────────┬─name─────────┐ -│ test_database_backup │ test_table_1 │ -│ test_database_backup │ test_table_2 │ -│ test_database_backup │ test_table_3 │ -└──────────────────────┴──────────────┘ -``` diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/engines/database-engines/backup.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/engines/database-engines/backup.md.hash deleted file mode 100644 index d1a0b48ba10..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/engines/database-engines/backup.md.hash +++ /dev/null @@ -1 +0,0 @@ -e12e92b1b21d23aa diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/engines/database-engines/index.md b/i18n/jp/docusaurus-plugin-content-docs/current/engines/database-engines/index.md deleted file mode 100644 index 2c2d665e264..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/engines/database-engines/index.md +++ /dev/null @@ -1,33 +0,0 @@ ---- -description: 'Documentation for Database Engines' -slug: '/engines/database-engines/' -toc_folder_title: 'Database Engines' -toc_priority: 27 -toc_title: 'Introduction' -title: 'Database Engines' ---- - - - - -# データベースエンジン - -データベースエンジンは、テーブルを操作するためのものです。デフォルトでは、ClickHouseは[Atomic](../../engines/database-engines/atomic.md)データベースエンジンを使用しており、構成可能な[テーブルエンジン](../../engines/table-engines/index.md)と[SQLダイアレクト](../../sql-reference/syntax.md)を提供します。 - -以下は、利用可能なデータベースエンジンの完全なリストです。詳細についてはリンクを参照してください: - - | ページ | 説明 | -|-----|-----| -| [Replicated](/engines/database-engines/replicated) | このエンジンはAtomicエンジンに基づいています。DDLログをZooKeeperに書き込み、指定されたデータベースのすべてのレプリカで実行されることによってメタデータのレプリケーションをサポートします。 | -| [MySQL](/engines/database-engines/mysql) | リモートMySQLサーバー上のデータベースに接続し、ClickHouseとMySQL間でデータを交換するために`INSERT`および`SELECT`クエリを実行することを可能にします。 | -| [MaterializedPostgreSQL](/engines/database-engines/materialized-postgresql) | PostgreSQLデータベースからテーブルを持つClickHouseデータベースを作成します。 | -| [Atomic](/engines/database-engines/atomic) | `Atomic`エンジンは、ノンブロッキングの`DROP TABLE`および`RENAME TABLE`クエリ、並びにアトミックな`EXCHANGE TABLES`クエリをサポートします。デフォルトでは`Atomic`データベースエンジンが使用されます。 | -| [Lazy](/engines/database-engines/lazy) | 最後のアクセスから`expiration_time_in_seconds`秒間のみRAMにテーブルを保持します。Logタイプのテーブルでのみ使用可能です。 | -| [PostgreSQL](/engines/database-engines/postgresql) | リモートPostgreSQLサーバー上のデータベースに接続することを可能にします。 | -| [Backup](/engines/database-engines/backup) | 読み取り専用モードでバックアップからテーブル/データベースを即座にアタッチすることを可能にします。 | -| [SQLite](/engines/database-engines/sqlite) | SQLiteデータベースに接続し、ClickHouseとSQLite間でデータを交換するために`INSERT`および`SELECT`クエリを実行することを可能にします。 | diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/engines/database-engines/index.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/engines/database-engines/index.md.hash deleted file mode 100644 index 3e2badefaf0..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/engines/database-engines/index.md.hash +++ /dev/null @@ -1 +0,0 @@ -938d4da8c802ec8f diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/engines/database-engines/lazy.md b/i18n/jp/docusaurus-plugin-content-docs/current/engines/database-engines/lazy.md deleted file mode 100644 index af2db712d6a..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/engines/database-engines/lazy.md +++ /dev/null @@ -1,24 +0,0 @@ ---- -description: 'Keeps tables in RAM only `expiration_time_in_seconds` seconds after - last access. Can be used only with Log type tables.' 
-sidebar_label: 'Lazy' -sidebar_position: 20 -slug: '/engines/database-engines/lazy' -title: 'Lazy' ---- - - - - -# Lazy - -テーブルは最終アクセス後 `expiration_time_in_seconds` 秒間のみ RAM に保持されます。これは \*Log テーブルでのみ使用できます。 - -多くの小さな \*Log テーブルを保存するために最適化されており、アクセス間の時間間隔が長いです。 - -## データベースの作成 {#creating-a-database} - -```sql -CREATE DATABASE testlazy -ENGINE = Lazy(expiration_time_in_seconds); -``` diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/engines/database-engines/lazy.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/engines/database-engines/lazy.md.hash deleted file mode 100644 index bf76f33ce05..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/engines/database-engines/lazy.md.hash +++ /dev/null @@ -1 +0,0 @@ -6c45af00f6fe753f diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/engines/database-engines/materialized-postgresql.md b/i18n/jp/docusaurus-plugin-content-docs/current/engines/database-engines/materialized-postgresql.md deleted file mode 100644 index 32e96c82097..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/engines/database-engines/materialized-postgresql.md +++ /dev/null @@ -1,297 +0,0 @@ ---- -description: 'Creates a ClickHouse database with tables from PostgreSQL database.' -sidebar_label: 'MaterializedPostgreSQL' -sidebar_position: 60 -slug: '/engines/database-engines/materialized-postgresql' -title: 'MaterializedPostgreSQL' ---- - -import ExperimentalBadge from '@theme/badges/ExperimentalBadge'; -import CloudNotSupportedBadge from '@theme/badges/CloudNotSupportedBadge'; - - -# MaterializedPostgreSQL - - - - -:::note -ClickHouse Cloud ユーザーは、PostgreSQL から ClickHouse へのレプリケーションに [ClickPipes](/integrations/clickpipes) を使用することを推奨されます。これにより、PostgreSQL 用の高性能な変更データキャプチャ (CDC) がネイティブにサポートされます。 -::: - -PostgreSQL データベースからテーブルを持つ ClickHouse データベースを作成します。まず、エンジン `MaterializedPostgreSQL` を使用してデータベースが PostgreSQL データベースのスナップショットを作成し、必要なテーブルをロードします。必要なテーブルには、指定されたデータベースの任意のスキーマからの任意のテーブルのサブセットを含めることができます。スナップショットとともに、データベースエンジンは LSN を取得し、テーブルの初期ダンプが実行されると、WAL からの更新をプルし始めます。データベースが作成された後、PostgreSQL データベースに新しく追加されたテーブルは、自動的にレプリケーションに追加されません。これらは `ATTACH TABLE db.table` クエリを使用して手動で追加する必要があります。 - -レプリケーションは PostgreSQL 論理レプリケーションプロトコルで実装されており、DDL をレプリケートすることはできませんが、レプリケーションの破壊的変更が発生したかどうかを知ることができます(カラムの型変更、カラムの追加/削除)。そのような変更が検出されると、該当するテーブルは更新を受信しなくなります。この場合、テーブルを完全に再ロードするために `ATTACH` / `DETACH PERMANENTLY` クエリを使用する必要があります。DDL がレプリケーションを破損しない場合(例えば、カラムの名前を変更する場合)テーブルは引き続き更新を受け取ります(挿入は位置によって行われます)。 - -:::note -このデータベースエンジンは実験的です。使用するには、設定ファイルで `allow_experimental_database_materialized_postgresql` を 1 に設定するか、`SET` コマンドを使用します: -```sql -SET allow_experimental_database_materialized_postgresql=1 -``` -::: - -## データベースの作成 {#creating-a-database} - -```sql -CREATE DATABASE [IF NOT EXISTS] db_name [ON CLUSTER cluster] -ENGINE = MaterializedPostgreSQL('host:port', 'database', 'user', 'password') [SETTINGS ...] 
-``` - -**エンジンパラメータ** - -- `host:port` — PostgreSQL サーバーエンドポイント。 -- `database` — PostgreSQL データベース名。 -- `user` — PostgreSQL ユーザー。 -- `password` — ユーザーパスワード。 - -## 使用例 {#example-of-use} - -```sql -CREATE DATABASE postgres_db -ENGINE = MaterializedPostgreSQL('postgres1:5432', 'postgres_database', 'postgres_user', 'postgres_password'); - -SHOW TABLES FROM postgres_db; - -┌─name───┐ -│ table1 │ -└────────┘ - -SELECT * FROM postgresql_db.postgres_table; -``` - -## レプリケーションに新しいテーブルを動的に追加 {#dynamically-adding-table-to-replication} - -`MaterializedPostgreSQL` データベースが作成された後、自動的に対応する PostgreSQL データベース内の新しいテーブルを検出することはありません。このようなテーブルは手動で追加できます: - -```sql -ATTACH TABLE postgres_database.new_table; -``` - -:::warning -バージョン 22.1 より前では、テーブルをレプリケーションに追加すると、一時的なレプリケーションスロット(`{db_name}_ch_replication_slot_tmp`という名前)が削除されませんでした。ClickHouse バージョン 22.1 より前でテーブルをアタッチする場合は、手動で削除する必要があります(`SELECT pg_drop_replication_slot('{db_name}_ch_replication_slot_tmp')`)。さもなければディスク使用量が増加します。この問題は 22.1 で修正されています。 -::: - -## レプリケーションからテーブルを動的に削除 {#dynamically-removing-table-from-replication} - -特定のテーブルをレプリケーションから削除することが可能です: - -```sql -DETACH TABLE postgres_database.table_to_remove PERMANENTLY; -``` - -## PostgreSQL スキーマ {#schema} - -PostgreSQL [スキーマ](https://www.postgresql.org/docs/9.1/ddl-schemas.html) は、(バージョン 21.12 以降)3 つの方法で構成できます。 - -1. 1 つの `MaterializedPostgreSQL` データベースエンジン用の 1 つのスキーマ。設定 `materialized_postgresql_schema` を使用する必要があります。 -テーブルはテーブル名のみでアクセスされます: - -```sql -CREATE DATABASE postgres_database -ENGINE = MaterializedPostgreSQL('postgres1:5432', 'postgres_database', 'postgres_user', 'postgres_password') -SETTINGS materialized_postgresql_schema = 'postgres_schema'; - -SELECT * FROM postgres_database.table1; -``` - -2. 1 つの `MaterializedPostgreSQL` データベースエンジン用に指定されたテーブルセットを持つ任意の数のスキーマ。設定 `materialized_postgresql_tables_list` を使用する必要があります。各テーブルは、そのスキーマとともに記述されます。 -テーブルはスキーマ名とテーブル名の両方でアクセスされます: - -```sql -CREATE DATABASE database1 -ENGINE = MaterializedPostgreSQL('postgres1:5432', 'postgres_database', 'postgres_user', 'postgres_password') -SETTINGS materialized_postgresql_tables_list = 'schema1.table1,schema2.table2,schema1.table3', - materialized_postgresql_tables_list_with_schema = 1; - -SELECT * FROM database1.`schema1.table1`; -SELECT * FROM database1.`schema2.table2`; -``` - -この場合、`materialized_postgresql_tables_list` のすべてのテーブルは、スキーマ名とともに記述する必要があります。 -`materialized_postgresql_tables_list_with_schema = 1` が必要です。 - -警告:この場合、テーブル名にドットは許可されません。 - -3. 1 つの `MaterializedPostgreSQL` データベースエンジン用にフルのテーブルセットを持つ任意の数のスキーマ。設定 `materialized_postgresql_schema_list` を使用する必要があります。 - -```sql -CREATE DATABASE database1 -ENGINE = MaterializedPostgreSQL('postgres1:5432', 'postgres_database', 'postgres_user', 'postgres_password') -SETTINGS materialized_postgresql_schema_list = 'schema1,schema2,schema3'; - -SELECT * FROM database1.`schema1.table1`; -SELECT * FROM database1.`schema1.table2`; -SELECT * FROM database1.`schema2.table2`; -``` - -警告:この場合、テーブル名にドットは許可されません。 - -## 要件 {#requirements} - -1. PostgreSQL 設定ファイルの [wal_level](https://www.postgresql.org/docs/current/runtime-config-wal.html) 設定は `logical` の値を持ち、`max_replication_slots` パラメータは少なくとも `2` の値を持つ必要があります。 - -2. 
各レプリケートされたテーブルは、以下のいずれかの [レプリカアイデンティティ](https://www.postgresql.org/docs/10/sql-altertable.html#SQL-CREATETABLE-REPLICA-IDENTITY) を持っている必要があります: - -- 主キー(デフォルト) - -- インデックス - -```bash -postgres# CREATE TABLE postgres_table (a Integer NOT NULL, b Integer, c Integer NOT NULL, d Integer, e Integer NOT NULL); -postgres# CREATE unique INDEX postgres_table_index on postgres_table(a, c, e); -postgres# ALTER TABLE postgres_table REPLICA IDENTITY USING INDEX postgres_table_index; -``` - -主キーが常に最初にチェックされます。主キーが存在しない場合、レプリカアイデンティティインデックスとして定義されたインデックスがチェックされます。 -インデックスがレプリカアイデンティティとして使用される場合、そのテーブルにはそのインデックスが 1 つだけ存在しなければなりません。 -特定のテーブルで使用されているタイプを確認するには、以下のコマンドを使用します: - -```bash -postgres# SELECT CASE relreplident - WHEN 'd' THEN 'default' - WHEN 'n' THEN 'nothing' - WHEN 'f' THEN 'full' - WHEN 'i' THEN 'index' - END AS replica_identity -FROM pg_class -WHERE oid = 'postgres_table'::regclass; -``` - -:::note -[**TOAST**](https://www.postgresql.org/docs/9.5/storage-toast.html) 値のレプリケーションはサポートされていません。データ型のデフォルト値が使用されます。 -::: - -## 設定 {#settings} - -### `materialized_postgresql_tables_list` {#materialized-postgresql-tables-list} - - PostgreSQL データベーステーブルのカンマ区切りリストを設定します。これらは [MaterializedPostgreSQL](../../engines/database-engines/materialized-postgresql.md) データベースエンジンを介してレプリケートされます。 - - 各テーブルは、カッコ内にレプリケートされるカラムのサブセットを持つことができます。カラムのサブセットが省略された場合、テーブルのすべてのカラムがレプリケートされます。 - - ```sql - materialized_postgresql_tables_list = 'table1(co1, col2),table2,table3(co3, col5, col7) - ``` - - デフォルト値:空のリスト — つまり、すべての PostgreSQL データベースがレプリケートされることを意味します。 - -### `materialized_postgresql_schema` {#materialized-postgresql-schema} - - デフォルト値:空の文字列。(デフォルトスキーマが使用されます) - -### `materialized_postgresql_schema_list` {#materialized-postgresql-schema-list} - - デフォルト値:空のリスト。(デフォルトスキーマが使用されます) - -### `materialized_postgresql_max_block_size` {#materialized-postgresql-max-block-size} - - PostgreSQL データベーステーブルにデータをフラッシュする前にメモリに収集される行の数を設定します。 - - 許可される値: - - - 正の整数。 - - デフォルト値: `65536`。 - -### `materialized_postgresql_replication_slot` {#materialized-postgresql-replication-slot} - - ユーザーが作成したレプリケーションスロット。`materialized_postgresql_snapshot` と一緒に使用する必要があります。 - -### `materialized_postgresql_snapshot` {#materialized-postgresql-snapshot} - - [PostgreSQL テーブルの初期ダンプ](../../engines/database-engines/materialized-postgresql.md) が実行されるスナップショットを識別するテキスト文字列。`materialized_postgresql_replication_slot` と一緒に使用する必要があります。 - - ```sql - CREATE DATABASE database1 - ENGINE = MaterializedPostgreSQL('postgres1:5432', 'postgres_database', 'postgres_user', 'postgres_password') - SETTINGS materialized_postgresql_tables_list = 'table1,table2,table3'; - - SELECT * FROM database1.table1; - ``` - - 必要に応じて DDL クエリを使用して設定を変更できます。ただし、`materialized_postgresql_tables_list` 設定を変更することはできません。この設定のテーブルリストを更新するには、`ATTACH TABLE` クエリを使用してください。 - - ```sql - ALTER DATABASE postgres_database MODIFY SETTING materialized_postgresql_max_block_size = ; - ``` - -### `materialized_postgresql_use_unique_replication_consumer_identifier` {#materialized_postgresql_use_unique_replication_consumer_identifier} - -レプリケーションのために一意のレプリケーションコンシューマ識別子を使用します。デフォルト:`0`。 -`1` に設定すると、同じ `PostgreSQL` テーブルを指す複数の `MaterializedPostgreSQL` テーブルをセットアップすることができます。 - -## 注意事項 {#notes} - -### 論理レプリケーションスロットのフェイルオーバー {#logical-replication-slot-failover} - -プライマリに存在する論理レプリケーションスロットは、スタンバイレプリカでは利用できません。 -したがって、フェイルオーバーが発生した場合、新しいプライマリ(古い物理スタンバイ)は、古いプライマリで存在していたスロットについて知ることができません。これにより、PostgreSQL からのレプリケーションが壊れます。 -これに対処するためには、レプリケーションスロットを自分で管理し、永続的なレプリケーションスロットを定義する必要があります(詳細情報は 
[こちら](https://patroni.readthedocs.io/en/latest/SETTINGS.html)にあります)。スロット名を `materialized_postgresql_replication_slot` 設定を介して渡す必要があり、`EXPORT SNAPSHOT` オプションでエクスポートされている必要があります。スナップショット識別子は `materialized_postgresql_snapshot` 設定を介して渡す必要があります。 - -これは必要な場合のみ使用することに注意してください。実際に必要ない場合や、その理由を完全に理解していない場合、テーブルエンジンが自分でスロットを作成および管理できるようにする方が良いです。 - -**例([@bchrobot](https://github.com/bchrobot) から)** - -1. PostgreSQL にレプリケーションスロットを設定します。 - - ```yaml - apiVersion: "acid.zalan.do/v1" - kind: postgresql - metadata: - name: acid-demo-cluster - spec: - numberOfInstances: 2 - postgresql: - parameters: - wal_level: logical - patroni: - slots: - clickhouse_sync: - type: logical - database: demodb - plugin: pgoutput - ``` - -2. レプリケーションスロットが準備できるのを待ち、その後トランザクションを開始してトランザクションスナップショット識別子をエクスポートします: - - ```sql - BEGIN; - SELECT pg_export_snapshot(); - ``` - -3. ClickHouse にデータベースを作成します: - - ```sql - CREATE DATABASE demodb - ENGINE = MaterializedPostgreSQL('postgres1:5432', 'postgres_database', 'postgres_user', 'postgres_password') - SETTINGS - materialized_postgresql_replication_slot = 'clickhouse_sync', - materialized_postgresql_snapshot = '0000000A-0000023F-3', - materialized_postgresql_tables_list = 'table1,table2,table3'; - ``` - -4. ClickHouse DB へのレプリケーションが確認できたら、PostgreSQL トランザクションを終了します。フェイルオーバー後もレプリケーションが続くことを確認します: - - ```bash - kubectl exec acid-demo-cluster-0 -c postgres -- su postgres -c 'patronictl failover --candidate acid-demo-cluster-1 --force' - ``` - -### 必要な権限 {#required-permissions} - -1. [CREATE PUBLICATION](https://postgrespro.ru/docs/postgresql/14/sql-createpublication) — 作成クエリの特権。 - -2. [CREATE_REPLICATION_SLOT](https://postgrespro.ru/docs/postgrespro/10/protocol-replication#PROTOCOL-REPLICATION-CREATE-SLOT) — レプリケーションの特権。 - -3. [pg_drop_replication_slot](https://postgrespro.ru/docs/postgrespro/9.5/functions-admin#functions-replication) — レプリケーションの特権またはスーパーユーザー。 - -4. [DROP PUBLICATION](https://postgrespro.ru/docs/postgresql/10/sql-droppublication) — 出版物の所有者(MaterializedPostgreSQL エンジン内の `username`)。 - -`2` および `3` コマンドを実行し、その権限を持たないようにすることは可能です。設定 `materialized_postgresql_replication_slot` と `materialized_postgresql_snapshot` を使用します。ただし、十分な注意が必要です。 - -テーブルへのアクセス: - -1. pg_publication - -2. pg_replication_slots - -3. pg_publication_tables diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/engines/database-engines/materialized-postgresql.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/engines/database-engines/materialized-postgresql.md.hash deleted file mode 100644 index 28ae14024ab..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/engines/database-engines/materialized-postgresql.md.hash +++ /dev/null @@ -1 +0,0 @@ -245fae8b7eb0a841 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/engines/database-engines/mysql.md b/i18n/jp/docusaurus-plugin-content-docs/current/engines/database-engines/mysql.md deleted file mode 100644 index aabdad40ff8..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/engines/database-engines/mysql.md +++ /dev/null @@ -1,158 +0,0 @@ ---- -description: 'Allows connecting to databases on a remote MySQL server and perform - `INSERT` and `SELECT` queries to exchange data between ClickHouse and MySQL.' 
-sidebar_label: 'MySQL' -sidebar_position: 50 -slug: '/engines/database-engines/mysql' -title: 'MySQL' ---- - -import CloudNotSupportedBadge from '@theme/badges/CloudNotSupportedBadge'; - - -# MySQL データベースエンジン - - - -リモート MySQL サーバー上のデータベースに接続し、ClickHouse と MySQL の間でデータを交換するために `INSERT` および `SELECT` クエリを実行することができます。 - -`MySQL` データベースエンジンはクエリを MySQL サーバーに変換するため、`SHOW TABLES` や `SHOW CREATE TABLE` などの操作を実行できます。 - -以下のクエリは実行できません: - -- `RENAME` -- `CREATE TABLE` -- `ALTER` - -## データベースの作成 {#creating-a-database} - -```sql -CREATE DATABASE [IF NOT EXISTS] db_name [ON CLUSTER cluster] -ENGINE = MySQL('host:port', ['database' | database], 'user', 'password') -``` - -**エンジンパラメータ** - -- `host:port` — MySQL サーバーのアドレス。 -- `database` — リモートデータベース名。 -- `user` — MySQL ユーザー。 -- `password` — ユーザーのパスワード。 - -## データ型サポート {#data_types-support} - -| MySQL | ClickHouse | -|----------------------------------|--------------------------------------------------------------| -| UNSIGNED TINYINT | [UInt8](../../sql-reference/data-types/int-uint.md) | -| TINYINT | [Int8](../../sql-reference/data-types/int-uint.md) | -| UNSIGNED SMALLINT | [UInt16](../../sql-reference/data-types/int-uint.md) | -| SMALLINT | [Int16](../../sql-reference/data-types/int-uint.md) | -| UNSIGNED INT, UNSIGNED MEDIUMINT | [UInt32](../../sql-reference/data-types/int-uint.md) | -| INT, MEDIUMINT | [Int32](../../sql-reference/data-types/int-uint.md) | -| UNSIGNED BIGINT | [UInt64](../../sql-reference/data-types/int-uint.md) | -| BIGINT | [Int64](../../sql-reference/data-types/int-uint.md) | -| FLOAT | [Float32](../../sql-reference/data-types/float.md) | -| DOUBLE | [Float64](../../sql-reference/data-types/float.md) | -| DATE | [Date](../../sql-reference/data-types/date.md) | -| DATETIME, TIMESTAMP | [DateTime](../../sql-reference/data-types/datetime.md) | -| BINARY | [FixedString](../../sql-reference/data-types/fixedstring.md) | - -その他の MySQL データ型はすべて [String](../../sql-reference/data-types/string.md) に変換されます。 - -[Nullable](../../sql-reference/data-types/nullable.md) がサポートされています。 - -## グローバル変数サポート {#global-variables-support} - -互換性向上のため、MySQL スタイルでグローバル変数に `@@identifier` としてアクセスできます。 - -サポートされる変数: -- `version` -- `max_allowed_packet` - -:::note -現在、これらの変数はスタブであり、何にも対応していません。 -::: - -例: - -```sql -SELECT @@version; -``` - -## 使用例 {#examples-of-use} - -MySQL でのテーブル: - -```text -mysql> USE test; -Database changed - -mysql> CREATE TABLE `mysql_table` ( - -> `int_id` INT NOT NULL AUTO_INCREMENT, - -> `float` FLOAT NOT NULL, - -> PRIMARY KEY (`int_id`)); -Query OK, 0 rows affected (0,09 sec) - -mysql> insert into mysql_table (`int_id`, `float`) VALUES (1,2); -Query OK, 1 row affected (0,00 sec) - -mysql> select * from mysql_table; -+------+-----+ -| int_id | value | -+------+-----+ -| 1 | 2 | -+------+-----+ -1 row in set (0,00 sec) -``` - -ClickHouse のデータベースで、MySQL サーバーとデータを交換: - -```sql -CREATE DATABASE mysql_db ENGINE = MySQL('localhost:3306', 'test', 'my_user', 'user_password') SETTINGS read_write_timeout=10000, connect_timeout=100; -``` - -```sql -SHOW DATABASES -``` - -```text -┌─name─────┐ -│ default │ -│ mysql_db │ -│ system │ -└──────────┘ -``` - -```sql -SHOW TABLES FROM mysql_db -``` - -```text -┌─name─────────┐ -│ mysql_table │ -└──────────────┘ -``` - -```sql -SELECT * FROM mysql_db.mysql_table -``` - -```text -┌─int_id─┬─value─┐ -│ 1 │ 2 │ -└────────┴───────┘ -``` - -```sql -INSERT INTO mysql_db.mysql_table VALUES (3,4) -``` - -```sql -SELECT * FROM mysql_db.mysql_table -``` - -```text -┌─int_id─┬─value─┐ -│ 1 │ 2 │ -│ 3 │ 4 │ 
-└────────┴───────┘ -``` diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/engines/database-engines/mysql.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/engines/database-engines/mysql.md.hash deleted file mode 100644 index a449cfa297e..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/engines/database-engines/mysql.md.hash +++ /dev/null @@ -1 +0,0 @@ -a7a9d3e9fd442db3 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/engines/database-engines/postgresql.md b/i18n/jp/docusaurus-plugin-content-docs/current/engines/database-engines/postgresql.md deleted file mode 100644 index 5e28e1579ce..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/engines/database-engines/postgresql.md +++ /dev/null @@ -1,148 +0,0 @@ ---- -description: 'Allows to connect to databases on a remote PostgreSQL server.' -sidebar_label: 'PostgreSQL' -sidebar_position: 40 -slug: '/engines/database-engines/postgresql' -title: 'PostgreSQL' ---- - - - - -# PostgreSQL - -リモートの [PostgreSQL](https://www.postgresql.org) サーバー上のデータベースに接続できます。ClickHouse と PostgreSQL の間でデータを交換するための読み取りおよび書き込み操作(`SELECT` および `INSERT` クエリ)をサポートします。 - -`SHOW TABLES` および `DESCRIBE TABLE` クエリを使用して、リモート PostgreSQL からテーブルリストおよびテーブル構造にリアルタイムアクセスを提供します。 - -テーブル構造の変更(`ALTER TABLE ... ADD|DROP COLUMN`)をサポートします。`use_table_cache` パラメータ(以下のエンジンパラメータを参照)が `1` に設定されている場合、テーブル構造はキャッシュされ、変更が確認されませんが、`DETACH` および `ATTACH` クエリで更新できます。 - -## データベースの作成 {#creating-a-database} - -```sql -CREATE DATABASE test_database -ENGINE = PostgreSQL('host:port', 'database', 'user', 'password'[, `schema`, `use_table_cache`]); -``` - -**エンジンパラメータ** - -- `host:port` — PostgreSQL サーバーアドレス。 -- `database` — リモートデータベース名。 -- `user` — PostgreSQL ユーザー。 -- `password` — ユーザーパスワード。 -- `schema` — PostgreSQL スキーマ。 -- `use_table_cache` — データベースのテーブル構造がキャッシュされるかどうかを定義します。オプション。デフォルト値: `0`。 - -## データ型のサポート {#data_types-support} - -| PostgreSQL | ClickHouse | -|------------------|--------------------------------------------------------------| -| DATE | [Date](../../sql-reference/data-types/date.md) | -| TIMESTAMP | [DateTime](../../sql-reference/data-types/datetime.md) | -| REAL | [Float32](../../sql-reference/data-types/float.md) | -| DOUBLE | [Float64](../../sql-reference/data-types/float.md) | -| DECIMAL, NUMERIC | [Decimal](../../sql-reference/data-types/decimal.md) | -| SMALLINT | [Int16](../../sql-reference/data-types/int-uint.md) | -| INTEGER | [Int32](../../sql-reference/data-types/int-uint.md) | -| BIGINT | [Int64](../../sql-reference/data-types/int-uint.md) | -| SERIAL | [UInt32](../../sql-reference/data-types/int-uint.md) | -| BIGSERIAL | [UInt64](../../sql-reference/data-types/int-uint.md) | -| TEXT, CHAR | [String](../../sql-reference/data-types/string.md) | -| INTEGER | Nullable([Int32](../../sql-reference/data-types/int-uint.md))| -| ARRAY | [Array](../../sql-reference/data-types/array.md) | - - -## 使用例 {#examples-of-use} - -ClickHouse で PostgreSQL サーバーとデータを交換するデータベース: - -```sql -CREATE DATABASE test_database -ENGINE = PostgreSQL('postgres1:5432', 'test_database', 'postgres', 'mysecretpassword', 'schema_name',1); -``` - -```sql -SHOW DATABASES; -``` - -```text -┌─name──────────┐ -│ default │ -│ test_database │ -│ system │ -└───────────────┘ -``` - -```sql -SHOW TABLES FROM test_database; -``` - -```text -┌─name───────┐ -│ test_table │ -└────────────┘ -``` - -PostgreSQL テーブルからデータを読み取る: - -```sql -SELECT * FROM test_database.test_table; -``` - -```text -┌─id─┬─value─┐ -│ 1 │ 2 │ -└────┴───────┘ -``` - -PostgreSQL テーブルにデータを書き込む: - 
-```sql -INSERT INTO test_database.test_table VALUES (3,4); -SELECT * FROM test_database.test_table; -``` - -```text -┌─int_id─┬─value─┐ -│ 1 │ 2 │ -│ 3 │ 4 │ -└────────┴───────┘ -``` - -PostgreSQL でテーブル構造が変更されたと仮定します: - -```sql -postgre> ALTER TABLE test_table ADD COLUMN data Text -``` - -データベースが作成されたときに `use_table_cache` パラメータが `1` に設定されていたため、ClickHouse のテーブル構造はキャッシュされており、したがって変更されていませんでした: - -```sql -DESCRIBE TABLE test_database.test_table; -``` -```text -┌─name───┬─type──────────────┐ -│ id │ Nullable(Integer) │ -│ value │ Nullable(Integer) │ -└────────┴───────────────────┘ -``` - -テーブルをデタッチし再度アタッチした後、構造が更新されました: - -```sql -DETACH TABLE test_database.test_table; -ATTACH TABLE test_database.test_table; -DESCRIBE TABLE test_database.test_table; -``` -```text -┌─name───┬─type──────────────┐ -│ id │ Nullable(Integer) │ -│ value │ Nullable(Integer) │ -│ data │ Nullable(String) │ -└────────┴───────────────────┘ -``` - -## 関連コンテンツ {#related-content} - -- ブログ: [ClickHouse と PostgreSQL - データの天国でのマッチ - パート 1](https://clickhouse.com/blog/migrating-data-between-clickhouse-postgres) -- ブログ: [ClickHouse と PostgreSQL - データの天国でのマッチ - パート 2](https://clickhouse.com/blog/migrating-data-between-clickhouse-postgres-part-2) diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/engines/database-engines/postgresql.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/engines/database-engines/postgresql.md.hash deleted file mode 100644 index dacdf615b77..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/engines/database-engines/postgresql.md.hash +++ /dev/null @@ -1 +0,0 @@ -9d4bddf80bc66c1e diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/engines/database-engines/replicated.md b/i18n/jp/docusaurus-plugin-content-docs/current/engines/database-engines/replicated.md deleted file mode 100644 index 3d4983baaa3..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/engines/database-engines/replicated.md +++ /dev/null @@ -1,129 +0,0 @@ ---- -description: 'エンジンはAtomicエンジンに基づいています。特定のデータベースのすべてのレプリカで書き込まれたDDLログをZooKeeperにレプリゼンテーションすることにより、メタデータのレプリケーションをサポートします。' -sidebar_label: 'レプリカ' -sidebar_position: 30 -slug: '/engines/database-engines/replicated' -title: 'レプリカ' ---- - - - - -# レプリケーション - -このエンジンは [Atomic](../../engines/database-engines/atomic.md) エンジンに基づいています。メタデータのレプリケーションをサポートしており、DDLログがZooKeeperに書き込まれ、特定のデータベースのすべてのレプリカで実行されます。 - -1つのClickHouseサーバーでは、複数のレプリケートされたデータベースを同時に実行および更新できます。ただし、同じレプリケートされたデータベースのレプリカを複数作成することはできません。 - -## データベースの作成 {#creating-a-database} -```sql -CREATE DATABASE testdb ENGINE = Replicated('zoo_path', 'shard_name', 'replica_name') [SETTINGS ...] 
-``` - -**エンジンパラメーター** - -- `zoo_path` — ZooKeeperのパス。同じZooKeeperのパスは同じデータベースに対応します。 -- `shard_name` — シャード名。データベースのレプリカは `shard_name` によってシャードにグループ化されます。 -- `replica_name` — レプリカ名。レプリカ名は同じシャードのすべてのレプリカで異なる必要があります。 - -[ReplicatedMergeTree](/engines/table-engines/mergetree-family/replication) テーブルでは、引数が提供されていない場合、デフォルトの引数が使用されます:`/clickhouse/tables/{uuid}/{shard}` と `{replica}`。これらはサーバー設定の [default_replica_path](../../operations/server-configuration-parameters/settings.md#default_replica_path) および [default_replica_name](../../operations/server-configuration-parameters/settings.md#default_replica_name) で変更できます。マクロ `{uuid}` はテーブルのuuidに展開され、`{shard}` と `{replica}` はデータベースエンジンの引数ではなくサーバーconfigからの値に展開されます。しかし、今後はReplicatedデータベースの `shard_name` および `replica_name` を使用できるようになる予定です。 - -## 特徴と推奨事項 {#specifics-and-recommendations} - -`Replicated` データベースを用いたDDLクエリは [ON CLUSTER](../../sql-reference/distributed-ddl.md) クエリと似たように機能しますが、いくつかの違いがあります。 - -まず、DDLリクエストはイニシエーター(ユーザーからリクエストを最初に受信したホスト)で実行しようとします。リクエストが完了しない場合、ユーザーはすぐにエラーを受け取り、他のホストはリクエストを完了しようとしません。リクエストがイニシエーターで正常に完了した場合、他のすべてのホストは、それを完了するまで自動的に再試行します。イニシエーターは、他のホストでクエリが完了するのを待つようにし、[distributed_ddl_task_timeout](../../operations/settings/settings.md#distributed_ddl_task_timeout)を超えない範囲で実行します。また、各ホストでのクエリ実行の状態を示すテーブルを返します。 - -エラーが発生した場合の挙動は [distributed_ddl_output_mode](../../operations/settings/settings.md#distributed_ddl_output_mode) 設定によって規定されますが、`Replicated` データベースには `null_status_on_timeout` に設定するのが良いでしょう。つまり、いくつかのホストが [distributed_ddl_task_timeout](../../operations/settings/settings.md#distributed_ddl_task_timeout) のリクエストを実行する時間がなかった場合、例外をスローせずに、テーブル内のそれらのホストには `NULL` ステータスを表示します。 - -[system.clusters](../../operations/system-tables/clusters.md) システムテーブルは、レプリケートされたデータベースに名前が付けられたクラスタを含んでおり、データベースのすべてのレプリカで構成されています。このクラスタは、レプリカの作成/削除時に自動的に更新され、[Distributed](/engines/table-engines/special/distributed) テーブルに利用できます。 - -データベースの新しいレプリカを作成する際には、このレプリカが自動的にテーブルを作成します。もしそのレプリカが長い間利用できなくなっており、レプリケーションログから遅れている場合は、ローカルメタデータがZooKeeperの現在のメタデータと一致するかを確認し、追加のテーブルを別の非レプリケートされたデータベースに移動します(不要なものを誤って削除しないため)。不足しているテーブルを作成し、名前が変更されている場合はそのテーブル名を更新します。データは `ReplicatedMergeTree` レベルでレプリケートされます。つまり、テーブルがレプリケートされていない場合、データはレプリケートされません(データベースはメタデータのみに責任があります)。 - -[`ALTER TABLE FREEZE|ATTACH|FETCH|DROP|DROP DETACHED|DETACH PARTITION|PART`](../../sql-reference/statements/alter/partition.md) クエリは許可されていますが、レプリケートされません。データベースエンジンは、現在のレプリカに対してのみパーティション/パートを追加/取得/削除します。ただし、テーブル自体がレプリケートされたテーブルエンジンを使用している場合、`ATTACH` を使用した後にデータがレプリケートされます。 - -テーブルのレプリケーションを維持せずにクラスタを設定したい場合は、[Cluster Discovery](../../operations/cluster-discovery.md) 機能を参照してください。 - -## 使用例 {#usage-example} - -3つのホストを持つクラスタを作成します: - -```sql -node1 :) CREATE DATABASE r ENGINE=Replicated('some/path/r','shard1','replica1'); -node2 :) CREATE DATABASE r ENGINE=Replicated('some/path/r','shard1','other_replica'); -node3 :) CREATE DATABASE r ENGINE=Replicated('some/path/r','other_shard','{replica}'); -``` - -DDLクエリを実行します: - -```sql -CREATE TABLE r.rmt (n UInt64) ENGINE=ReplicatedMergeTree ORDER BY n; -``` - -```text -┌─────hosts────────────┬──status─┬─error─┬─num_hosts_remaining─┬─num_hosts_active─┐ -│ shard1|replica1 │ 0 │ │ 2 │ 0 │ -│ shard1|other_replica │ 0 │ │ 1 │ 0 │ -│ other_shard|r1 │ 0 │ │ 0 │ 0 │ -└──────────────────────┴─────────┴───────┴─────────────────────┴──────────────────┘ -``` - -システムテーブルを表示します: - -```sql -SELECT cluster, shard_num, replica_num, host_name, host_address, port, is_local -FROM system.clusters WHERE cluster='r'; -``` - -```text 
-┌─cluster─┬─shard_num─┬─replica_num─┬─host_name─┬─host_address─┬─port─┬─is_local─┐ -│ r │ 1 │ 1 │ node3 │ 127.0.0.1 │ 9002 │ 0 │ -│ r │ 2 │ 1 │ node2 │ 127.0.0.1 │ 9001 │ 0 │ -│ r │ 2 │ 2 │ node1 │ 127.0.0.1 │ 9000 │ 1 │ -└─────────┴───────────┴─────────────┴───────────┴──────────────┴──────┴──────────┘ -``` - -分散テーブルを作成し、データを挿入します: - -```sql -node2 :) CREATE TABLE r.d (n UInt64) ENGINE=Distributed('r','r','rmt', n % 2); -node3 :) INSERT INTO r.d SELECT * FROM numbers(10); -node1 :) SELECT materialize(hostName()) AS host, groupArray(n) FROM r.d GROUP BY host; -``` - -```text -┌─hosts─┬─groupArray(n)─┐ -│ node3 │ [1,3,5,7,9] │ -│ node2 │ [0,2,4,6,8] │ -└───────┴───────────────┘ -``` - -もうひとつのホストにレプリカを追加します: - -```sql -node4 :) CREATE DATABASE r ENGINE=Replicated('some/path/r','other_shard','r2'); -``` - -クラスタの設定は以下のようになります: - -```text -┌─cluster─┬─shard_num─┬─replica_num─┬─host_name─┬─host_address─┬─port─┬─is_local─┐ -│ r │ 1 │ 1 │ node3 │ 127.0.0.1 │ 9002 │ 0 │ -│ r │ 1 │ 2 │ node4 │ 127.0.0.1 │ 9003 │ 0 │ -│ r │ 2 │ 1 │ node2 │ 127.0.0.1 │ 9001 │ 0 │ -│ r │ 2 │ 2 │ node1 │ 127.0.0.1 │ 9000 │ 1 │ -└─────────┴───────────┴─────────────┴───────────┴──────────────┴──────┴──────────┘ -``` - -分散テーブルは新しいホストからもデータを取得します: - -```sql -node2 :) SELECT materialize(hostName()) AS host, groupArray(n) FROM r.d GROUP BY host; -``` - -```text -┌─hosts─┬─groupArray(n)─┐ -│ node2 │ [1,3,5,7,9] │ -│ node4 │ [0,2,4,6,8] │ -└───────┴───────────────┘ -``` diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/engines/database-engines/replicated.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/engines/database-engines/replicated.md.hash deleted file mode 100644 index fa132cbfe58..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/engines/database-engines/replicated.md.hash +++ /dev/null @@ -1 +0,0 @@ -c7048883fff456af diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/engines/database-engines/sqlite.md b/i18n/jp/docusaurus-plugin-content-docs/current/engines/database-engines/sqlite.md deleted file mode 100644 index 80a0a92ba54..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/engines/database-engines/sqlite.md +++ /dev/null @@ -1,88 +0,0 @@ ---- -description: 'Allows to connect to SQLite databases and perform `INSERT` and `SELECT` - queries to exchange data between ClickHouse and SQLite.' 
-sidebar_label: 'SQLite' -sidebar_position: 55 -slug: '/engines/database-engines/sqlite' -title: 'SQLite' ---- - - - - -# SQLite - -SQLite データベースに接続し、データを ClickHouse と SQLite の間で交換するために `INSERT` および `SELECT` クエリを実行できます。 - -## データベースの作成 {#creating-a-database} - -```sql - CREATE DATABASE sqlite_database - ENGINE = SQLite('db_path') -``` - -**エンジンパラメータ** - -- `db_path` — SQLite データベースのファイルへのパス。 - -## データ型サポート {#data_types-support} - -| SQLite | ClickHouse | -|---------------|---------------------------------------------------------| -| INTEGER | [Int32](../../sql-reference/data-types/int-uint.md) | -| REAL | [Float32](../../sql-reference/data-types/float.md) | -| TEXT | [String](../../sql-reference/data-types/string.md) | -| BLOB | [String](../../sql-reference/data-types/string.md) | - -## 特徴と推奨事項 {#specifics-and-recommendations} - -SQLite は、データベース全体(定義、テーブル、インデックス、およびデータ自体)をホストマシン上の単一のクロスプラットフォームファイルとして保存します。書き込み中、SQLite はデータベース全体のファイルをロックします。したがって、書き込み操作は順次実行されます。一方、読み取り操作はマルチタスクで実行できます。 -SQLite はサービス管理(起動スクリプトなど)や `GRANT` およびパスワードに基づくアクセス制御を必要としません。アクセス制御は、データベースファイル自体に与えられたファイルシステムの権限によって処理されます。 - -## 使用例 {#usage-example} - -ClickHouse に接続された SQLite のデータベース: - -```sql -CREATE DATABASE sqlite_db ENGINE = SQLite('sqlite.db'); -SHOW TABLES FROM sqlite_db; -``` - -```text -┌──name───┐ -│ table1 │ -│ table2 │ -└─────────┘ -``` - -テーブルを表示: - -```sql -SELECT * FROM sqlite_db.table1; -``` - -```text -┌─col1──┬─col2─┐ -│ line1 │ 1 │ -│ line2 │ 2 │ -│ line3 │ 3 │ -└───────┴──────┘ -``` - -ClickHouse テーブルから SQLite テーブルにデータを挿入: - -```sql -CREATE TABLE clickhouse_table(`col1` String,`col2` Int16) ENGINE = MergeTree() ORDER BY col2; -INSERT INTO clickhouse_table VALUES ('text',10); -INSERT INTO sqlite_db.table1 SELECT * FROM clickhouse_table; -SELECT * FROM sqlite_db.table1; -``` - -```text -┌─col1──┬─col2─┐ -│ line1 │ 1 │ -│ line2 │ 2 │ -│ line3 │ 3 │ -│ text │ 10 │ -└───────┴──────┘ -``` diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/engines/database-engines/sqlite.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/engines/database-engines/sqlite.md.hash deleted file mode 100644 index e05f74ad806..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/engines/database-engines/sqlite.md.hash +++ /dev/null @@ -1 +0,0 @@ -461bcef746c98526 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/engines/index.md b/i18n/jp/docusaurus-plugin-content-docs/current/engines/index.md deleted file mode 100644 index 0289ca680e5..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/engines/index.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -description: 'テーブル・オブ・コンテンツ ページ for Engines' -slug: '/engines' -title: 'Engines' ---- - - - -| Page | Description | -|----------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| [データベースエンジン](../engines/database-engines/index.md) | ClickHouseのデータベースエンジンは、テーブルで作業し、データがどのように保存および管理されるかを決定することを可能にします。 ClickHouseで利用可能なさまざまなデータベースエンジンについて詳しく学びましょう。 | -| [テーブルエンジン](../engines/table-engines/index.md) | ClickHouseのテーブルエンジンは、データがどのように保存され、書き込まれ、読み取られるかを決定する基本的な概念です。 ClickHouseで利用可能なさまざまなテーブルエンジンについて詳しく学びましょう。 | diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/engines/index.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/engines/index.md.hash deleted file mode 100644 index f262639de3c..00000000000 --- 
a/i18n/jp/docusaurus-plugin-content-docs/current/engines/index.md.hash +++ /dev/null @@ -1 +0,0 @@ -25f510c021abd402 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/index.md b/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/index.md deleted file mode 100644 index 020df1eefd4..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/index.md +++ /dev/null @@ -1,107 +0,0 @@ ---- -description: 'テーブルエンジンのドキュメント' -slug: '/engines/table-engines/' -toc_folder_title: 'Table Engines' -toc_priority: 26 -toc_title: 'Introduction' -title: 'テーブルエンジン' ---- - - - - -# テーブルエンジン - -テーブルエンジン(テーブルの種類)は、以下を決定します。 - -- データがどのように、どこに保存されるか、書き込む場所、読み取る場所。 -- サポートされるクエリとその方法。 -- 同時データアクセス。 -- 存在する場合のインデックスの使用。 -- マルチスレッドリクエスト実行が可能かどうか。 -- データレプリケーションパラメータ。 - -## エンジンファミリー {#engine-families} - -### MergeTree {#mergetree} - -高負荷タスクに対する最も汎用的で機能的なテーブルエンジン。これらのエンジンに共通する特性は、迅速なデータ挿入と、その後のバックグラウンドでのデータ処理です。`MergeTree`ファミリーのエンジンは、データレプリケーション([Replicated\*](/engines/table-engines/mergetree-family/replication)バージョンのエンジン)、パーティション、セカンダリデータスキッピングインデックス、その他の機能をサポートしていますが、他のエンジンではサポートされていません。 - -ファミリー内のエンジン: - -| MergeTreeエンジン | -|-------------------------------------------------------------------------------------------------------------------------------------------| -| [MergeTree](/engines/table-engines/mergetree-family/mergetree) | -| [ReplacingMergeTree](/engines/table-engines/mergetree-family/replacingmergetree) | -| [SummingMergeTree](/engines/table-engines/mergetree-family/summingmergetree) | -| [AggregatingMergeTree](/engines/table-engines/mergetree-family/aggregatingmergetree) | -| [CollapsingMergeTree](/engines/table-engines/mergetree-family/collapsingmergetree) | -| [VersionedCollapsingMergeTree](/engines/table-engines/mergetree-family/versionedcollapsingmergetree) | -| [GraphiteMergeTree](/engines/table-engines/mergetree-family/graphitemergetree) | - -### Log {#log} - -最小限の機能を持つ軽量[エンジン](../../engines/table-engines/log-family/index.md)。多くの小さなテーブル(約100万行まで)を迅速に書き込み、後で全体として読み取る必要がある場合に最も効果的です。 - -ファミリー内のエンジン: - -| Logエンジン | -|----------------------------------------------------------------------------| -| [TinyLog](/engines/table-engines/log-family/tinylog) | -| [StripeLog](/engines/table-engines/log-family/stripelog) | -| [Log](/engines/table-engines/log-family/log) | - -### 統合エンジン {#integration-engines} - -他のデータストレージおよび処理システムと通信するためのエンジン。 - -ファミリー内のエンジン: - -| 統合エンジン | -|---------------------------------------------------------------------------------| -| [ODBC](../../engines/table-engines/integrations/odbc.md) | -| [JDBC](../../engines/table-engines/integrations/jdbc.md) | -| [MySQL](../../engines/table-engines/integrations/mysql.md) | -| [MongoDB](../../engines/table-engines/integrations/mongodb.md) | -| [Redis](../../engines/table-engines/integrations/redis.md) | -| [HDFS](../../engines/table-engines/integrations/hdfs.md) | -| [S3](../../engines/table-engines/integrations/s3.md) | -| [Kafka](../../engines/table-engines/integrations/kafka.md) | -| [EmbeddedRocksDB](../../engines/table-engines/integrations/embedded-rocksdb.md) | -| [RabbitMQ](../../engines/table-engines/integrations/rabbitmq.md) | -| [PostgreSQL](../../engines/table-engines/integrations/postgresql.md) | -| [S3Queue](../../engines/table-engines/integrations/s3queue.md) | -| [TimeSeries](../../engines/table-engines/integrations/time-series.md) | - -### 特殊エンジン {#special-engines} - -ファミリー内のエンジン: - -| 特殊エンジン | 
-|---------------------------------------------------------------| -| [Distributed](/engines/table-engines/special/distributed) | -| [Dictionary](/engines/table-engines/special/dictionary) | -| [Merge](/engines/table-engines/special/merge) | -| [Executable](/engines/table-engines/special/executable) | -| [File](/engines/table-engines/special/file) | -| [Null](/engines/table-engines/special/null) | -| [Set](/engines/table-engines/special/set) | -| [Join](/engines/table-engines/special/join) | -| [URL](/engines/table-engines/special/url) | -| [View](/engines/table-engines/special/view) | -| [Memory](/engines/table-engines/special/memory) | -| [Buffer](/engines/table-engines/special/buffer) | -| [External Data](/engines/table-engines/special/external-data) | -| [GenerateRandom](/engines/table-engines/special/generate) | -| [KeeperMap](/engines/table-engines/special/keeper-map) | -| [FileLog](/engines/table-engines/special/filelog) | - -## バーチャルカラム {#table_engines-virtual_columns} - -バーチャルカラムは、エンジンソースコードで定義されたテーブルエンジンの不可欠な属性です。 - -`CREATE TABLE`クエリではバーチャルカラムを指定してはいけません。`SHOW CREATE TABLE`や`DESCRIBE TABLE`クエリの結果にも表示されません。バーチャルカラムは読み取り専用であり、そこにデータを挿入することはできません。 - -バーチャルカラムからデータを選択するには、その名前を`SELECT`クエリで指定する必要があります。`SELECT *`ではバーチャルカラムの値は返されません。 - -テーブルにテーブルのバーチャルカラムのいずれかと同じ名前のカラムがある場合、バーチャルカラムはアクセスできなくなります。これを行うことはお勧めしません。競合を避けるために、バーチャルカラム名には通常アンダースコアがプレフィックスとして付けられます。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/index.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/index.md.hash deleted file mode 100644 index 3239ae1822e..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/index.md.hash +++ /dev/null @@ -1 +0,0 @@ -efced6cdc7d239cb diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/ExternalDistributed.md b/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/ExternalDistributed.md deleted file mode 100644 index 82b07ebd9fe..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/ExternalDistributed.md +++ /dev/null @@ -1,58 +0,0 @@ ---- -description: 'The `ExternalDistributed` engine allows to perform `SELECT` queries - on data that is stored on a remote servers MySQL or PostgreSQL. Accepts MySQL or - PostgreSQL engines as an argument so sharding is possible.' -sidebar_label: 'ExternalDistributed' -sidebar_position: 55 -slug: '/engines/table-engines/integrations/ExternalDistributed' -title: 'ExternalDistributed' ---- - - - -`ExternalDistributed` エンジンは、リモートサーバーの MySQL または PostgreSQL に保存されたデータに対して `SELECT` クエリを実行することを可能にします。シャーディングが可能なように、引数として [MySQL](../../../engines/table-engines/integrations/mysql.md) または [PostgreSQL](../../../engines/table-engines/integrations/postgresql.md) エンジンを受け入れます。 - -## テーブルの作成 {#creating-a-table} - -```sql -CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] -( - name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1] [TTL expr1], - name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2] [TTL expr2], - ... 
-) ENGINE = ExternalDistributed('engine', 'host:port', 'database', 'table', 'user', 'password'); -``` - -[CREATE TABLE](/sql-reference/statements/create/table) クエリの詳細な説明を参照してください。 - -テーブルの構造は元のテーブルの構造と異なる場合があります: - -- カラム名は元のテーブルと同じである必要がありますが、これらのカラムの一部のみを使用し、任意の順序で指定することができます。 -- カラムタイプは元のテーブルのものと異なる場合があります。ClickHouse は [cast](/sql-reference/functions/type-conversion-functions#cast) 関数を使用して値を ClickHouse データ型に変換しようとします。 - -**エンジンのパラメータ** - -- `engine` — テーブルエンジン `MySQL` または `PostgreSQL`。 -- `host:port` — MySQL または PostgreSQL サーバーのアドレス。 -- `database` — リモートデータベース名。 -- `table` — リモートテーブル名。 -- `user` — ユーザー名。 -- `password` — ユーザーパスワード。 - -## 実装の詳細 {#implementation-details} - -複数のレプリカをサポートしており、`|` でリストされる必要があります。また、シャードは `,` でリストされる必要があります。例えば: - -```sql -CREATE TABLE test_shards (id UInt32, name String, age UInt32, money UInt32) ENGINE = ExternalDistributed('MySQL', `mysql{1|2}:3306,mysql{3|4}:3306`, 'clickhouse', 'test_replicas', 'root', 'clickhouse'); -``` - -レプリカを指定すると、読み取り時に各シャードの利用可能なレプリカのうちの1つが選択されます。接続が失敗した場合は、次のレプリカが選択され、全てのレプリカに対してそのように続けられます。もし全てのレプリカの接続試行が失敗した場合、同様の方法で数回試行されます。 - -各シャードに対して任意の数のシャードおよびレプリカを指定できます。 - -**関連情報** - -- [MySQL テーブルエンジン](../../../engines/table-engines/integrations/mysql.md) -- [PostgreSQL テーブルエンジン](../../../engines/table-engines/integrations/postgresql.md) -- [分散テーブルエンジン](../../../engines/table-engines/special/distributed.md) diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/ExternalDistributed.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/ExternalDistributed.md.hash deleted file mode 100644 index 95df0f68b47..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/ExternalDistributed.md.hash +++ /dev/null @@ -1 +0,0 @@ -8c62b5a6fec3e7cf diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/azure-queue.md b/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/azure-queue.md deleted file mode 100644 index 16f02458ced..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/azure-queue.md +++ /dev/null @@ -1,156 +0,0 @@ ---- -description: 'This engine provides an integration with the Azure Blob Storage ecosystem, - allowing streaming data import.' -sidebar_label: 'AzureQueue' -sidebar_position: 181 -slug: '/engines/table-engines/integrations/azure-queue' -title: 'AzureQueue テーブルエンジン' ---- - - - - -# AzureQueue テーブルエンジン - -このエンジンは [Azure Blob Storage](https://azure.microsoft.com/en-us/products/storage/blobs) エコシステムとの統合を提供し、ストリーミングデータのインポートを可能にします。 - -## テーブルの作成 {#creating-a-table} - -```sql -CREATE TABLE test (name String, value UInt32) - ENGINE = AzureQueue(...) - [SETTINGS] - [mode = '',] - [after_processing = 'keep',] - [keeper_path = '',] - ... 
-``` - -**エンジンのパラメータ** - -`AzureQueue` のパラメータは `AzureBlobStorage` テーブルエンジンがサポートしているのと同じです。パラメータセクションは [こちら](../../../engines/table-engines/integrations/azureBlobStorage.md) を参照してください。 - -[AzureBlobStorage](/engines/table-engines/integrations/azureBlobStorage) テーブルエンジンと同様に、ユーザーはローカルの Azure Storage 開発のために Azurite エミュレーターを使用できます。詳細は [こちら](https://learn.microsoft.com/en-us/azure/storage/common/storage-use-azurite?tabs=docker-hub%2Cblob-storage) を参照してください。 - -**例** - -```sql -CREATE TABLE azure_queue_engine_table -( - `key` UInt64, - `data` String -) -ENGINE = AzureQueue('DefaultEndpointsProtocol=http;AccountName=devstoreaccount1;AccountKey=Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==;BlobEndpoint=http://azurite1:10000/devstoreaccount1/;', 'testcontainer', '*', 'CSV') -SETTINGS mode = 'unordered' -``` - -## 設定 {#settings} - -サポートされている設定のセットは `S3Queue` テーブルエンジンと同じですが、`s3queue_` プレフィックスはありません。[設定のフルリスト](../../../engines/table-engines/integrations/s3queue.md#settings)を参照してください。 -テーブルに対して設定された設定のリストを取得するには、`system.azure_queue_settings` テーブルを使用します。利用可能は `24.10` 以降です。 - -## 説明 {#description} - -ストリーミングインポートに対して `SELECT` は特に便利ではありません(デバッグを除く)、なぜなら各ファイルは一度だけインポートできるからです。実際には、[マテリアライズドビュー](../../../sql-reference/statements/create/view.md)を使用してリアルタイムスレッドを作成するのがより実用的です。これを行うには: - -1. エンジンを使用して、S3の指定されたパスからデータを消費するためのテーブルを作成し、それをデータストリームと見なします。 -2. 望ましい構造を持つテーブルを作成します。 -3. エンジンからデータを変換し、以前に作成したテーブルに挿入するマテリアライズドビューを作成します。 - -`MATERIALIZED VIEW` がエンジンを結合すると、バックグラウンドでデータの収集を開始します。 - -例: - -```sql -CREATE TABLE azure_queue_engine_table (key UInt64, data String) - ENGINE=AzureQueue('', 'CSV', 'gzip') - SETTINGS - mode = 'unordered'; - -CREATE TABLE stats (key UInt64, data String) - ENGINE = MergeTree() ORDER BY key; - -CREATE MATERIALIZED VIEW consumer TO stats - AS SELECT key, data FROM azure_queue_engine_table; - -SELECT * FROM stats ORDER BY key; -``` - -## 仮想カラム {#virtual-columns} - -- `_path` — ファイルへのパス。 -- `_file` — ファイルの名前。 - -仮想カラムに関する詳細は [こちら](../../../engines/table-engines/index.md#table_engines-virtual_columns) を参照してください。 - -## 内部情報 {#introspection} - -テーブル設定 `enable_logging_to_queue_log=1` を介してテーブルのロギングを有効にします。 - -内部情報の機能は、[S3Queue テーブルエンジン](/engines/table-engines/integrations/s3queue#introspection) と同様ですが、いくつかの明確な違いがあります: - -1. サーバーバージョン >= 25.1 では、`system.azure_queue` を使用してキューのメモリ内状態を確認します。古いバージョンでは `system.s3queue` を使用します(それには `azure` テーブルに関する情報も含まれます)。 -2. メインの ClickHouse 構成を介して `system.azure_queue_log` を有効にします。例えば: - - ```xml - - system - azure_queue_log
-
- ``` - -この永続テーブルは `system.s3queue` と同じ情報を持っていますが、処理され、失敗したファイルに関するものです。 - -テーブルの構造は以下の通りです: - -```sql - -CREATE TABLE system.azure_queue_log -( - `hostname` LowCardinality(String) COMMENT 'ホスト名', - `event_date` Date COMMENT 'このログ行が書き込まれたイベントの日付', - `event_time` DateTime COMMENT 'このログ行が書き込まれたイベントの時間', - `database` String COMMENT '現在の S3Queue テーブルが存在するデータベースの名前', - `table` String COMMENT 'S3Queue テーブルの名前', - `uuid` String COMMENT 'S3Queue テーブルの UUID', - `file_name` String COMMENT '処理されているファイルの名前', - `rows_processed` UInt64 COMMENT '処理された行数', - `status` Enum8('Processed' = 0, 'Failed' = 1) COMMENT '処理されたファイルのステータス', - `processing_start_time` Nullable(DateTime) COMMENT 'ファイル処理開始時刻', - `processing_end_time` Nullable(DateTime) COMMENT 'ファイル処理終了時刻', - `exception` String COMMENT '発生した場合の例外メッセージ' -) -ENGINE = MergeTree -PARTITION BY toYYYYMM(event_date) -ORDER BY (event_date, event_time) -SETTINGS index_granularity = 8192 -COMMENT 'S3Queue エンジンによって処理されたファイルに関するログエントリを含みます。' - -``` - -例: - -```sql -SELECT * -FROM system.azure_queue_log -LIMIT 1 -FORMAT Vertical - -Row 1: -────── -hostname: clickhouse -event_date: 2024-12-16 -event_time: 2024-12-16 13:42:47 -database: default -table: azure_queue_engine_table -uuid: 1bc52858-00c0-420d-8d03-ac3f189f27c8 -file_name: test_1.csv -rows_processed: 3 -status: Processed -processing_start_time: 2024-12-16 13:42:47 -processing_end_time: 2024-12-16 13:42:47 -exception: - -1 row in set. Elapsed: 0.002 sec. - -``` diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/azure-queue.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/azure-queue.md.hash deleted file mode 100644 index 21d1d63f26e..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/azure-queue.md.hash +++ /dev/null @@ -1 +0,0 @@ -4cd479d5c2b90bab diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/azureBlobStorage.md b/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/azureBlobStorage.md deleted file mode 100644 index e0d8211a484..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/azureBlobStorage.md +++ /dev/null @@ -1,103 +0,0 @@ ---- -description: 'This engine provides an integration with Azure Blob Storage ecosystem.' -sidebar_label: 'Azure Blob Storage' -sidebar_position: 10 -slug: '/engines/table-engines/integrations/azureBlobStorage' -title: 'AzureBlobStorage Table Engine' ---- - - - - -# AzureBlobStorage テーブルエンジン - -このエンジンは、[Azure Blob Storage](https://azure.microsoft.com/en-us/products/storage/blobs) エコシステムと統合を提供します。 - -## テーブルを作成する {#create-table} - -```sql -CREATE TABLE azure_blob_storage_table (name String, value UInt32) - ENGINE = AzureBlobStorage(connection_string|storage_account_url, container_name, blobpath, [account_name, account_key, format, compression]) - [PARTITION BY expr] - [SETTINGS ...] 
-``` - -### エンジンパラメータ {#engine-parameters} - -- `endpoint` — AzureBlobStorage エンドポイント URL(コンテナとプレフィックスを含む)。認証方法に応じて account_name が必要な場合があります。 (`http://azurite1:{port}/[account_name]{container_name}/{data_prefix}`) または、これらのパラメータを storage_account_url、account_name および container を使って別々に指定できます。プレフィックスを指定するには、エンドポイントを使用する必要があります。 -- `endpoint_contains_account_name` - このフラグは、エンドポイントに account_name が含まれているかどうかを指定するために使用されます。特定の認証メソッドにのみ必要です。(デフォルト: true) -- `connection_string|storage_account_url` — connection_string にはアカウント名とキーが含まれています([接続文字列の作成](https://learn.microsoft.com/en-us/azure/storage/common/storage-configure-connection-string?toc=%2Fazure%2Fstorage%2Fblobs%2Ftoc.json&bc=%2Fazure%2Fstorage%2Fblobs%2Fbreadcrumb%2Ftoc.json#configure-a-connection-string-for-an-azure-storage-account))または、ここでストレージアカウントの URL を提供し、アカウント名とアカウントキーを補足のパラメータとして提供することもできます(パラメータ account_name および account_key を参照)。 -- `container_name` - コンテナ名 -- `blobpath` - ファイルパス。読み取り専用モードで以下のワイルドカードをサポートします: `*`, `**`, `?`, `{abc,def}` および `{N..M}` ここで `N` と `M` は数字、`'abc'` と `'def'` は文字列です。 -- `account_name` - storage_account_url が使用されている場合、アカウント名をここで指定できます。 -- `account_key` - storage_account_url が使用されている場合、アカウントキーをここで指定できます。 -- `format` — ファイルの[フォーマット](/interfaces/formats.md)。 -- `compression` — サポートされている値: `none`, `gzip/gz`, `brotli/br`, `xz/LZMA`, `zstd/zst`。デフォルトでは、ファイル拡張子によって圧縮が自動的に検出されます。(`auto` に設定するのと同じ)。 - -**例** - -ユーザーは、ローカル Azure ストレージ開発のために Azurite エミュレーターを使用できます。詳細は[こちら](https://learn.microsoft.com/en-us/azure/storage/common/storage-use-azurite?tabs=docker-hub%2Cblob-storage)をご覧ください。ローカルの Azurite インスタンスを使用する場合、ユーザーは下記のコマンドで `http://azurite1:10000` の代わりに `http://localhost:10000` を指定する必要があります。ここでは、Azurite がホスト `azurite1` で利用可能であると仮定しています。 - -```sql -CREATE TABLE test_table (key UInt64, data String) - ENGINE = AzureBlobStorage('DefaultEndpointsProtocol=http;AccountName=devstoreaccount1;AccountKey=Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==;BlobEndpoint=http://azurite1:10000/devstoreaccount1/;', 'testcontainer', 'test_table', 'CSV'); - -INSERT INTO test_table VALUES (1, 'a'), (2, 'b'), (3, 'c'); - -SELECT * FROM test_table; -``` - -```text -┌─key──┬─data──┐ -│ 1 │ a │ -│ 2 │ b │ -│ 3 │ c │ -└──────┴───────┘ -``` - -## 仮想カラム {#virtual-columns} - -- `_path` — ファイルへのパス。タイプ: `LowCardinality(String)`。 -- `_file` — ファイル名。タイプ: `LowCardinality(String)`。 -- `_size` — ファイルのサイズ(バイト単位)。タイプ: `Nullable(UInt64)`。サイズが不明な場合、値は `NULL` です。 -- `_time` — ファイルの最終更新時刻。タイプ: `Nullable(DateTime)`。時刻が不明な場合、値は `NULL` です。 - -## 認証 {#authentication} - -現在、認証方法は3つあります: -- `Managed Identity` - `endpoint`、`connection_string`、または `storage_account_url` を指定することで使用できます。 -- `SAS Token` - `endpoint`、`connection_string`、または `storage_account_url` を指定することで使用できます。URL に '?' 
が含まれているかどうかで識別されます。例については、[azureBlobStorage](/sql-reference/table-functions/azureBlobStorage#using-shared-access-signatures-sas-sas-tokens)を参照してください。 -- `Workload Identity` - `endpoint` または `storage_account_url` を指定することで使用できます。 `use_workload_identity` パラメータが設定されている場合、([workload identity](https://github.com/Azure/azure-sdk-for-cpp/tree/main/sdk/identity/azure-identity#authenticate-azure-hosted-applications))が認証に使用されます。 - -### データキャッシュ {#data-cache} - -`Azure` テーブルエンジンは、ローカルディスクにデータキャッシングをサポートしています。 -この[セクション](/operations/storing-data.md/#using-local-cache)でファイルシステムキャッシュの構成オプションと使用法を確認してください。 -キャッシングは、パスとストレージオブジェクトのETagに基づいて行われるため、ClickHouse は古いキャッシュバージョンを読み取ることはありません。 - -キャッシングを有効にするには、設定 `filesystem_cache_name = ''` と `enable_filesystem_cache = 1` を使用します。 - -```sql -SELECT * -FROM azureBlobStorage('DefaultEndpointsProtocol=http;AccountName=devstoreaccount1;AccountKey=Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==;BlobEndpoint=http://azurite1:10000/devstoreaccount1/;', 'testcontainer', 'test_table', 'CSV') -SETTINGS filesystem_cache_name = 'cache_for_azure', enable_filesystem_cache = 1; -``` - -1. clickhouse の設定ファイルに以下のセクションを追加します: - -```xml - - - - キャッシュディレクトリへのパス - 10Gi - - - -``` - -2. clickhouse の `storage_configuration` セクションからキャッシュ設定(したがってキャッシュストレージ)を再利用します。これは[こちら](/operations/storing-data.md/#using-local-cache)に説明されています。 - -## 参照 {#see-also} - -[Azure Blob Storage テーブル関数](/sql-reference/table-functions/azureBlobStorage) diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/azureBlobStorage.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/azureBlobStorage.md.hash deleted file mode 100644 index 511f8b474a4..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/azureBlobStorage.md.hash +++ /dev/null @@ -1 +0,0 @@ -9ee4138e8fb7df5d diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/deltalake.md b/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/deltalake.md deleted file mode 100644 index adb58ce137e..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/deltalake.md +++ /dev/null @@ -1,63 +0,0 @@ ---- -description: 'This engine provides a read-only integration with existing Delta Lake - tables in Amazon S3.' 
-sidebar_label: 'DeltaLake' -sidebar_position: 40 -slug: '/engines/table-engines/integrations/deltalake' -title: 'DeltaLake Table Engine' ---- - - - - -# DeltaLake テーブルエンジン - -このエンジンは、Amazon S3 にある既存の [Delta Lake](https://github.com/delta-io/delta) テーブルとの読み取り専用統合を提供します。 - -## テーブル作成 {#create-table} - -Delta Lake テーブルは S3 に既に存在している必要があります。このコマンドは新しいテーブルを作成するための DDL パラメータを受け取りません。 - -```sql -CREATE TABLE deltalake - ENGINE = DeltaLake(url, [aws_access_key_id, aws_secret_access_key,]) -``` - -**エンジンパラメータ** - -- `url` — 既存の Delta Lake テーブルへのパスを含むバケット URL。 -- `aws_access_key_id`, `aws_secret_access_key` - [AWS](https://aws.amazon.com/) アカウントユーザーの長期認証情報。リクエストの認証に使用できます。このパラメータはオプションです。認証情報が指定されていない場合、設定ファイルから使用されます。 - -エンジンパラメータは [Named Collections](/operations/named-collections.md) を使用して指定できます。 - -**例** - -```sql -CREATE TABLE deltalake ENGINE=DeltaLake('http://mars-doc-test.s3.amazonaws.com/clickhouse-bucket-3/test_table/', 'ABC123', 'Abc+123') -``` - -名前付きコレクションを使用する場合: - -```xml - - - - http://mars-doc-test.s3.amazonaws.com/clickhouse-bucket-3/ - ABC123 - Abc+123 - - - -``` - -```sql -CREATE TABLE deltalake ENGINE=DeltaLake(deltalake_conf, filename = 'test_table') -``` - -### データキャッシュ {#data-cache} - -`Iceberg` テーブルエンジンとテーブル関数は、`S3`、`AzureBlobStorage`、`HDFS` ストレージと同様にデータキャッシュをサポートします。詳細は [こちら](../../../engines/table-engines/integrations/s3.md#data-cache) をご覧ください。 - -## 参照 {#see-also} - -- [deltaLake テーブル関数](../../../sql-reference/table-functions/deltalake.md) diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/deltalake.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/deltalake.md.hash deleted file mode 100644 index 96a63502ae9..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/deltalake.md.hash +++ /dev/null @@ -1 +0,0 @@ -4b5c5cbddca79208 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/embedded-rocksdb.md b/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/embedded-rocksdb.md deleted file mode 100644 index b48e684ab5b..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/embedded-rocksdb.md +++ /dev/null @@ -1,224 +0,0 @@ ---- -description: 'This engine allows integrating ClickHouse with RocksDB' -sidebar_label: 'EmbeddedRocksDB' -sidebar_position: 50 -slug: '/engines/table-engines/integrations/embedded-rocksdb' -title: 'EmbeddedRocksDB Engine' ---- - -import CloudNotSupportedBadge from '@theme/badges/CloudNotSupportedBadge'; - - -# EmbeddedRocksDB エンジン - - - -このエンジンは、ClickHouse を [RocksDB](http://rocksdb.org/) と統合することを可能にします。 - -## テーブルの作成 {#creating-a-table} - -```sql -CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] -( - name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1], - name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2], - ... -) ENGINE = EmbeddedRocksDB([ttl, rocksdb_dir, read_only]) PRIMARY KEY(primary_key_name) -[ SETTINGS name=value, ... 
] -``` - -エンジンパラメータ: - -- `ttl` - 値の有効期限。TTLは秒単位で受け付けられます。TTLが 0 の場合、通常の RocksDB インスタンスが使用されます (TTL なし)。 -- `rocksdb_dir` - 既存の RocksDB のディレクトリのパスまたは作成された RocksDB の宛先パス。指定された `rocksdb_dir` でテーブルを開きます。 -- `read_only` - `read_only` が true に設定されている場合、読み取り専用モードが使用されます。TTL を持つストレージの場合、コンパクションはトリガーされず (手動または自動のどちらも)、期限切れのエントリは削除されません。 -- `primary_key_name` – カラムリスト内の任意のカラム名。 -- `primary key` は指定する必要があり、主キーには 1 つのカラムのみがサポートされています。主キーは `rocksdb key` としてバイナリでシリアライズされます。 -- 主キー以外のカラムは、対応する順序で `rocksdb` 値としてバイナリでシリアライズされます。 -- キー `equals` または `in` フィルタリングを持つクエリは、`rocksdb` からのマルチキーのルックアップに最適化されます。 - -エンジン設定: - -- `optimize_for_bulk_insert` – テーブルはバルク挿入用に最適化されています (挿入パイプラインは SST ファイルを作成し、メムテーブルへの書き込みの代わりに rocksdb データベースにインポートします); デフォルト値: `1`。 -- `bulk_insert_block_size` - バルク挿入によって作成される SST ファイルの最小サイズ (行数単位); デフォルト値: `1048449`。 - -例: - -```sql -CREATE TABLE test -( - `key` String, - `v1` UInt32, - `v2` String, - `v3` Float32 -) -ENGINE = EmbeddedRocksDB -PRIMARY KEY key -``` - -## メトリクス {#metrics} - -`system.rocksdb` テーブルもあり、rocksdb の統計情報を公開しています: - -```sql -SELECT - name, - value -FROM system.rocksdb - -┌─name──────────────────────┬─value─┐ -│ no.file.opens │ 1 │ -│ number.block.decompressed │ 1 │ -└───────────────────────────┴───────┘ -``` - -## 設定 {#configuration} - -任意の [rocksdb オプション](https://github.com/facebook/rocksdb/wiki/Option-String-and-Option-Map) を設定を使用して変更することもできます: - -```xml - - - 8 - - - 2 - - - - TABLE - - 8 - - - 2 - -
-
-
-``` - -デフォルトでは、単純な近似カウントの最適化はオフになっており、これが `count()` クエリのパフォーマンスに影響を与える可能性があります。この最適化を有効にするには、`optimize_trivial_approximate_count_query = 1` を設定します。また、この設定は EmbeddedRocksDB エンジンの `system.tables` にも影響し、`total_rows` および `total_bytes` の近似値を表示するには設定をオンにしてください。 - -## サポートされる操作 {#supported-operations} - -### 挿入 {#inserts} - -`EmbeddedRocksDB` に新しい行が挿入されるとき、キーがすでに存在する場合、その値が更新され、存在しない場合は新しいキーが作成されます。 - -例: - -```sql -INSERT INTO test VALUES ('some key', 1, 'value', 3.2); -``` - -### 削除 {#deletes} - -行は `DELETE` クエリまたは `TRUNCATE` を使用して削除できます。 - -```sql -DELETE FROM test WHERE key LIKE 'some%' AND v1 > 1; -``` - -```sql -ALTER TABLE test DELETE WHERE key LIKE 'some%' AND v1 > 1; -``` - -```sql -TRUNCATE TABLE test; -``` - -### 更新 {#updates} - -値は `ALTER TABLE` クエリを使用して更新できます。主キーは更新できません。 - -```sql -ALTER TABLE test UPDATE v1 = v1 * 10 + 2 WHERE key LIKE 'some%' AND v3 > 3.1; -``` - -### ジョイン {#joins} - -EmbeddedRocksDB テーブルとの特別な `direct` ジョインがサポートされています。 -この直接ジョインは、メモリ上でハッシュテーブルを形成せず、EmbeddedRocksDB から直接データにアクセスします。 - -大規模なジョインでは、ハッシュテーブルが作成されないため、メモリ使用量が大幅に低下する場合があります。 - -直接ジョインを有効にするには: -```sql -SET join_algorithm = 'direct, hash' -``` - -:::tip -`join_algorithm` が `direct, hash` に設定されている場合、可能な場合は直接ジョインが使用され、それ以外の場合はハッシュジョインが使用されます。 -::: - -#### 例 {#example} - -##### EmbeddedRocksDB テーブルの作成とデータの挿入 {#create-and-populate-an-embeddedrocksdb-table} -```sql -CREATE TABLE rdb -( - `key` UInt32, - `value` Array(UInt32), - `value2` String -) -ENGINE = EmbeddedRocksDB -PRIMARY KEY key -``` - -```sql -INSERT INTO rdb - SELECT - toUInt32(sipHash64(number) % 10) as key, - [key, key+1] as value, - ('val2' || toString(key)) as value2 - FROM numbers_mt(10); -``` - -##### `rdb` テーブルと結合するためのテーブルの作成とデータの挿入 {#create-and-populate-a-table-to-join-with-table-rdb} - -```sql -CREATE TABLE t2 -( - `k` UInt16 -) -ENGINE = TinyLog -``` - -```sql -INSERT INTO t2 SELECT number AS k -FROM numbers_mt(10) -``` - -##### ジョインアルゴリズムを `direct` に設定 {#set-the-join-algorithm-to-direct} - -```sql -SET join_algorithm = 'direct' -``` - -##### INNER JOIN {#an-inner-join} -```sql -SELECT * -FROM -( - SELECT k AS key - FROM t2 -) AS t2 -INNER JOIN rdb ON rdb.key = t2.key -ORDER BY key ASC -``` -```response -┌─key─┬─rdb.key─┬─value──┬─value2─┐ -│ 0 │ 0 │ [0,1] │ val20 │ -│ 2 │ 2 │ [2,3] │ val22 │ -│ 3 │ 3 │ [3,4] │ val23 │ -│ 6 │ 6 │ [6,7] │ val26 │ -│ 7 │ 7 │ [7,8] │ val27 │ -│ 8 │ 8 │ [8,9] │ val28 │ -│ 9 │ 9 │ [9,10] │ val29 │ -└─────┴─────────┴────────┴────────┘ -``` - -### ジョインに関するさらなる情報 {#more-information-on-joins} -- [`join_algorithm` 設定](/operations/settings/settings.md#join_algorithm) -- [JOIN 句](/sql-reference/statements/select/join.md) diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/embedded-rocksdb.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/embedded-rocksdb.md.hash deleted file mode 100644 index eb6399aaf78..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/embedded-rocksdb.md.hash +++ /dev/null @@ -1 +0,0 @@ -bc76956b2f7223e6 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/hdfs.md b/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/hdfs.md deleted file mode 100644 index 672761c39a0..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/hdfs.md +++ /dev/null @@ -1,250 +0,0 @@ ---- -description: 'This engine provides integration with the Apache Hadoop ecosystem - 
by allowing to manage data on HDFS via ClickHouse. This engine is similar to the - File and URL engines, but provides Hadoop-specific features.' -sidebar_label: 'HDFS' -sidebar_position: 80 -slug: '/engines/table-engines/integrations/hdfs' -title: 'HDFS' ---- - -import CloudNotSupportedBadge from '@theme/badges/CloudNotSupportedBadge'; - - -# HDFS - - - -このエンジンは、ClickHouse経由で[HDFS](https://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-hdfs/HdfsDesign.html)上のデータを管理することにより、[Apache Hadoop](https://en.wikipedia.org/wiki/Apache_Hadoop)エコシステムとの統合を提供します。このエンジンは、[File](/engines/table-engines/special/file)および[URL](/engines/table-engines/special/url)エンジンに似ていますが、Hadoop特有の機能を提供します。 - -この機能はClickHouseエンジニアによってサポートされておらず、品質が不安定であることが知られています。問題が発生した場合は、自分で修正し、プルリクエストを提出してください。 - -## 使用法 {#usage} - -```sql -ENGINE = HDFS(URI, format) -``` - -**エンジンパラメータ** - -- `URI` - HDFS内のファイルの完全 URI。`URI`のパス部分にはグロブが含まれる場合があります。この場合、テーブルは読み取り専用になります。 -- `format` - 利用可能なファイル形式のいずれかを指定します。`SELECT`クエリを実行するには、形式が入力に対してサポートされている必要があり、`INSERT`クエリを実行するには、出力に対してサポートされている必要があります。利用可能な形式については、[Formats](/sql-reference/formats#formats-overview)セクションに一覧があります。 -- [PARTITION BY expr] - -### PARTITION BY {#partition-by} - -`PARTITION BY` — オプション。ほとんどのケースではパーティションキーは必要ありませんが、必要な場合でも、一般的には月単位以上の詳細なパーティションキーを必要としません。パーティショニングはクエリの高速化には寄与しません(ORDER BY式とは対照的です)。詳細なパーティショニングは決して使用しないでください。クライアント識別子や名前でデータをパーティショニングしないでください(代わりに、クライアント識別子や名前をORDER BY式の最初のカラムにしてください)。 - -月単位でのパーティショニングには、`toYYYYMM(date_column)`式を使用します。ここで`date_column`は[Date](/sql-reference/data-types/date.md)型の日付を含むカラムです。ここでのパーティション名は`"YYYYMM"`形式になります。 - -**例:** - -**1.** `hdfs_engine_table`テーブルを設定します: - -```sql -CREATE TABLE hdfs_engine_table (name String, value UInt32) ENGINE=HDFS('hdfs://hdfs1:9000/other_storage', 'TSV') -``` - -**2.** ファイルを埋めます: - -```sql -INSERT INTO hdfs_engine_table VALUES ('one', 1), ('two', 2), ('three', 3) -``` - -**3.** データをクエリします: - -```sql -SELECT * FROM hdfs_engine_table LIMIT 2 -``` - -```text -┌─name─┬─value─┐ -│ one │ 1 │ -│ two │ 2 │ -└──────┴───────┘ -``` - -## 実装詳細 {#implementation-details} - -- 読み書きは並列で行うことができます。 -- サポートされていないもの: - - `ALTER`および`SELECT...SAMPLE`操作。 - - インデックス。 - - [ゼロコピー](../../../operations/storing-data.md#zero-copy)レプリケーションは可能ですが、推奨されません。 - - :::note ゼロコピーレプリケーションは本番環境には未対応 - ゼロコピーレプリケーションは、ClickHouse バージョン 22.8 以降でデフォルトで無効です。この機能は本番環境での使用は推奨されていません。 - ::: - -**パスにおけるグロブ** - -複数のパスコンポーネントにグロブを使用できます。処理されるファイルは存在し、全体のパスパターンに一致する必要があります。ファイルのリストは`SELECT`時に決定されます(`CREATE`時ではありません)。 - -- `*` — `/`を含む任意の文字の任意の数を置き換え、空文字列も含みます。 -- `?` — 任意の単一文字を置き換えます。 -- `{some_string,another_string,yet_another_one}` — 文字列 `'some_string', 'another_string', 'yet_another_one'` のいずれかを置き換えます。 -- `{N..M}` — NからMまでの範囲の任意の数を置き換えます(両端を含む)。 - -`{}`を使用した構造は、[リモート](../../../sql-reference/table-functions/remote.md)テーブル関数に似ています。 - -**例** - -1. HDFS上に以下のURIを持つTSV形式のいくつかのファイルがあるとします: - - - 'hdfs://hdfs1:9000/some_dir/some_file_1' - - 'hdfs://hdfs1:9000/some_dir/some_file_2' - - 'hdfs://hdfs1:9000/some_dir/some_file_3' - - 'hdfs://hdfs1:9000/another_dir/some_file_1' - - 'hdfs://hdfs1:9000/another_dir/some_file_2' - - 'hdfs://hdfs1:9000/another_dir/some_file_3' - -2. 
すべての6つのファイルを含むテーブルを作成する方法はいくつかあります: - - - -```sql -CREATE TABLE table_with_range (name String, value UInt32) ENGINE = HDFS('hdfs://hdfs1:9000/{some,another}_dir/some_file_{1..3}', 'TSV') -``` - -別の方法: - -```sql -CREATE TABLE table_with_question_mark (name String, value UInt32) ENGINE = HDFS('hdfs://hdfs1:9000/{some,another}_dir/some_file_?', 'TSV') -``` - -テーブルは両方のディレクトリ内のすべてのファイルで構成されます(すべてのファイルは、クエリで説明されている形式およびスキーマに一致する必要があります): - -```sql -CREATE TABLE table_with_asterisk (name String, value UInt32) ENGINE = HDFS('hdfs://hdfs1:9000/{some,another}_dir/*', 'TSV') -``` - -:::note -ファイルのリストに先頭ゼロを伴う数値範囲が含まれている場合、それぞれの桁に対して波括弧を使うか、`?`を使用してください。 -::: - -**例** - -`file000`, `file001`, ... , `file999` という名前のファイルを持つテーブルを作成します: - -```sql -CREATE TABLE big_table (name String, value UInt32) ENGINE = HDFS('hdfs://hdfs1:9000/big_dir/file{0..9}{0..9}{0..9}', 'CSV') -``` - -## 設定 {#configuration} - -GraphiteMergeTreeに似て、HDFSエンジンはClickHouse設定ファイルを使った拡張設定をサポートしています。使用できる設定キーは2つあります:グローバル(`hdfs`)とユーザーレベル(`hdfs_*`)。グローバル設定が最初に適用され、その後ユーザーレベルの設定が存在する場合に適用されます。 - -```xml - - - /tmp/keytab/clickhouse.keytab - clickuser@TEST.CLICKHOUSE.TECH - kerberos - - - - - root@TEST.CLICKHOUSE.TECH - -``` - -### 設定オプション {#configuration-options} - -#### libhdfs3によってサポートされている {#supported-by-libhdfs3} - -| **パラメータ** | **デフォルト値** | -| - | - | -| rpc\_client\_connect\_tcpnodelay | true | -| dfs\_client\_read\_shortcircuit | true | -| output\_replace-datanode-on-failure | true | -| input\_notretry-another-node | false | -| input\_localread\_mappedfile | true | -| dfs\_client\_use\_legacy\_blockreader\_local | false | -| rpc\_client\_ping\_interval | 10 * 1000 | -| rpc\_client\_connect\_timeout | 600 * 1000 | -| rpc\_client\_read\_timeout | 3600 * 1000 | -| rpc\_client\_write\_timeout | 3600 * 1000 | -| rpc\_client\_socket\_linger\_timeout | -1 | -| rpc\_client\_connect\_retry | 10 | -| rpc\_client\_timeout | 3600 * 1000 | -| dfs\_default\_replica | 3 | -| input\_connect\_timeout | 600 * 1000 | -| input\_read\_timeout | 3600 * 1000 | -| input\_write\_timeout | 3600 * 1000 | -| input\_localread\_default\_buffersize | 1 * 1024 * 1024 | -| dfs\_prefetchsize | 10 | -| input\_read\_getblockinfo\_retry | 3 | -| input\_localread\_blockinfo\_cachesize | 1000 | -| input\_read\_max\_retry | 60 | -| output\_default\_chunksize | 512 | -| output\_default\_packetsize | 64 * 1024 | -| output\_default\_write\_retry | 10 | -| output\_connect\_timeout | 600 * 1000 | -| output\_read\_timeout | 3600 * 1000 | -| output\_write\_timeout | 3600 * 1000 | -| output\_close\_timeout | 3600 * 1000 | -| output\_packetpool\_size | 1024 | -| output\_heartbeat\_interval | 10 * 1000 | -| dfs\_client\_failover\_max\_attempts | 15 | -| dfs\_client\_read\_shortcircuit\_streams\_cache\_size | 256 | -| dfs\_client\_socketcache\_expiryMsec | 3000 | -| dfs\_client\_socketcache\_capacity | 16 | -| dfs\_default\_blocksize | 64 * 1024 * 1024 | -| dfs\_default\_uri | "hdfs://localhost:9000" | -| hadoop\_security\_authentication | "simple" | -| hadoop\_security\_kerberos\_ticket\_cache\_path | "" | -| dfs\_client\_log\_severity | "INFO" | -| dfs\_domain\_socket\_path | "" | - -[HDFS Configuration Reference](https://hawq.apache.org/docs/userguide/2.3.0.0-incubating/reference/HDFSConfigurationParameterReference.html)は、一部のパラメータについて説明しています。 - -#### ClickHouseの追加機能 {#clickhouse-extras} - -| **パラメータ** | **デフォルト値** | -| - | - | -| hadoop\_kerberos\_keytab | "" | -| hadoop\_kerberos\_principal | "" | -| libhdfs3\_conf | "" | - -### 制限事項 {#limitations} -* 
`hadoop_security_kerberos_ticket_cache_path`および`libhdfs3_conf`はグローバル専用で、ユーザー専用ではありません。 - -## Kerberosサポート {#kerberos-support} - -`hadoop_security_authentication`パラメータが`kerberos`の値を持つ場合、ClickHouseはKerberosを介して認証します。 -パラメータは[こちら](#clickhouse-extras)にあり、`hadoop_security_kerberos_ticket_cache_path`が役立つ場合があります。 -libhdfs3の制限により、古典的なアプローチのみがサポートされているため、データノードの通信はSASLによって保護されていません(`HADOOP_SECURE_DN_USER`はそのようなセキュリティアプローチの信頼できる指標です)。リファレンスとして`tests/integration/test_storage_kerberized_hdfs/hdfs_configs/bootstrap.sh`を使用してください。 - -`hadoop_kerberos_keytab`、`hadoop_kerberos_principal`または`hadoop_security_kerberos_ticket_cache_path`が指定されている場合、Kerberos認証が使用されます。この場合、`hadoop_kerberos_keytab`と`hadoop_kerberos_principal`は必須です。 - -## HDFS Namenode HAサポート {#namenode-ha} - -libhdfs3はHDFS namenode HAをサポートしています。 - -- HDFSノードから`hdfs-site.xml`を`/etc/clickhouse-server/`へコピーします。 -- ClickHouse設定ファイルに以下の部分を追加します: - -```xml - - /etc/clickhouse-server/hdfs-site.xml - -``` - -- その後、`hdfs-site.xml`の`dfs.nameservices`タグの値をHDFS URIのnamenodeアドレスとして使用します。たとえば、`hdfs://appadmin@192.168.101.11:8020/abc/`を`hdfs://appadmin@my_nameservice/abc/`に置き換えます。 - -## バーチャルカラム {#virtual-columns} - -- `_path` — ファイルへのパス。タイプ: `LowCardinality(String)`。 -- `_file` — ファイル名。タイプ: `LowCardinality(String)`。 -- `_size` — ファイルのサイズ(バイト単位)。タイプ: `Nullable(UInt64)`。サイズが不明な場合、値は`NULL`です。 -- `_time` — ファイルの最終変更時間。タイプ: `Nullable(DateTime)`。時間が不明な場合、値は`NULL`です。 - -## ストレージ設定 {#storage-settings} - -- [hdfs_truncate_on_insert](/operations/settings/settings.md#hdfs_truncate_on_insert) - 挿入前にファイルを切り捨てることを許可します。デフォルトでは無効です。 -- [hdfs_create_new_file_on_insert](/operations/settings/settings.md#hdfs_create_new_file_on_insert) - 各挿入時にサフィックスのある新しいファイルを作成することを許可します。デフォルトでは無効です。 -- [hdfs_skip_empty_files](/operations/settings/settings.md#hdfs_skip_empty_files) - 読み取り時に空のファイルをスキップすることを許可します。デフォルトでは無効です。 - -**関連項目** - -- [バーチャルカラム](../../../engines/table-engines/index.md#table_engines-virtual_columns) diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/hdfs.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/hdfs.md.hash deleted file mode 100644 index e52eb19d414..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/hdfs.md.hash +++ /dev/null @@ -1 +0,0 @@ -9ae287885e44d41c diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/hive.md b/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/hive.md deleted file mode 100644 index 52da8f4c896..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/hive.md +++ /dev/null @@ -1,423 +0,0 @@ ---- -description: 'The Hive engine allows you to perform `SELECT` queries on HDFS Hive - table.' -sidebar_label: 'Hive' -sidebar_position: 84 -slug: '/engines/table-engines/integrations/hive' -title: 'Hive' ---- - -import CloudNotSupportedBadge from '@theme/badges/CloudNotSupportedBadge'; - - - -# Hive - - - -Hiveエンジンを利用することで、HDFS Hiveテーブルに対して`SELECT`クエリを実行することができます。現在、以下の入力フォーマットがサポートされています。 - -- テキスト: `binary`を除くシンプルなスカラー型のみをサポート - -- ORC: `char`を除くシンプルなスカラー型をサポート; `array`のような複雑な型のみをサポート - -- Parquet: すべてのシンプルなスカラー型をサポート; `array`のような複雑な型のみをサポート - -## テーブルの作成 {#creating-a-table} - -```sql -CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] -( - name1 [type1] [ALIAS expr1], - name2 [type2] [ALIAS expr2], - ... 
-) ENGINE = Hive('thrift://host:port', 'database', 'table'); -PARTITION BY expr -``` -[CREATE TABLE](/sql-reference/statements/create/table) クエリの詳細な説明を参照してください。 - -テーブルの構造は元のHiveテーブルの構造と異なることがあります: -- カラム名は元のHiveテーブル内のものと同じである必要がありますが、これらのカラムの一部のみを使用することができ、順序も任意で、他のカラムから計算されたエイリアスカラムを使用することもできます。 -- カラムタイプは元のHiveテーブルのものと同じである必要があります。 -- パーティションによる式は元のHiveテーブルと一貫性を保ち、パーティションによる式のカラムはテーブル構造内に含まれている必要があります。 - -**エンジンパラメータ** - -- `thrift://host:port` — Hiveメタストアのアドレス - -- `database` — リモートデータベース名。 - -- `table` — リモートテーブル名。 - -## 使用例 {#usage-example} - -### HDFSファイルシステムのローカルキャッシュの使用方法 {#how-to-use-local-cache-for-hdfs-filesystem} - -リモートファイルシステムのローカルキャッシュを有効にすることを強くお勧めします。ベンチマークによると、キャッシュを使用した場合、ほぼ2倍の速度向上が見られます。 - -キャッシュを使用する前に、`config.xml`に追加してください。 -```xml - - true - local_cache - 559096952 - 1048576 - -``` - -- enable: trueの場合、ClickHouseは起動後にリモートファイルシステム(HDFS)のローカルキャッシュを維持します。 -- root_dir: 必須。リモートファイルシステム用のローカルキャッシュファイルを保存するルートディレクトリ。 -- limit_size: 必須。ローカルキャッシュファイルの最大サイズ(バイト単位)。 -- bytes_read_before_flush: リモートファイルシステムからファイルをダウンロードする際にローカルファイルシステムにフラッシュする前のバイト数を制御します。デフォルト値は1MBです。 - -### ORC入力フォーマットでHiveテーブルにクエリを実行する {#query-hive-table-with-orc-input-format} - -#### Hiveでのテーブル作成 {#create-table-in-hive} - -```text -hive > CREATE TABLE `test`.`test_orc`( - `f_tinyint` tinyint, - `f_smallint` smallint, - `f_int` int, - `f_integer` int, - `f_bigint` bigint, - `f_float` float, - `f_double` double, - `f_decimal` decimal(10,0), - `f_timestamp` timestamp, - `f_date` date, - `f_string` string, - `f_varchar` varchar(100), - `f_bool` boolean, - `f_binary` binary, - `f_array_int` array, - `f_array_string` array, - `f_array_float` array, - `f_array_array_int` array>, - `f_array_array_string` array>, - `f_array_array_float` array>) -PARTITIONED BY ( - `day` string) -ROW FORMAT SERDE - 'org.apache.hadoop.hive.ql.io.orc.OrcSerde' -STORED AS INPUTFORMAT - 'org.apache.hadoop.hive.ql.io.orc.OrcInputFormat' -OUTPUTFORMAT - 'org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat' -LOCATION - 'hdfs://testcluster/data/hive/test.db/test_orc' - -OK -Time taken: 0.51 seconds - -hive > insert into test.test_orc partition(day='2021-09-18') select 1, 2, 3, 4, 5, 6.11, 7.22, 8.333, current_timestamp(), current_date(), 'hello world', 'hello world', 'hello world', true, 'hello world', array(1, 2, 3), array('hello world', 'hello world'), array(float(1.1), float(1.2)), array(array(1, 2), array(3, 4)), array(array('a', 'b'), array('c', 'd')), array(array(float(1.11), float(2.22)), array(float(3.33), float(4.44))); -OK -Time taken: 36.025 seconds - -hive > select * from test.test_orc; -OK -1 2 3 4 5 6.11 7.22 8 2021-11-05 12:38:16.314 2021-11-05 hello world hello world hello world true hello world [1,2,3] ["hello world","hello world"] [1.1,1.2] [[1,2],[3,4]] [["a","b"],["c","d"]] [[1.11,2.22],[3.33,4.44]] 2021-09-18 -Time taken: 0.295 seconds, Fetched: 1 row(s) -``` - -#### ClickHouseでのテーブル作成 {#create-table-in-clickhouse} - -ClickHouseでのテーブル、上記で作成したHiveテーブルからデータを取得: -```sql -CREATE TABLE test.test_orc -( - `f_tinyint` Int8, - `f_smallint` Int16, - `f_int` Int32, - `f_integer` Int32, - `f_bigint` Int64, - `f_float` Float32, - `f_double` Float64, - `f_decimal` Float64, - `f_timestamp` DateTime, - `f_date` Date, - `f_string` String, - `f_varchar` String, - `f_bool` Bool, - `f_binary` String, - `f_array_int` Array(Int32), - `f_array_string` Array(String), - `f_array_float` Array(Float32), - `f_array_array_int` Array(Array(Int32)), - `f_array_array_string` Array(Array(String)), - `f_array_array_float` Array(Array(Float32)), - `day` 
String -) -ENGINE = Hive('thrift://202.168.117.26:9083', 'test', 'test_orc') -PARTITION BY day - -``` - -```sql -SELECT * FROM test.test_orc settings input_format_orc_allow_missing_columns = 1\G -``` - -```text -SELECT * -FROM test.test_orc -SETTINGS input_format_orc_allow_missing_columns = 1 - -Query id: c3eaffdc-78ab-43cd-96a4-4acc5b480658 - -Row 1: -────── -f_tinyint: 1 -f_smallint: 2 -f_int: 3 -f_integer: 4 -f_bigint: 5 -f_float: 6.11 -f_double: 7.22 -f_decimal: 8 -f_timestamp: 2021-12-04 04:00:44 -f_date: 2021-12-03 -f_string: hello world -f_varchar: hello world -f_bool: true -f_binary: hello world -f_array_int: [1,2,3] -f_array_string: ['hello world','hello world'] -f_array_float: [1.1,1.2] -f_array_array_int: [[1,2],[3,4]] -f_array_array_string: [['a','b'],['c','d']] -f_array_array_float: [[1.11,2.22],[3.33,4.44]] -day: 2021-09-18 - - -1 rows in set. Elapsed: 0.078 sec. -``` - -### Parquet入力フォーマットでHiveテーブルにクエリを実行する {#query-hive-table-with-parquet-input-format} - -#### Hiveでのテーブル作成 {#create-table-in-hive-1} - -```text -hive > -CREATE TABLE `test`.`test_parquet`( - `f_tinyint` tinyint, - `f_smallint` smallint, - `f_int` int, - `f_integer` int, - `f_bigint` bigint, - `f_float` float, - `f_double` double, - `f_decimal` decimal(10,0), - `f_timestamp` timestamp, - `f_date` date, - `f_string` string, - `f_varchar` varchar(100), - `f_char` char(100), - `f_bool` boolean, - `f_binary` binary, - `f_array_int` array, - `f_array_string` array, - `f_array_float` array, - `f_array_array_int` array>, - `f_array_array_string` array>, - `f_array_array_float` array>) -PARTITIONED BY ( - `day` string) -ROW FORMAT SERDE - 'org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe' -STORED AS INPUTFORMAT - 'org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat' -OUTPUTFORMAT - 'org.apache.hadoop.hive.ql.io.parquet.MapredParquetOutputFormat' -LOCATION - 'hdfs://testcluster/data/hive/test.db/test_parquet' -OK -Time taken: 0.51 seconds - -hive > insert into test.test_parquet partition(day='2021-09-18') select 1, 2, 3, 4, 5, 6.11, 7.22, 8.333, current_timestamp(), current_date(), 'hello world', 'hello world', 'hello world', true, 'hello world', array(1, 2, 3), array('hello world', 'hello world'), array(float(1.1), float(1.2)), array(array(1, 2), array(3, 4)), array(array('a', 'b'), array('c', 'd')), array(array(float(1.11), float(2.22)), array(float(3.33), float(4.44))); -OK -Time taken: 36.025 seconds - -hive > select * from test.test_parquet; -OK -1 2 3 4 5 6.11 7.22 8 2021-12-14 17:54:56.743 2021-12-14 hello world hello world hello world true hello world [1,2,3] ["hello world","hello world"] [1.1,1.2] [[1,2],[3,4]] [["a","b"],["c","d"]] [[1.11,2.22],[3.33,4.44]] 2021-09-18 -Time taken: 0.766 seconds, Fetched: 1 row(s) -``` - -#### ClickHouseでのテーブル作成 {#create-table-in-clickhouse-1} - -ClickHouseでのテーブル、上記で作成したHiveテーブルからデータを取得: -```sql -CREATE TABLE test.test_parquet -( - `f_tinyint` Int8, - `f_smallint` Int16, - `f_int` Int32, - `f_integer` Int32, - `f_bigint` Int64, - `f_float` Float32, - `f_double` Float64, - `f_decimal` Float64, - `f_timestamp` DateTime, - `f_date` Date, - `f_string` String, - `f_varchar` String, - `f_char` String, - `f_bool` Bool, - `f_binary` String, - `f_array_int` Array(Int32), - `f_array_string` Array(String), - `f_array_float` Array(Float32), - `f_array_array_int` Array(Array(Int32)), - `f_array_array_string` Array(Array(String)), - `f_array_array_float` Array(Array(Float32)), - `day` String -) -ENGINE = Hive('thrift://localhost:9083', 'test', 'test_parquet') -PARTITION 
BY day -``` - -```sql -SELECT * FROM test.test_parquet settings input_format_parquet_allow_missing_columns = 1\G -``` - -```text -SELECT * -FROM test_parquet -SETTINGS input_format_parquet_allow_missing_columns = 1 - -Query id: 4e35cf02-c7b2-430d-9b81-16f438e5fca9 - -Row 1: -────── -f_tinyint: 1 -f_smallint: 2 -f_int: 3 -f_integer: 4 -f_bigint: 5 -f_float: 6.11 -f_double: 7.22 -f_decimal: 8 -f_timestamp: 2021-12-14 17:54:56 -f_date: 2021-12-14 -f_string: hello world -f_varchar: hello world -f_char: hello world -f_bool: true -f_binary: hello world -f_array_int: [1,2,3] -f_array_string: ['hello world','hello world'] -f_array_float: [1.1,1.2] -f_array_array_int: [[1,2],[3,4]] -f_array_array_string: [['a','b'],['c','d']] -f_array_array_float: [[1.11,2.22],[3.33,4.44]] -day: 2021-09-18 - -1 rows in set. Elapsed: 0.357 sec. -``` - -### テキスト入力フォーマットでHiveテーブルにクエリを実行する {#query-hive-table-with-text-input-format} - -#### Hiveでのテーブル作成 {#create-table-in-hive-2} - -```text -hive > -CREATE TABLE `test`.`test_text`( - `f_tinyint` tinyint, - `f_smallint` smallint, - `f_int` int, - `f_integer` int, - `f_bigint` bigint, - `f_float` float, - `f_double` double, - `f_decimal` decimal(10,0), - `f_timestamp` timestamp, - `f_date` date, - `f_string` string, - `f_varchar` varchar(100), - `f_char` char(100), - `f_bool` boolean, - `f_binary` binary, - `f_array_int` array, - `f_array_string` array, - `f_array_float` array, - `f_array_array_int` array>, - `f_array_array_string` array>, - `f_array_array_float` array>) -PARTITIONED BY ( - `day` string) -ROW FORMAT SERDE - 'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe' -STORED AS INPUTFORMAT - 'org.apache.hadoop.mapred.TextInputFormat' -OUTPUTFORMAT - 'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat' -LOCATION - 'hdfs://testcluster/data/hive/test.db/test_text' -Time taken: 0.1 seconds, Fetched: 34 row(s) - - -hive > insert into test.test_text partition(day='2021-09-18') select 1, 2, 3, 4, 5, 6.11, 7.22, 8.333, current_timestamp(), current_date(), 'hello world', 'hello world', 'hello world', true, 'hello world', array(1, 2, 3), array('hello world', 'hello world'), array(float(1.1), float(1.2)), array(array(1, 2), array(3, 4)), array(array('a', 'b'), array('c', 'd')), array(array(float(1.11), float(2.22)), array(float(3.33), float(4.44))); -OK -Time taken: 36.025 seconds - -hive > select * from test.test_text; -OK -1 2 3 4 5 6.11 7.22 8 2021-12-14 18:11:17.239 2021-12-14 hello world hello world hello world true hello world [1,2,3] ["hello world","hello world"] [1.1,1.2] [[1,2],[3,4]] [["a","b"],["c","d"]] [[1.11,2.22],[3.33,4.44]] 2021-09-18 -Time taken: 0.624 seconds, Fetched: 1 row(s) -``` - -#### ClickHouseでのテーブル作成 {#create-table-in-clickhouse-2} - -ClickHouseでのテーブル、上記で作成したHiveテーブルからデータを取得: -```sql -CREATE TABLE test.test_text -( - `f_tinyint` Int8, - `f_smallint` Int16, - `f_int` Int32, - `f_integer` Int32, - `f_bigint` Int64, - `f_float` Float32, - `f_double` Float64, - `f_decimal` Float64, - `f_timestamp` DateTime, - `f_date` Date, - `f_string` String, - `f_varchar` String, - `f_char` String, - `f_bool` Bool, - `day` String -) -ENGINE = Hive('thrift://localhost:9083', 'test', 'test_text') -PARTITION BY day -``` - -```sql -SELECT * FROM test.test_text settings input_format_skip_unknown_fields = 1, input_format_with_names_use_header = 1, date_time_input_format = 'best_effort'\G -``` - -```text -SELECT * -FROM test.test_text -SETTINGS input_format_skip_unknown_fields = 1, input_format_with_names_use_header = 1, date_time_input_format = 'best_effort' - 
-Query id: 55b79d35-56de-45b9-8be6-57282fbf1f44 - -Row 1: -────── -f_tinyint: 1 -f_smallint: 2 -f_int: 3 -f_integer: 4 -f_bigint: 5 -f_float: 6.11 -f_double: 7.22 -f_decimal: 8 -f_timestamp: 2021-12-14 18:11:17 -f_date: 2021-12-14 -f_string: hello world -f_varchar: hello world -f_char: hello world -f_bool: true -day: 2021-09-18 -``` diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/hive.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/hive.md.hash deleted file mode 100644 index 16e338cacbf..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/hive.md.hash +++ /dev/null @@ -1 +0,0 @@ -e3d200d462b96b40 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/hudi.md b/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/hudi.md deleted file mode 100644 index e78fbea7910..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/hudi.md +++ /dev/null @@ -1,58 +0,0 @@ ---- -description: 'このエンジンは、Amazon S3内の既存の Apache Hudi テーブルとの読み取り専用統合を提供します。' -sidebar_label: 'Hudi' -sidebar_position: 86 -slug: '/engines/table-engines/integrations/hudi' -title: 'Hudi テーブルエンジン' ---- - - - - -# Hudi テーブルエンジン - -このエンジンは、Amazon S3 上の既存の Apache [Hudi](https://hudi.apache.org/) テーブルとの読み取り専用の統合を提供します。 - -## テーブルの作成 {#create-table} - -Hudi テーブルはすでに S3 に存在する必要があります。このコマンドは新しいテーブルを作成するための DDL パラメーターを受け取りません。 - -```sql -CREATE TABLE hudi_table - ENGINE = Hudi(url, [aws_access_key_id, aws_secret_access_key,]) -``` - -**エンジンパラメーター** - -- `url` — 既存の Hudi テーブルへのパスを含むバケット URL。 -- `aws_access_key_id`, `aws_secret_access_key` - [AWS](https://aws.amazon.com/) アカウントユーザーの長期認証情報。これらを使用してリクエストを認証できます。このパラメーターはオプションです。認証情報が指定されていない場合は、設定ファイルから使用されます。 - -エンジンパラメーターは [Named Collections](/operations/named-collections.md) を使用して指定できます。 - -**例** - -```sql -CREATE TABLE hudi_table ENGINE=Hudi('http://mars-doc-test.s3.amazonaws.com/clickhouse-bucket-3/test_table/', 'ABC123', 'Abc+123') -``` - -名付けられたコレクションを使用する場合: - -```xml - - - - http://mars-doc-test.s3.amazonaws.com/clickhouse-bucket-3/ - ABC123 - Abc+123 - - - -``` - -```sql -CREATE TABLE hudi_table ENGINE=Hudi(hudi_conf, filename = 'test_table') -``` - -## 関連項目 {#see-also} - -- [hudi テーブル関数](/sql-reference/table-functions/hudi.md) diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/hudi.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/hudi.md.hash deleted file mode 100644 index e01b1cb1936..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/hudi.md.hash +++ /dev/null @@ -1 +0,0 @@ -20d81132e0861552 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/iceberg.md b/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/iceberg.md deleted file mode 100644 index 55a0f14116c..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/iceberg.md +++ /dev/null @@ -1,299 +0,0 @@ ---- -description: 'This engine provides a read-only integration with existing Apache - Iceberg tables in Amazon S3, Azure, HDFS and locally stored tables.' 
-sidebar_label: 'Iceberg' -sidebar_position: 90 -slug: '/engines/table-engines/integrations/iceberg' -title: 'Iceberg Table Engine' ---- - - - - -# Iceberg テーブルエンジン {#iceberg-table-engine} - -:::warning -ClickHouseでIcebergデータを扱うためには、[Iceberg テーブル関数](/sql-reference/table-functions/iceberg.md)の使用を推奨します。Iceberg テーブル関数は現在、Iceberg テーブルに対して部分的な読み取り専用インターフェースを提供する十分な機能を備えています。 - -Iceberg テーブルエンジンは利用可能ですが、制限がある場合があります。ClickHouseは元々、外部で変更されるスキーマを持つテーブルをサポートするように設計されていないため、Iceberg テーブルエンジンの機能に影響を与える可能性があります。その結果、通常のテーブルで動作する機能の一部が利用できないか、正しく機能しない場合があります。特に古いアナライザーを使用している場合です。 - -最適な互換性のために、Iceberg テーブルエンジンのサポートを改善し続ける間、Iceberg テーブル関数の使用をお勧めします。 -::: - -このエンジンは、Amazon S3、Azure、HDFS、およびローカルに保存されたテーブルにある既存のApache [Iceberg](https://iceberg.apache.org/) テーブルとの読み取り専用統合を提供します。 - -## テーブル作成 {#create-table} - -Icebergテーブルはストレージ内に既に存在している必要があります。このコマンドは新しいテーブルを作成するためのDDLパラメータを取らないことに注意してください。 - -```sql -CREATE TABLE iceberg_table_s3 - ENGINE = IcebergS3(url, [, NOSIGN | access_key_id, secret_access_key, [session_token]], format, [,compression]) - -CREATE TABLE iceberg_table_azure - ENGINE = IcebergAzure(connection_string|storage_account_url, container_name, blobpath, [account_name, account_key, format, compression]) - -CREATE TABLE iceberg_table_hdfs - ENGINE = IcebergHDFS(path_to_table, [,format] [,compression_method]) - -CREATE TABLE iceberg_table_local - ENGINE = IcebergLocal(path_to_table, [,format] [,compression_method]) -``` - -## エンジン引数 {#engine-arguments} - -引数の説明は、エンジン `S3`、`AzureBlobStorage`、`HDFS` および `File` の引数の説明と一致します。 -`format` はIcebergテーブルのデータファイルのフォーマットを表します。 - -エンジンパラメータは、[Named Collections](../../../operations/named-collections.md)を使用して指定できます。 - -### 例 {#example} - -```sql -CREATE TABLE iceberg_table ENGINE=IcebergS3('http://test.s3.amazonaws.com/clickhouse-bucket/test_table', 'test', 'test') -``` - -名前付きコレクションを使用する場合: - -```xml - - - - http://test.s3.amazonaws.com/clickhouse-bucket/ - test - test - - - -``` - -```sql -CREATE TABLE iceberg_table ENGINE=IcebergS3(iceberg_conf, filename = 'test_table') - -``` - -## エイリアス {#aliases} - -テーブルエンジン `Iceberg` は現時点で `IcebergS3` のエイリアスです。 - -## スキーマ進化 {#schema-evolution} -現在、CHを使用すると、時間とともにスキーマが変更されたIcebergテーブルを読み取ることができます。現在、列の追加や削除、列の順序変更が行われたテーブルの読み取りをサポートしています。また、値が必須のカラムをNULLを許可するカラムに変更することも可能です。さらに、次の単純型に対する型キャストをサポートしています:   -* int -> long -* float -> double -* decimal(P, S) -> decimal(P', S) ただし P' > P。 - -現在、ネストされた構造や配列およびマップ内の要素の型を変更することはできません。 - -スキーマが作成後に変更されたテーブルを動的スキーマ推論で読み取るには、テーブルの作成時に `allow_dynamic_metadata_for_data_lakes = true` を設定します。 - -## パーティションプルーニング {#partition-pruning} - -ClickHouseはIcebergテーブルに対するSELECTクエリ中にパーティションプルーニングをサポートしており、これにより無関係なデータファイルをスキップすることでクエリパフォーマンスを最適化します。パーティションプルーニングを有効にするには、 `use_iceberg_partition_pruning = 1` を設定します。Icebergパーティションプルーニングの詳細については、https://iceberg.apache.org/spec/#partitioningにアクセスしてください。 - -## タイムトラベル {#time-travel} - -ClickHouseはIcebergテーブルに対するタイムトラベルをサポートしており、特定のタイムスタンプまたはスナップショットIDを使用して過去のデータをクエリすることができます。 - -### 基本的な使い方 {#basic-usage} - ```sql - SELECT * FROM example_table ORDER BY 1 - SETTINGS iceberg_timestamp_ms = 1714636800000 - ``` - - ```sql - SELECT * FROM example_table ORDER BY 1 - SETTINGS iceberg_snapshot_id = 3547395809148285433 - ``` - -注意:同一のクエリで `iceberg_timestamp_ms` と `iceberg_snapshot_id` の両方のパラメータを指定することはできません。 - -### 重要な考慮事項 {#important-considerations} - -- **スナップショット** は通常、以下のときに作成されます: - - テーブルに新しいデータが書き込まれるとき - - 何らかのデータ圧縮が行われるとき - -- **スキーマの変更は通常スナップショットを作成しません** - これは、スキーマ進化が行われたテーブルでタイムトラベルを使用するときに重要な挙動につながります。 - -### 例となるシナリオ {#example-scenarios} - 
-すべてのシナリオはSparkで記述されています。ClickHouseは現在Icebergテーブルへの書き込みをサポートしていないためです。 - -#### シナリオ 1: 新しいスナップショットなしのスキーマ変更 {#scenario-1} - -以下の操作のシーケンスを考えます: - - ```sql - -- 2つの列を持つテーブルを作成 - CREATE TABLE IF NOT EXISTS spark_catalog.db.time_travel_example ( - order_number int, - product_code string - ) - USING iceberg - OPTIONS ('format-version'='2') - --- テーブルにデータを挿入 - INSERT INTO spark_catalog.db.time_travel_example VALUES - (1, 'Mars') - - ts1 = now() // 擬似コードの一部 - --- テーブルを変更して新しい列を追加 - ALTER TABLE spark_catalog.db.time_travel_example ADD COLUMN (price double) - - ts2 = now() - --- テーブルにデータを挿入 - INSERT INTO spark_catalog.db.time_travel_example VALUES (2, 'Venus', 100) - - ts3 = now() - --- 各タイムスタンプでテーブルをクエリ - SELECT * FROM spark_catalog.db.time_travel_example TIMESTAMP AS OF ts1; - -+------------+------------+ -|order_number|product_code| -+------------+------------+ -| 1| Mars| -+------------+------------+ - - - SELECT * FROM spark_catalog.db.time_travel_example TIMESTAMP AS OF ts2; - -+------------+------------+ -|order_number|product_code| -+------------+------------+ -| 1| Mars| -+------------+------------+ - - SELECT * FROM spark_catalog.db.time_travel_example TIMESTAMP AS OF ts3; - -+------------+------------+-----+ -|order_number|product_code|price| -+------------+------------+-----+ -| 1| Mars| NULL| -| 2| Venus|100.0| -+------------+------------+-----+ -``` - -異なるタイムスタンプでのクエリ結果: - -- ts1 と ts2 では、オリジナルの2つの列のみが表示されます。 -- ts3では、すべての3つの列が表示され、最初の行の価格はNULLになります。 - -#### シナリオ 2: 過去のスキーマと現在のスキーマの違い {#scenario-2} - - -現在の瞬間でのタイムトラベルクエリは、現在のテーブルとは異なるスキーマを示す場合があります: - -```sql --- テーブルを作成 - CREATE TABLE IF NOT EXISTS spark_catalog.db.time_travel_example_2 ( - order_number int, - product_code string - ) - USING iceberg - OPTIONS ('format-version'='2') - --- テーブルに初期データを挿入 - INSERT INTO spark_catalog.db.time_travel_example_2 VALUES (2, 'Venus'); - --- テーブルを変更して新しい列を追加 - ALTER TABLE spark_catalog.db.time_travel_example_2 ADD COLUMN (price double); - - ts = now(); - --- 現在の瞬間のテーブルをクエリしますが、タイムスタンプ構文を使用します - - SELECT * FROM spark_catalog.db.time_travel_example_2 TIMESTAMP AS OF ts; - - +------------+------------+ - |order_number|product_code| - +------------+------------+ - | 2| Venus| - +------------+------------+ - --- 現在の瞬間のテーブルをクエリします - SELECT * FROM spark_catalog.db.time_travel_example_2; - - - +------------+------------+-----+ - |order_number|product_code|price| - +------------+------------+-----+ - | 2| Venus| NULL| - +------------+------------+-----+ -``` - -これは、`ALTER TABLE` が新しいスナップショットを作成しないために発生しますが、現在のテーブルに対してSparkは最新のメタデータファイルから `schema_id` の値を取得するためです。 - -#### シナリオ 3: 過去のスキーマと現在のスキーマの違い {#scenario-3} - -もう一つは、タイムトラベルを行っているときに、任意のデータが書き込まれる前のテーブルの状態を取得できないことです: - -```sql --- テーブルを作成 - CREATE TABLE IF NOT EXISTS spark_catalog.db.time_travel_example_3 ( - order_number int, - product_code string - ) - USING iceberg - OPTIONS ('format-version'='2'); - - ts = now(); - --- 特定のタイムスタンプでテーブルをクエリ - SELECT * FROM spark_catalog.db.time_travel_example_3 TIMESTAMP AS OF ts; -- エラー: tsより古いスナップショットが見つかりません。 -``` - - -Clickhouseの動作はSparkと一貫しています。SparkのSelectクエリをClickhouseのSelectクエリに置き換えることができ、同じように機能します。 - -## メタデータファイルの解決 {#metadata-file-resolution} -ClickHouseで`Iceberg`テーブルエンジンを使用する際、システムはIcebergテーブルの構造を記述した正しいmetadata.jsonファイルを見つける必要があります。この解決プロセスの仕組みは次のとおりです。 - -### 候補の検索(優先順) {#candidate-search} - -1. **直接パスの指定**: - * `iceberg_metadata_file_path` を設定すると、システムはこの正確なパスをIcebergテーブルのディレクトリパスと組み合わせて使用します。 - * この設定が提供されると、他の解決設定は無視されます。 - -2. 
**テーブルUUIDの一致**: - * `iceberg_metadata_table_uuid` が指定されている場合、システムは: - * `metadata` ディレクトリ内の `.metadata.json` ファイルのみを調べます。 - * 指定したUUIDと一致する `table-uuid` フィールドを含むファイルをフィルタリングします(大文字と小文字を区別しません)。 - -3. **デフォルトの検索**: - * 上記の設定がいずれも提供されていない場合、`metadata` ディレクトリ内のすべての `.metadata.json` ファイルが候補になります。 - -### 最新のファイルの選択 {#most-recent-file} - -上記の規則を使用して候補ファイルを特定した後、システムは最も新しいファイルを決定します。 - -* `iceberg_recent_metadata_file_by_last_updated_ms_field` が有効な場合: - * `last-updated-ms` 値が最大のファイルが選択されます。 - -* それ以外の場合: - * バージョン番号が最も高いファイルが選択されます。 - * (バージョンは、 `V.metadata.json` または `V-uuid.metadata.json` という形式のファイル名に `V` として表示されます。) - -**注**: 上記に言及したすべての設定はエンジンレベルの設定であり、テーブルの作成時に以下のように指定する必要があります: - -```sql -CREATE TABLE example_table ENGINE = Iceberg( - 's3://bucket/path/to/iceberg_table' -) SETTINGS iceberg_metadata_table_uuid = '6f6f6407-c6a5-465f-a808-ea8900e35a38'; -``` - -**注**: Icebergカタログは通常、メタデータ解決を処理しますが、ClickHouseの `Iceberg` テーブルエンジンは S3 に保存されたファイルを直接 Iceberg テーブルとして解釈します。これが、これらの解決ルールを理解することが重要な理由です。 - -## データキャッシュ {#data-cache} - -`Iceberg` テーブルエンジンおよびテーブル関数は、 `S3`、`AzureBlobStorage`、`HDFS` ストレージと同様にデータキャッシングをサポートしています。詳しくは[こちら](../../../engines/table-engines/integrations/s3.md#data-cache)。 - -## メタデータキャッシュ {#metadata-cache} - -`Iceberg` テーブルエンジンおよびテーブル関数は、マニフェストファイル、マニフェストリスト、メタデータjsonの情報を保存するメタデータキャッシュをサポートしています。キャッシュはメモリ内に保存されます。この機能は `use_iceberg_metadata_files_cache` を設定することで制御されており、デフォルトで有効になっています。 - -## 参照 {#see-also} - -- [iceberg テーブル関数](/sql-reference/table-functions/iceberg.md) diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/iceberg.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/iceberg.md.hash deleted file mode 100644 index 969e6f40954..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/iceberg.md.hash +++ /dev/null @@ -1 +0,0 @@ -4d90ce3130afc6bf diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/index.md b/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/index.md deleted file mode 100644 index 4a7c43e71eb..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/index.md +++ /dev/null @@ -1,46 +0,0 @@ ---- -description: 'Documentation for Table Engines for Integrations' -sidebar_label: 'Integrations' -sidebar_position: 40 -slug: '/engines/table-engines/integrations/' -title: 'Table Engines for Integrations' ---- - - - - -# Table Engines for Integrations - -ClickHouseは、テーブルエンジンを含む外部システムとの統合手段を提供します。他のテーブルエンジンと同様に、設定は`CREATE TABLE`または`ALTER TABLE`クエリを使用して行われます。そして、ユーザーの視点から見ると、設定された統合は通常のテーブルのように見えますが、それに対するクエリは外部システムにプロキシされます。この透過的なクエリ処理は、毎回カスタムクエリメソッドの使用を必要とする辞書やテーブル関数などの代替統合方法に対する、このアプローチの主な利点の一つです。 - - -| ページ | 説明 | -|-----|-----| -| [Kafka](/engines/table-engines/integrations/kafka) | KafkaエンジンはApache Kafkaと連携し、データフローの公開や購読、フォールトトレラントストレージの整理、および利用可能になるストリームの処理を可能にします。 | -| [Iceberg Table Engine](/engines/table-engines/integrations/iceberg) | このエンジンは、Amazon S3、Azure、HDFSにある既存のApache Icebergテーブルとのリードオンリー統合を提供します。 | -| [RabbitMQ Engine](/engines/table-engines/integrations/rabbitmq) | このエンジンは、ClickHouseとRabbitMQの統合を可能にします。 | -| [EmbeddedRocksDB Engine](/engines/table-engines/integrations/embedded-rocksdb) | このエンジンは、ClickHouseとRocksDBの統合を可能にします。 | -| [Hive](/engines/table-engines/integrations/hive) | HiveエンジンはHDFS Hiveテーブルに対して`SELECT`クエリを実行できるようにします。 | -| [Hudi Table Engine](/engines/table-engines/integrations/hudi) | 
このエンジンは、Amazon S3にある既存のApache Hudiテーブルとのリードオンリー統合を提供します。 | -| [Redis](/engines/table-engines/integrations/redis) | このエンジンは、ClickHouseとRedisの統合を可能にします。 | -| [The MySQL engine allows you to perform `SELECT` and `INSERT` queries on data that is stored on a remote MySQL server.](/engines/table-engines/integrations/mysql) | MySQLテーブルエンジンのドキュメント | -| [MaterializedPostgreSQL](/engines/table-engines/integrations/materialized-postgresql) | PostgreSQLテーブルの初期データダンプを持つClickHouseテーブルを作成し、レプリケーションプロセスを開始します。 | -| [S3 Table Engine](/engines/table-engines/integrations/s3) | このエンジンは、Amazon S3エコシステムとの統合を提供します。HDFSエンジンに類似していますが、S3固有の機能を提供します。 | -| [HDFS](/engines/table-engines/integrations/hdfs) | このエンジンは、ClickHouseを介してHDFSのデータを管理できるようにし、Apache Hadoopエコシステムとの統合を提供します。このエンジンは、ファイルおよびURLエンジンに似ていますが、Hadoop固有の機能を提供します。 | -| [ExternalDistributed](/engines/table-engines/integrations/ExternalDistributed) | `ExternalDistributed`エンジンは、リモートサーバーのMySQLまたはPostgreSQLに保存されたデータに対して`SELECT`クエリを実行することを可能にします。MySQLまたはPostgreSQLエンジンを引数として受け入れ、シャーディングが可能です。 | -| [DeltaLake Table Engine](/engines/table-engines/integrations/deltalake) | このエンジンは、Amazon S3にある既存のDelta Lakeテーブルとのリードオンリー統合を提供します。 | -| [PostgreSQL Table Engine](/engines/table-engines/integrations/postgresql) | PostgreSQLエンジンは、リモートPostgreSQLサーバーに保存されたデータに対して`SELECT`および`INSERT`クエリを実行できるようにします。 | -| [AzureBlobStorage Table Engine](/engines/table-engines/integrations/azureBlobStorage) | このエンジンは、Azure Blob Storageエコシステムとの統合を提供します。 | -| [ODBC](/engines/table-engines/integrations/odbc) | ClickHouseがODBCを介して外部データベースに接続できるようにします。 | -| [JDBC](/engines/table-engines/integrations/jdbc) | ClickHouseがJDBCを介して外部データベースに接続できるようにします。 | -| [NATS Engine](/engines/table-engines/integrations/nats) | このエンジンは、ClickHouseとNATSを統合し、メッセージのサブジェクトを公開または購読でき、新しいメッセージが利用可能になるとそれを処理できるようにします。 | -| [SQLite](/engines/table-engines/integrations/sqlite) | このエンジンは、SQLiteへのデータのインポートとエクスポートを可能にし、ClickHouseからSQLiteテーブルへのクエリを直接サポートします。 | -| [S3Queue Table Engine](/engines/table-engines/integrations/s3queue) | このエンジンは、Amazon S3エコシステムとの統合を提供し、ストリーミングインポートを可能にします。KafkaおよびRabbitMQエンジンに似ていますが、S3固有の機能を提供します。 | -| [AzureQueue Table Engine](/engines/table-engines/integrations/azure-queue) | このエンジンは、Azure Blob Storageエコシステムとの統合を提供し、ストリーミングデータのインポートを可能にします。 | -| [TimeSeries Engine](/engines/table-engines/special/time_series) | タイムスタンプとタグ(またはラベル)に関連付けられた値のセットを持つ時系列を保存するテーブルエンジンです。 | -| [MongoDB](/engines/table-engines/integrations/mongodb) | MongoDBエンジンはリードオンリーのテーブルエンジンで、リモートコレクションからデータを読み取ることができます。 | diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/index.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/index.md.hash deleted file mode 100644 index c818f1d461e..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/index.md.hash +++ /dev/null @@ -1 +0,0 @@ -78e42d796bdfc86a diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/jdbc.md b/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/jdbc.md deleted file mode 100644 index fd3ec8abac4..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/jdbc.md +++ /dev/null @@ -1,106 +0,0 @@ ---- -description: 'Allows ClickHouse to connect to external databases via JDBC.' 
-sidebar_label: 'JDBC' -sidebar_position: 100 -slug: '/engines/table-engines/integrations/jdbc' -title: 'JDBC' ---- - -import CloudNotSupportedBadge from '@theme/badges/CloudNotSupportedBadge'; - - -# JDBC - - - -:::note -clickhouse-jdbc-bridge には実験的なコードが含まれており、もはやサポートされていません。信頼性の問題やセキュリティの脆弱性が含まれている可能性があります。自己の責任で使用してください。 -ClickHouseは、アドホッククエリシナリオに対してより良い代替手段を提供する、ClickHouse内の組み込みテーブル関数の使用を推奨しています(Postgres、MySQL、MongoDBなど)。 -::: - -ClickHouseが外部データベースに[ JDBC](https://en.wikipedia.org/wiki/Java_Database_Connectivity)を介して接続できるようにします。 - -JDBC接続を実装するために、ClickHouseはデーモンとして実行する必要がある別のプログラム[clickhouse-jdbc-bridge](https://github.com/ClickHouse/clickhouse-jdbc-bridge)を使用します。 - -このエンジンは[Nullable](../../../sql-reference/data-types/nullable.md)データ型をサポートしています。 - -## テーブルの作成 {#creating-a-table} - -```sql -CREATE TABLE [IF NOT EXISTS] [db.]table_name -( - columns list... -) -ENGINE = JDBC(datasource_uri, external_database, external_table) -``` - -**エンジンパラメータ** - - -- `datasource_uri` — 外部DBMSのURIまたは名前。 - - URI形式: `jdbc:://:/?user=&password=`。 - MySQLの例: `jdbc:mysql://localhost:3306/?user=root&password=root`。 - -- `external_database` — 外部DBMS内のデータベース。 - -- `external_table` — `external_database`内のテーブル名、または`select * from table1 where column1=1`のような選択クエリ。 - -## 使用例 {#usage-example} - -MySQLサーバーにおいて、コンソールクライアントを介して直接テーブルを作成します: - -```text -mysql> CREATE TABLE `test`.`test` ( - -> `int_id` INT NOT NULL AUTO_INCREMENT, - -> `int_nullable` INT NULL DEFAULT NULL, - -> `float` FLOAT NOT NULL, - -> `float_nullable` FLOAT NULL DEFAULT NULL, - -> PRIMARY KEY (`int_id`)); -Query OK, 0 rows affected (0,09 sec) - -mysql> insert into test (`int_id`, `float`) VALUES (1,2); -Query OK, 1 row affected (0,00 sec) - -mysql> select * from test; -+------+----------+-----+----------+ -| int_id | int_nullable | float | float_nullable | -+------+----------+-----+----------+ -| 1 | NULL | 2 | NULL | -+------+----------+-----+----------+ -1 row in set (0,00 sec) -``` - -ClickHouseサーバーにテーブルを作成し、そこからデータを選択します: - -```sql -CREATE TABLE jdbc_table -( - `int_id` Int32, - `int_nullable` Nullable(Int32), - `float` Float32, - `float_nullable` Nullable(Float32) -) -ENGINE JDBC('jdbc:mysql://localhost:3306/?user=root&password=root', 'test', 'test') -``` - -```sql -SELECT * -FROM jdbc_table -``` - -```text -┌─int_id─┬─int_nullable─┬─float─┬─float_nullable─┐ -│ 1 │ ᴺᵁᴸᴸ │ 2 │ ᴺᵁᴸᴸ │ -└────────┴──────────────┴───────┴────────────────┘ -``` - -```sql -INSERT INTO jdbc_table(`int_id`, `float`) -SELECT toInt32(number), toFloat32(number * 1.0) -FROM system.numbers -``` - -## 参照 {#see-also} - -- [JDBCテーブル関数](../../../sql-reference/table-functions/jdbc.md)。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/jdbc.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/jdbc.md.hash deleted file mode 100644 index 37f308eef35..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/jdbc.md.hash +++ /dev/null @@ -1 +0,0 @@ -bd04b495a90d6683 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/kafka.md b/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/kafka.md deleted file mode 100644 index df007366f38..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/kafka.md +++ /dev/null @@ -1,319 +0,0 @@ ---- -description: 'The Kafka engine works with Apache Kafka and lets you publish or subscribe - to data flows, organize 
fault-tolerant storage, and process streams as they become - available.' -sidebar_label: 'Kafka' -sidebar_position: 110 -slug: '/engines/table-engines/integrations/kafka' -title: 'Kafka' ---- - -import ExperimentalBadge from '@theme/badges/ExperimentalBadge'; -import CloudNotSupportedBadge from '@theme/badges/CloudNotSupportedBadge'; - -# Kafka - - - -:::note -ClickHouse Cloud ユーザーには、[ClickPipes](/integrations/clickpipes) を使用して Kafka データを ClickHouse にストリーミングすることを推奨します。これは、高パフォーマンスの挿入をネイティブにサポートし、取り込みとクラスターリソースを独立してスケーリングできるように、関心の分離を保証します。 -::: - -このエンジンは [Apache Kafka](http://kafka.apache.org/) で動作します。 - -Kafka では以下が可能です: - -- データフローの発行または購読。 -- 障害耐性のあるストレージの整理。 -- 利用可能になったストリームの処理。 - -## テーブルの作成 {#creating-a-table} - -```sql -CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] -( - name1 [type1] [ALIAS expr1], - name2 [type2] [ALIAS expr2], - ... -) ENGINE = Kafka() -SETTINGS - kafka_broker_list = 'host:port', - kafka_topic_list = 'topic1,topic2,...', - kafka_group_name = 'group_name', - kafka_format = 'data_format'[,] - [kafka_security_protocol = '',] - [kafka_sasl_mechanism = '',] - [kafka_sasl_username = '',] - [kafka_sasl_password = '',] - [kafka_schema = '',] - [kafka_num_consumers = N,] - [kafka_max_block_size = 0,] - [kafka_skip_broken_messages = N,] - [kafka_commit_every_batch = 0,] - [kafka_client_id = '',] - [kafka_poll_timeout_ms = 0,] - [kafka_poll_max_batch_size = 0,] - [kafka_flush_interval_ms = 0,] - [kafka_thread_per_consumer = 0,] - [kafka_handle_error_mode = 'default',] - [kafka_commit_on_select = false,] - [kafka_max_rows_per_message = 1]; -``` - -必須パラメーター: - -- `kafka_broker_list` — ブローカーのカンマ区切りリスト(例えば、`localhost:9092`)。 -- `kafka_topic_list` — Kafka トピックのリスト。 -- `kafka_group_name` — Kafka コンシューマーのグループ。読み取りマージンは各グループごとに個別に追跡されます。クラスターでメッセージが重複しないようにするには、どこでも同じグループ名を使用してください。 -- `kafka_format` — メッセージフォーマット。SQL の `FORMAT` 関数と同じ表記を使用します。例:`JSONEachRow`。詳細については、[Formats](../../../interfaces/formats.md) セクションを参照してください。 - -オプションのパラメーター: - -- `kafka_security_protocol` - ブローカーとの通信に使用されるプロトコル。可能な値:`plaintext`、`ssl`、`sasl_plaintext`、`sasl_ssl`。 -- `kafka_sasl_mechanism` - 認証に使用する SASL メカニズム。可能な値:`GSSAPI`、`PLAIN`、`SCRAM-SHA-256`、`SCRAM-SHA-512`、`OAUTHBEARER`。 -- `kafka_sasl_username` - `PLAIN` および `SASL-SCRAM-..` メカニズムで使用する SASL ユーザー名。 -- `kafka_sasl_password` - `PLAIN` および `SASL-SCRAM-..` メカニズムで使用する SASL パスワード。 -- `kafka_schema` — フォーマットがスキーマ定義を必要とする場合に使用する必要があるパラメーター。たとえば、[Cap'n Proto](https://capnproto.org/) では、スキーマファイルのパスとルート `schema.capnp:Message` オブジェクトの名前を要求します。 -- `kafka_num_consumers` — テーブルごとのコンシューマーの数。1 つのコンシューマーのスループットが不十分な場合は、より多くのコンシューマーを指定してください。全コンシューマーの数はトピック内のパーティションの数を超えてはいけません。なぜなら、1 つのパーティションには 1 つのコンシューマーのみを割り当てることができ、ClickHouse がデプロイされているサーバーの物理コア数を超えてはいけないからです。デフォルト:`1`。 -- `kafka_max_block_size` — ポーリングのための最大バッチサイズ(メッセージ単位)。デフォルト:[max_insert_block_size](../../../operations/settings/settings.md#max_insert_block_size)。 -- `kafka_skip_broken_messages` — スキーマと互換性のないメッセージごとの Kafka メッセージパーサーの耐性。`kafka_skip_broken_messages = N` の場合、エンジンはパースできない *N* の Kafka メッセージをスキップします(メッセージはデータの行に等しい)。デフォルト:`0`。 -- `kafka_commit_every_batch` — すべての消費されたおよび処理されたバッチをコミットし、全ブロックを書き込んだ後の単一コミットを避けます。デフォルト:`0`。 -- `kafka_client_id` — クライアント識別子。デフォルトは空です。 -- `kafka_poll_timeout_ms` — Kafka からの単一ポーリングのタイムアウト。デフォルト:[stream_poll_timeout_ms](../../../operations/settings/settings.md#stream_poll_timeout_ms)。 -- `kafka_poll_max_batch_size` — 単一の Kafka ポーリングでポーリングされる最大メッセージ数。デフォルト:[max_block_size](/operations/settings/settings#max_block_size)。 -- `kafka_flush_interval_ms` — 
Kafka からのデータフラッシュのタイムアウト。デフォルト:[stream_flush_interval_ms](/operations/settings/settings#stream_flush_interval_ms)。 -- `kafka_thread_per_consumer` — 各コンシューマーに独立したスレッドを提供します。有効にすると、各コンシューマーは独立してデータをフラッシュし、並行して処理します(そうでなければ、いくつかのコンシューマーからの行が1つのブロックにまとめられます)。デフォルト:`0`。 -- `kafka_handle_error_mode` — Kafka エンジンのエラー処理方法。可能な値:デフォルト(メッセージのパースに失敗した場合は例外がスローされます)、ストリーム(例外メッセージと生のメッセージが仮想カラム `_error` と `_raw_message` に保存されます)。 -- `kafka_commit_on_select` — SELECT クエリが実行されたときにメッセージをコミットします。デフォルト:`false`。 -- `kafka_max_rows_per_message` — 行ベースのフォーマットの単一 Kafka メッセージで書き込まれる最大行数。デフォルト : `1`。 - -例: - -```sql - CREATE TABLE queue ( - timestamp UInt64, - level String, - message String - ) ENGINE = Kafka('localhost:9092', 'topic', 'group1', 'JSONEachRow'); - - SELECT * FROM queue LIMIT 5; - - CREATE TABLE queue2 ( - timestamp UInt64, - level String, - message String - ) ENGINE = Kafka SETTINGS kafka_broker_list = 'localhost:9092', - kafka_topic_list = 'topic', - kafka_group_name = 'group1', - kafka_format = 'JSONEachRow', - kafka_num_consumers = 4; - - CREATE TABLE queue3 ( - timestamp UInt64, - level String, - message String - ) ENGINE = Kafka('localhost:9092', 'topic', 'group1') - SETTINGS kafka_format = 'JSONEachRow', - kafka_num_consumers = 4; -``` - -
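
認証が必要なブローカーに接続する場合は、上記のオプションパラメータ(`kafka_security_protocol`、`kafka_sasl_mechanism` など)を必須パラメータと組み合わせて指定します。以下は `sasl_ssl` と `SCRAM-SHA-256` を使う場合の一例です(ブローカーアドレス、ユーザー名、パスワードはいずれも説明用の仮の値であり、実際の環境に合わせて置き換えてください):

```sql
CREATE TABLE queue_sasl (
    timestamp UInt64,
    level String,
    message String
) ENGINE = Kafka
SETTINGS
    kafka_broker_list = 'kafka-broker:9093',   -- 仮のブローカーアドレス
    kafka_topic_list = 'topic',
    kafka_group_name = 'group1',
    kafka_format = 'JSONEachRow',
    kafka_security_protocol = 'sasl_ssl',
    kafka_sasl_mechanism = 'SCRAM-SHA-256',
    kafka_sasl_username = 'kafkauser',         -- 仮のユーザー名
    kafka_sasl_password = 'secret';            -- 仮のパスワード
```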

- -テーブルを作成するための非推奨メソッド - -:::note -新しいプロジェクトではこの方法を使用しないでください。可能であれば、古いプロジェクトは上記の方法に移行してください。 -::: - -```sql -Kafka(kafka_broker_list, kafka_topic_list, kafka_group_name, kafka_format - [, kafka_row_delimiter, kafka_schema, kafka_num_consumers, kafka_max_block_size, kafka_skip_broken_messages, kafka_commit_every_batch, kafka_client_id, kafka_poll_timeout_ms, kafka_poll_max_batch_size, kafka_flush_interval_ms, kafka_thread_per_consumer, kafka_handle_error_mode, kafka_commit_on_select, kafka_max_rows_per_message]); -``` - -
- -:::info -Kafka テーブルエンジンは、[default value](/sql-reference/statements/create/table#default_values) を持つカラムをサポートしていません。デフォルト値を持つカラムが必要な場合は、マテリアライズドビューのレベルで追加できます(下記を参照)。 -::: - -## 説明 {#description} - -配信されたメッセージは自動的に追跡されるため、各グループの各メッセージは1 回だけカウントされます。データを二重に取得したい場合は、別のグループ名でテーブルのコピーを作成してください。 - -グループは柔軟で、クラスターで同期されています。たとえば、10 のトピックとクラスター内に 5 つのテーブルのコピーがある場合、各コピーは 2 つのトピックを取得します。コピーの数が変更されると、トピックは自動的にコピー間で再配分されます。このことについては、http://kafka.apache.org/intro で詳しく読むことができます。 - -`SELECT` は特にメッセージを読み取るためには便利ではありません(デバッグを除く)、なぜなら各メッセージは 1 回しか読み取れないからです。リアルタイムスレッドをマテリアライズドビューを使用して作成することがより実用的です。そのためには: - -1. エンジンを使用して Kafka コンシューマーを作成し、それをデータストリームと見なします。 -2. 必要な構造のテーブルを作成します。 -3. エンジンからデータを変換し、事前に作成されたテーブルに配置するマテリアライズドビューを作成します。 - -`MATERIALIZED VIEW` がエンジンに参加すると、バックグラウンドでデータの集計を開始します。これにより、Kafka からメッセージを継続的に受信し、`SELECT` を使用して必要なフォーマットに変換できます。 -1 つの Kafka テーブルには、好きなだけのマテリアライズドビューを持つことができ、これらは Kafka テーブルから直接データを読み取ることはなく、新しいレコード(ブロック単位)を受け取ります。この方法で、異なる詳細レベルで複数のテーブルに書き込むことができます(グルーピング - 集約ありおよびなし)。 - -例: - -```sql - CREATE TABLE queue ( - timestamp UInt64, - level String, - message String - ) ENGINE = Kafka('localhost:9092', 'topic', 'group1', 'JSONEachRow'); - - CREATE TABLE daily ( - day Date, - level String, - total UInt64 - ) ENGINE = SummingMergeTree(day, (day, level), 8192); - - CREATE MATERIALIZED VIEW consumer TO daily - AS SELECT toDate(toDateTime(timestamp)) AS day, level, count() as total - FROM queue GROUP BY day, level; - - SELECT level, sum(total) FROM daily GROUP BY level; -``` -パフォーマンスを向上させるために、受信したメッセージは [max_insert_block_size](../../../operations/settings/settings.md#max_insert_block_size) のサイズのブロックにグループ化されます。ブロックが [stream_flush_interval_ms](/operations/settings/settings.md#stream_flush_interval_ms) ミリ秒以内に形成されなかった場合は、ブロックの完全性に関係なくデータがテーブルにフラッシュされます。 - -トピックデータの受信を停止するか、変換ロジックを変更するには、マテリアライズドビューを切り離します: - -```sql - DETACH TABLE consumer; - ATTACH TABLE consumer; -``` - -`ALTER` を使用してターゲットテーブルを変更する場合、ターゲットテーブルとビューからのデータ間の不一致を避けるために、マテリアルビューを無効にすることをお勧めします。 - -## 設定 {#configuration} - -GraphiteMergeTree と同様に、Kafka エンジンは ClickHouse 設定ファイルを使用した拡張設定をサポートしています。使用できる設定キーは、グローバル(`` の下)とトピックレベル(`` の下)の 2 つです。グローバル設定が最初に適用され、その後トピックレベルの設定が適用されます(存在する場合)。 - -```xml - - - cgrp - 3000 - - - logs - 4000 - - - - - smallest - - logs - 100000 - - - - stats - 50000 - - - - - - - logs - 250 - - - - stats - 400 - - - -``` - - -利用可能な設定オプションのリストについては、[librdkafka configuration reference](https://github.com/edenhill/librdkafka/blob/master/CONFIGURATION.md) を参照してください。ClickHouse 設定では、ドットの代わりにアンダースコア(`_`)を使用します。たとえば、`check.crcs=true` は `true` になります。 - -### Kerberos サポート {#kafka-kerberos-support} - -Kerberos 対応 Kafka を扱うには、`sasl_plaintext` 値を持つ `security_protocol` 子要素を追加します。OS の機能によって Kerberos チケット授与チケットが取得され、キャッシュされていれば十分です。 -ClickHouse はキータブファイルを使用して Kerberos 資格情報を管理できます。`sasl_kerberos_service_name`、`sasl_kerberos_keytab` および `sasl_kerberos_principal` 子要素を考慮してください。 - -例: - -```xml - - - SASL_PLAINTEXT - /home/kafkauser/kafkauser.keytab - kafkauser/kafkahost@EXAMPLE.COM - -``` - -## 仮想カラム {#virtual-columns} - -- `_topic` — Kafka トピック。データ型:`LowCardinality(String)`。 -- `_key` — メッセージの鍵。データ型:`String`。 -- `_offset` — メッセージのオフセット。データ型:`UInt64`。 -- `_timestamp` — メッセージのタイムスタンプ データ型:`Nullable(DateTime)`。 -- `_timestamp_ms` — メッセージのミリ秒単位のタイムスタンプ。データ型:`Nullable(DateTime64(3))`。 -- `_partition` — Kafka トピックのパーティション。データ型:`UInt64`。 -- `_headers.name` — メッセージのヘッダーキーの配列。データ型:`Array(String)`。 -- `_headers.value` — メッセージのヘッダー値の配列。データ型:`Array(String)`。 - -`kafka_handle_error_mode='stream'` の場合の追加仮想カラム: - -- `_raw_message` - 
正しく解析できなかった生メッセージ。データ型:`String`。 -- `_error` - 解析に失敗した際に発生した例外メッセージ。データ型:`String`。 - -注:`_raw_message` と `_error` の仮想カラムは、解析中の例外の場合にのみ埋められ、メッセージが正常に解析された場合は常に空です。 - -## データフォーマットのサポート {#data-formats-support} - -Kafka エンジンは、ClickHouse でサポートされているすべての [formats](../../../interfaces/formats.md) をサポートしています。 -1 つの Kafka メッセージの行数は、フォーマットが行ベースかブロックベースかによって異なります。 - -- 行ベースのフォーマットの場合、1 つの Kafka メッセージの行数は `kafka_max_rows_per_message` を設定して制御できます。 -- ブロックベースのフォーマットの場合、ブロックを小さな部分に分割することはできませんが、1 つのブロックの行数は一般設定 [max_block_size](/operations/settings/settings#max_block_size) で制御できます。 - -## 提出済みオフセットを ClickHouse Keeper に保存するためのエンジン {#engine-to-store-committed-offsets-in-clickhouse-keeper} - - - -`allow_experimental_kafka_offsets_storage_in_keeper` が有効になっている場合、Kafka テーブルエンジンには 2 つの設定を指定できます: - - `kafka_keeper_path` は、ClickHouse Keeper 内のテーブルのパスを指定します - - `kafka_replica_name` は、ClickHouse Keeper 内のレプリカ名を指定します - -どちらの設定も指定するか、どちらも指定しない必要があります。どちらの設定も指定された場合は、新しい実験的な Kafka エンジンが使用されます。この新しいエンジンは、コミットされたオフセットを Kafka に保存することに依存せず、ClickHouse Keeper に保存します。オフセットを Kafka にコミットしようとはしますが、テーブルが作成されるときにのみそのオフセットに依存します。他のすべての状況(テーブルが再起動されたり、エラーから回復された場合)では、ClickHouse Keeper に保存されたオフセットがメッセージの消費を続けるためのオフセットとして使用されます。コミットされたオフセットのほかに、最後のバッチで消費されたメッセージの数も保存されるので、挿入が失敗した場合には、必要に応じて同じ数のメッセージが消費され、重複排除が可能になります。 - -例: - -```sql -CREATE TABLE experimental_kafka (key UInt64, value UInt64) -ENGINE = Kafka('localhost:19092', 'my-topic', 'my-consumer', 'JSONEachRow') -SETTINGS - kafka_keeper_path = '/clickhouse/{database}/experimental_kafka', - kafka_replica_name = 'r1' -SETTINGS allow_experimental_kafka_offsets_storage_in_keeper=1; -``` - -または、`uuid` および `replica` マクロを ReplicatedMergeTree と同様に利用する: - -```sql -CREATE TABLE experimental_kafka (key UInt64, value UInt64) -ENGINE = Kafka('localhost:19092', 'my-topic', 'my-consumer', 'JSONEachRow') -SETTINGS - kafka_keeper_path = '/clickhouse/{database}/{uuid}', - kafka_replica_name = '{replica}' -SETTINGS allow_experimental_kafka_offsets_storage_in_keeper=1; -``` - -### 既知の制限 {#known-limitations} - -新しいエンジンは実験的であるため、まだ本番環境には対応していません。実装の既知の制限がいくつかあります: - - 最大の制限は、エンジンが直接読み取りをサポートしていないことです。マテリアライズドビューを使用してエンジンから読み取ることと、エンジンに書き込むことは機能しますが、直接読み取りは機能しません。その結果、すべての直接 `SELECT` クエリは失敗します。 - - テーブルを迅速に削除して再作成することや、異なるエンジンに同じ ClickHouse Keeper パスを指定することは問題を引き起こす可能性があります。ベストプラクティスとして、`kafka_keeper_path` に `{uuid}` を使用して衝突するパスを避けることができます。 - - 繰り返し可能な読み取りを行うには、メッセージを単一スレッドの複数パーティションから消費することはできません。これに対して、Kafka コンシューマーは定期的にポーリングして生存状態を維持する必要があります。これらの 2 つの目標の結果として、`kafka_thread_per_consumer` が有効な場合にのみ複数のコンシューマーの作成を許可することにしました。そうでなければ、コンシューマーを定期的にポーリングする際に問題を回避することが非常に複雑になります。 - - 新しいストレージエンジンによって作成されたコンシューマーは [`system.kafka_consumers`](../../../operations/system-tables/kafka_consumers.md) テーブルには表示されません。 - -**関連情報** - -- [仮想カラム](../../../engines/table-engines/index.md#table_engines-virtual_columns) -- [background_message_broker_schedule_pool_size](/operations/server-configuration-parameters/settings#background_message_broker_schedule_pool_size) -- [system.kafka_consumers](../../../operations/system-tables/kafka_consumers.md) diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/kafka.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/kafka.md.hash deleted file mode 100644 index d34e779d678..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/kafka.md.hash +++ /dev/null @@ -1 +0,0 @@ -457cb34add0e2bf4 diff --git 
a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/materialized-postgresql.md b/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/materialized-postgresql.md deleted file mode 100644 index 281042714d8..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/materialized-postgresql.md +++ /dev/null @@ -1,81 +0,0 @@ ---- -description: 'Creates a ClickHouse table with an initial data dump of a PostgreSQL - table and starts the replication process.' -sidebar_label: 'MaterializedPostgreSQL' -sidebar_position: 130 -slug: '/engines/table-engines/integrations/materialized-postgresql' -title: 'MaterializedPostgreSQL' ---- - -import ExperimentalBadge from '@theme/badges/ExperimentalBadge'; -import CloudNotSupportedBadge from '@theme/badges/CloudNotSupportedBadge'; - - -# MaterializedPostgreSQL - - - - -:::note -ClickHouse Cloud のユーザーは、PostgreSQL から ClickHouse へのレプリケーションには [ClickPipes](/integrations/clickpipes) の使用を推奨します。これは PostgreSQL に対する高性能なデータ変更キャプチャ (CDC) をネイティブにサポートしています。 -::: - -ClickHouse テーブルを PostgreSQL テーブルの初期データダンプで作成し、レプリケーションプロセスを開始します。つまり、リモートの PostgreSQL データベース内の PostgreSQL テーブルで新しい変更が行われるたびに適用するバックグラウンドジョブを実行します。 - -:::note -このテーブルエンジンは実験的です。使用するには、設定ファイルで `allow_experimental_materialized_postgresql_table` を 1 に設定するか、`SET` コマンドを使用してください: -```sql -SET allow_experimental_materialized_postgresql_table=1 -``` -::: - -複数のテーブルが必要な場合は、テーブルエンジンの代わりに [MaterializedPostgreSQL](../../../engines/database-engines/materialized-postgresql.md) データベースエンジンを使用し、レプリケーションするテーブルを指定する `materialized_postgresql_tables_list` 設定を使用することを強く推奨します(データベースの `schema` を追加することも可能です)。これにより CPU 使用率が改善され、接続数やリモート PostgreSQL データベース内のレプリケーションスロット数が減少します。 - -## テーブルの作成 {#creating-a-table} - -```sql -CREATE TABLE postgresql_db.postgresql_replica (key UInt64, value UInt64) -ENGINE = MaterializedPostgreSQL('postgres1:5432', 'postgres_database', 'postgresql_table', 'postgres_user', 'postgres_password') -PRIMARY KEY key; -``` - -**エンジンのパラメータ** - -- `host:port` — PostgreSQL サーバーのアドレス。 -- `database` — リモートデータベース名。 -- `table` — リモートテーブル名。 -- `user` — PostgreSQL ユーザー。 -- `password` — ユーザーパスワード。 - -## 要件 {#requirements} - -1. [wal_level](https://www.postgresql.org/docs/current/runtime-config-wal.html) 設定は `logical` に設定されている必要があり、`max_replication_slots` パラメータは PostgreSQL 設定ファイル内で少なくとも `2` に設定されている必要があります。 - -2. `MaterializedPostgreSQL` エンジンを持つテーブルは、PostgreSQL テーブルのレプリカアイデンティティインデックス(デフォルトでは:主キー)と同じ主キーを持たなければなりません([レプリカアイデンティティインデックスの詳細はこちら](../../../engines/database-engines/materialized-postgresql.md#requirements))。 - -3. データベースは [Atomic](https://en.wikipedia.org/wiki/Atomicity_(database_systems)) のみが許可されています。 - -4. 
`MaterializedPostgreSQL` テーブルエンジンは、[pg_replication_slot_advance](https://pgpedia.info/p/pg_replication_slot_advance.html) PostgreSQL 関数を必要とするため、PostgreSQL バージョン >= 11 のみで動作します。 - -## 仮想カラム {#virtual-columns} - -- `_version` — トランザクションカウンター。型: [UInt64](../../../sql-reference/data-types/int-uint.md)。 - -- `_sign` — 削除マーク。型: [Int8](../../../sql-reference/data-types/int-uint.md)。可能な値: - - `1` — 行は削除されていない、 - - `-1` — 行は削除されている。 - -これらのカラムはテーブル作成時に追加する必要はありません。常に `SELECT` クエリでアクセス可能です。 -`_version` カラムは `WAL` 内の `LSN` ポジションと等しいため、レプリケーションがどれほど最新かをチェックするために使用できます。 - -```sql -CREATE TABLE postgresql_db.postgresql_replica (key UInt64, value UInt64) -ENGINE = MaterializedPostgreSQL('postgres1:5432', 'postgres_database', 'postgresql_replica', 'postgres_user', 'postgres_password') -PRIMARY KEY key; - -SELECT key, value, _version FROM postgresql_db.postgresql_replica; -``` - -:::note -[**TOAST**](https://www.postgresql.org/docs/9.5/storage-toast.html) 値のレプリケーションはサポートされていません。データ型のデフォルト値が使用されます。 -::: diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/materialized-postgresql.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/materialized-postgresql.md.hash deleted file mode 100644 index 7f4daf8e962..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/materialized-postgresql.md.hash +++ /dev/null @@ -1 +0,0 @@ -aae3422db871676e diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/mongodb.md b/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/mongodb.md deleted file mode 100644 index c4848a75dfb..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/mongodb.md +++ /dev/null @@ -1,245 +0,0 @@ ---- -description: 'MongoDB engine is read-only table engine which allows to read data - from a remote collection.' -sidebar_label: 'MongoDB' -sidebar_position: 135 -slug: '/engines/table-engines/integrations/mongodb' -title: 'MongoDB' ---- - - - - -# MongoDB - -MongoDBエンジンは、リモートの [MongoDB](https://www.mongodb.com/) コレクションからデータを読み取ることができる読み取り専用テーブルエンジンです。 - -MongoDB v3.6+ サーバーのみがサポートされています。 -[Seed list(`mongodb+srv`)](https://www.mongodb.com/docs/manual/reference/glossary/#std-term-seed-list) はまだサポートされていません。 - -## テーブルの作成 {#creating-a-table} - -```sql -CREATE TABLE [IF NOT EXISTS] [db.]table_name -( - name1 [type1], - name2 [type2], - ... 
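-    -- カラム名は MongoDB ドキュメントのキーに対応させます。
-    -- ドキュメント側にキーが無い場合は、デフォルト値(Nullable なら NULL)が入ります(後述の型マッピングを参照)。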
-) ENGINE = MongoDB(host:port, database, collection, user, password[, options[, oid_columns]]); -``` - -**エンジンパラメータ** - -- `host:port` — MongoDBサーバーのアドレス。 - -- `database` — リモートデータベース名。 - -- `collection` — リモートコレクション名。 - -- `user` — MongoDBユーザー。 - -- `password` — ユーザーパスワード。 - -- `options` — MongoDB接続文字列オプション(オプションパラメータ)。 - -- `oid_columns` - WHERE句で`oid`として扱うべきカラムのカンマ区切りリスト。デフォルトは`_id`です。 - -:::tip -MongoDB Atlasクラウドオファリングを使用している場合、接続URLは「Atlas SQL」オプションから取得できます。 -Seed list(`mongodb**+srv**`) はまだサポートされていませんが、将来的なリリースで追加される予定です。 -::: - -別の方法として、URIを渡すこともできます: - -```sql -ENGINE = MongoDB(uri, collection[, oid_columns]); -``` - -**エンジンパラメータ** - -- `uri` — MongoDBサーバーの接続URI。 - -- `collection` — リモートコレクション名。 - -- `oid_columns` - WHERE句で`oid`として扱うべきカラムのカンマ区切りリスト。デフォルトは`_id`です。 - -## 型マッピング {#types-mappings} - -| MongoDB | ClickHouse | -|-------------------------|-----------------------------------------------------------------------| -| bool, int32, int64 | *任意の数値型*, String | -| double | Float64, String | -| date | Date, Date32, DateTime, DateTime64, String | -| string | String | -| document | String(をJSONとして) | -| array | Array, String(をJSONとして) | -| oid | String | -| binary | 列にある場合はString, 配列またはドキュメントにある場合はbase64エンコードされた文字列 | -| uuid (binary subtype 4) | UUID | -| *その他すべて* | String | - -MongoDBドキュメントにキーが見つからない場合(たとえば、カラム名が一致しない場合)、デフォルト値または`NULL`(カラムがnullableの場合)が挿入されます。 - -### OID {#oid} - -WHERE句で`String`を`oid`として扱いたい場合は、テーブルエンジンの最後の引数にカラム名を指定してください。 -これは、MongoDBでデフォルトで`oid`型を持つ`_id`カラムでレコードをクエリする際に必要となる場合があります。 -テーブルの`_id`フィールドが他の型(たとえば`uuid`)の場合、空の`oid_columns`を指定する必要があります。さもないと、このパラメータのデフォルト値である`_id`が使用されます。 - -```javascript -db.sample_oid.insertMany([ - {"another_oid_column": ObjectId()}, -]); - -db.sample_oid.find(); -[ - { - "_id": {"$oid": "67bf6cc44ebc466d33d42fb2"}, - "another_oid_column": {"$oid": "67bf6cc40000000000ea41b1"} - } -] -``` - -デフォルトでは、`_id`のみが`oid`カラムとして扱われます。 - -```sql -CREATE TABLE sample_oid -( - _id String, - another_oid_column String -) ENGINE = MongoDB('mongodb://user:pass@host/db', 'sample_oid'); - -SELECT count() FROM sample_oid WHERE _id = '67bf6cc44ebc466d33d42fb2'; -- 出力は1になります。 -SELECT count() FROM sample_oid WHERE another_oid_column = '67bf6cc40000000000ea41b1'; -- 出力は0になります -``` - -この場合、出力は`0`になります。ClickHouseは`another_oid_column`が`oid`型であることを知らないため、修正しましょう: - -```sql -CREATE TABLE sample_oid -( - _id String, - another_oid_column String -) ENGINE = MongoDB('mongodb://user:pass@host/db', 'sample_oid', '_id,another_oid_column'); - --- または - -CREATE TABLE sample_oid -( - _id String, - another_oid_column String -) ENGINE = MongoDB('host', 'db', 'sample_oid', 'user', 'pass', '', '_id,another_oid_column'); - -SELECT count() FROM sample_oid WHERE another_oid_column = '67bf6cc40000000000ea41b1'; -- これで出力は1になります。 -``` - -## サポートされている句 {#supported-clauses} - -単純な式を持つクエリのみがサポートされています(例えば、`WHERE field = <定数> ORDER BY field2 LIMIT <定数>`)。 -そのような式はMongoDBクエリ言語に変換され、サーバー側で実行されます。 -この制限をすべて無効にするには、[mongodb_throw_on_unsupported_query](../../../operations/settings/settings.md#mongodb_throw_on_unsupported_query)を使用してください。 -その場合、ClickHouseはクエリを最善の努力で変換しようとしますが、これにより全テーブルスキャンやClickHouse側での処理が発生する可能性があります。 - -:::note -リテラルの型を明示的に設定することが常に望ましいです。なぜならMongoは厳密な型フィルターを必要とするからです。\ -たとえば、`Date`でフィルタリングしたい場合: - -```sql -SELECT * FROM mongo_table WHERE date = '2024-01-01' -``` - -これは機能しません。Mongoは文字列を`Date`にキャストしないため、手動でキャストする必要があります。 - -```sql -SELECT * FROM mongo_table WHERE date = '2024-01-01'::Date OR date = toDate('2024-01-01') -``` - 
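-UUID カラムでフィルタする場合も同様に、明示的なキャストを付けておくのが確実と考えられます(`uuid_col` はこの説明のための仮のカラム名です):
-
-```sql
-SELECT * FROM mongo_table WHERE uuid_col = toUUID('5f8f0f7a-1b2c-4d3e-8f9a-0b1c2d3e4f5a')
-```
-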
-これは`Date`、`Date32`、`DateTime`、`Bool`、`UUID`に適用されます。 - -::: - - -## 使用例 {#usage-example} - -MongoDBに[ sample_mflix](https://www.mongodb.com/docs/atlas/sample-data/sample-mflix) データセットがロードされていると仮定します。 - -MongoDBのコレクションからデータを読み取ることを可能にするClickHouseのテーブルを作成します: - -```sql -CREATE TABLE sample_mflix_table -( - _id String, - title String, - plot String, - genres Array(String), - directors Array(String), - writers Array(String), - released Date, - imdb String, - year String, -) ENGINE = MongoDB('mongodb://:@atlas-sql-6634be87cefd3876070caf96-98lxs.a.query.mongodb.net/sample_mflix?ssl=true&authSource=admin', 'movies'); -``` - -クエリ: - -```sql -SELECT count() FROM sample_mflix_table -``` - -```text - ┌─count()─┐ -1. │ 21349 │ - └─────────┘ -``` - -```sql --- JSONExtractStringはMongoDBにプッシュダウンできません -SET mongodb_throw_on_unsupported_query = 0; - --- 評価が7.5を超える「バック・トゥ・ザ・フューチャー」の続編をすべて見つける -SELECT title, plot, genres, directors, released FROM sample_mflix_table -WHERE title IN ('Back to the Future', 'Back to the Future Part II', 'Back to the Future Part III') - AND toFloat32(JSONExtractString(imdb, 'rating')) > 7.5 -ORDER BY year -FORMAT Vertical; -``` - -```text -Row 1: -────── -title: Back to the Future -plot: A young man is accidentally sent 30 years into the past in a time-traveling DeLorean invented by his friend, Dr. Emmett Brown, and must make sure his high-school-age parents unite in order to save his own existence. -genres: ['Adventure','Comedy','Sci-Fi'] -directors: ['Robert Zemeckis'] -released: 1985-07-03 - -Row 2: -────── -title: Back to the Future Part II -plot: After visiting 2015, Marty McFly must repeat his visit to 1955 to prevent disastrous changes to 1985... without interfering with his first trip. -genres: ['Action','Adventure','Comedy'] -directors: ['Robert Zemeckis'] -released: 1989-11-22 -``` - -```sql --- コーマック・マッカーシーの作品に基づくトップ3の映画を見つける -SELECT title, toFloat32(JSONExtractString(imdb, 'rating')) as rating -FROM sample_mflix_table -WHERE arrayExists(x -> x like 'Cormac McCarthy%', writers) -ORDER BY rating DESC -LIMIT 3; -``` - -```text - ┌─title──────────────────┬─rating─┐ -1. │ No Country for Old Men │ 8.1 │ -2. │ The Sunset Limited │ 7.4 │ -3. │ The Road │ 7.3 │ - └────────────────────────┴────────┘ -``` - -## トラブルシューティング {#troubleshooting} -DEBUGレベルのログで生成されたMongoDBクエリを見ることができます。 - -実装の詳細は、[mongocxx](https://github.com/mongodb/mongo-cxx-driver) および [mongoc](https://github.com/mongodb/mongo-c-driver) のドキュメントで確認できます。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/mongodb.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/mongodb.md.hash deleted file mode 100644 index 3da0d187e16..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/mongodb.md.hash +++ /dev/null @@ -1 +0,0 @@ -02651f019d132c30 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/mysql.md b/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/mysql.md deleted file mode 100644 index 6b9953639e2..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/mysql.md +++ /dev/null @@ -1,202 +0,0 @@ ---- -description: 'Documentation for MySQL Table Engine' -sidebar_label: 'MySQL' -sidebar_position: 138 -slug: '/engines/table-engines/integrations/mysql' -title: 'The MySQL engine allows you to perform `SELECT` and `INSERT` queries on - data that is stored on a remote MySQL server.' 
---- - - - - -# MySQL テーブルエンジン - -MySQL エンジンでは、リモート MySQL サーバーに保存されているデータに対して `SELECT` および `INSERT` クエリを実行できます。 - -## テーブルの作成 {#creating-a-table} - -```sql -CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] -( - name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1] [TTL expr1], - name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2] [TTL expr2], - ... -) ENGINE = MySQL({host:port, database, table, user, password[, replace_query, on_duplicate_clause] | named_collection[, option=value [,..]]}) -SETTINGS - [ connection_pool_size=16, ] - [ connection_max_tries=3, ] - [ connection_wait_timeout=5, ] - [ connection_auto_close=true, ] - [ connect_timeout=10, ] - [ read_write_timeout=300 ] -; -``` - -[CREATE TABLE](/sql-reference/statements/create/table) クエリの詳細な説明を参照してください。 - -テーブル構造は元の MySQL テーブルの構造と異なる場合があります。 - -- カラム名は元の MySQL テーブルと同じである必要がありますが、これらのカラムの一部のみを使用しても、順番は自由です。 -- カラムタイプは元の MySQL テーブルのものと異なる場合があります。ClickHouse は値を ClickHouse データ型に[キャスト](../../../engines/database-engines/mysql.md#data_types-support)しようとします。 -- [external_table_functions_use_nulls](/operations/settings/settings#external_table_functions_use_nulls) 設定は、Nullable カラムの処理方法を定義します。デフォルト値: 1。0 の場合、テーブル関数は Nullable カラムを作成せず、null の代わりにデフォルト値を挿入します。これは配列内の NULL 値にも適用されます。 - -**エンジンパラメータ** - -- `host:port` — MySQL サーバーのアドレス。 -- `database` — リモートデータベース名。 -- `table` — リモートテーブル名。 -- `user` — MySQL ユーザー。 -- `password` — ユーザーパスワード。 -- `replace_query` — `INSERT INTO` クエリを `REPLACE INTO` に変換するフラグ。`replace_query=1` の場合、クエリが代入されます。 -- `on_duplicate_clause` — `INSERT` クエリに追加される `ON DUPLICATE KEY on_duplicate_clause` 式。 - 例: `INSERT INTO t (c1,c2) VALUES ('a', 2) ON DUPLICATE KEY UPDATE c2 = c2 + 1` では、`on_duplicate_clause` は `UPDATE c2 = c2 + 1` です。[MySQL ドキュメント](https://dev.mysql.com/doc/refman/8.0/en/insert-on-duplicate.html)を参照して、`ON DUPLICATE KEY` 句と共に使用できる `on_duplicate_clause` を確認してください。 - `on_duplicate_clause` を指定するには、`replace_query` パラメータに `0` を渡す必要があります。`replace_query = 1` と `on_duplicate_clause` を同時に渡すと、ClickHouse は例外を生成します。 - -引数は[名前付きコレクション](/operations/named-collections.md)を使用して渡すこともできます。この場合、`host` と `port` は別々に指定する必要があります。このアプローチは本番環境での使用が推奨されます。 - -`=, !=, >, >=, <, <=` のような単純な `WHERE` 句は、MySQL サーバーで実行されます。 - -クエリが MySQL に終了してから、残りの条件と `LIMIT` サンプリング制約は ClickHouse でのみ実行されます。 - -複数のレプリカをサポートしており、`|` でリストする必要があります。例えば: - -```sql -CREATE TABLE test_replicas (id UInt32, name String, age UInt32, money UInt32) ENGINE = MySQL(`mysql{2|3|4}:3306`, 'clickhouse', 'test_replicas', 'root', 'clickhouse'); -``` - -## 使用例 {#usage-example} - -MySQL にテーブルを作成: - -```text -mysql> CREATE TABLE `test`.`test` ( - -> `int_id` INT NOT NULL AUTO_INCREMENT, - -> `int_nullable` INT NULL DEFAULT NULL, - -> `float` FLOAT NOT NULL, - -> `float_nullable` FLOAT NULL DEFAULT NULL, - -> PRIMARY KEY (`int_id`)); -Query OK, 0 rows affected (0,09 sec) - -mysql> insert into test (`int_id`, `float`) VALUES (1,2); -Query OK, 1 row affected (0,00 sec) - -mysql> select * from test; -+------+----------+-----+----------+ -| int_id | int_nullable | float | float_nullable | -+------+----------+-----+----------+ -| 1 | NULL | 2 | NULL | -+------+----------+-----+----------+ -1 row in set (0,00 sec) -``` - -ClickHouse でプレーン引数を使用してテーブルを作成: - -```sql -CREATE TABLE mysql_table -( - `float_nullable` Nullable(Float32), - `int_id` Int32 -) -ENGINE = MySQL('localhost:3306', 'test', 'test', 'bayonet', '123') -``` - -または[名前付きコレクション](/operations/named-collections.md)を使用: - -```sql -CREATE NAMED COLLECTION creds AS - host = 'localhost', - port = 3306, - database = 'test', - 
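-    -- named collection に接続情報をまとめておき、テーブル側では ENGINE = MySQL(creds, table='test') のように参照します(直後の例を参照)。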
user = 'bayonet', - password = '123'; -CREATE TABLE mysql_table -( - `float_nullable` Nullable(Float32), - `int_id` Int32 -) -ENGINE = MySQL(creds, table='test') -``` - -MySQL テーブルからデータを取得: - -```sql -SELECT * FROM mysql_table -``` - -```text -┌─float_nullable─┬─int_id─┐ -│ ᴺᵁᴸᴸ │ 1 │ -└────────────────┴────────┘ -``` - -## 設定 {#mysql-settings} - -デフォルトの設定はあまり効率的ではなく、接続を再利用すらしません。これらの設定を使用すると、サーバーで実行されるクエリの数を増加させることができます。 - -### connection_auto_close {#connection-auto-close} - -クエリ実行後に接続を自動的に閉じることを許可し、すなわち接続の再利用を無効にします。 - -可能な値: - -- 1 — 自動的に接続を閉じることが許可され、接続の再利用が無効 -- 0 — 自動的に接続を閉じることが許可されず、接続の再利用が有効 - -デフォルト値: `1`。 - -### connection_max_tries {#connection-max-tries} - -フェイルオーバーのプールのリトライ回数を設定します。 - -可能な値: - -- 正の整数。 -- 0 — フェイルオーバーのプールにリトライはありません。 - -デフォルト値: `3`。 - -### connection_pool_size {#connection-pool-size} - -接続プールのサイズ(すべての接続が使用中の場合、クエリは自由になるまで待機します)。 - -可能な値: - -- 正の整数。 - -デフォルト値: `16`。 - -### connection_wait_timeout {#connection-wait-timeout} - -自由な接続を待機するタイムアウト(秒単位)(接続プールサイズでアクティブな接続がすでにある場合)、0 - 待機しない。 - -可能な値: - -- 正の整数。 - -デフォルト値: `5`。 - -### connect_timeout {#connect-timeout} - -接続タイムアウト(秒単位)。 - -可能な値: - -- 正の整数。 - -デフォルト値: `10`。 - -### read_write_timeout {#read-write-timeout} - -読み取り/書き込みタイムアウト(秒単位)。 - -可能な値: - -- 正の整数。 - -デフォルト値: `300`。 - -## 参照 {#see-also} - -- [MySQL テーブル関数](../../../sql-reference/table-functions/mysql.md) -- [MySQL を辞書ソースとして使用する](/sql-reference/dictionaries#mysql) diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/mysql.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/mysql.md.hash deleted file mode 100644 index 33785290004..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/mysql.md.hash +++ /dev/null @@ -1 +0,0 @@ -d66c1c7916203ea1 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/nats.md b/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/nats.md deleted file mode 100644 index 5f9f4ca8c08..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/nats.md +++ /dev/null @@ -1,189 +0,0 @@ ---- -description: 'This engine allows integrating ClickHouse with NATS to publish or - subscribe to message subjects, and process new messages as they become available.' -sidebar_label: 'NATS' -sidebar_position: 140 -slug: '/engines/table-engines/integrations/nats' -title: 'NATS Engine' ---- - - - - -# NATSエンジン {#redisstreams-engine} - -このエンジンは、ClickHouseと [NATS](https://nats.io/) を統合することを可能にします。 - -`NATS` では次のことができます: - -- メッセージサブジェクトの発行または購読。 -- 新しいメッセージが利用可能になると処理。 - -## テーブルの作成 {#creating-a-table} - -```sql -CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] -( - name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1], - name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2], - ... 
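-    -- 必須の設定は nats_url、nats_subjects、nats_format の 3 つで、それ以外は任意です(下のパラメータ一覧を参照)。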
-) ENGINE = NATS SETTINGS - nats_url = 'host:port', - nats_subjects = 'subject1,subject2,...', - nats_format = 'data_format'[,] - [nats_schema = '',] - [nats_num_consumers = N,] - [nats_queue_group = 'group_name',] - [nats_secure = false,] - [nats_max_reconnect = N,] - [nats_reconnect_wait = N,] - [nats_server_list = 'host1:port1,host2:port2,...',] - [nats_skip_broken_messages = N,] - [nats_max_block_size = N,] - [nats_flush_interval_ms = N,] - [nats_username = 'user',] - [nats_password = 'password',] - [nats_token = 'clickhouse',] - [nats_credential_file = '/var/nats_credentials',] - [nats_startup_connect_tries = '5'] - [nats_max_rows_per_message = 1,] - [nats_handle_error_mode = 'default'] -``` - -必須パラメータ: - -- `nats_url` – host:port (例: `localhost:5672`).. -- `nats_subjects` – 購読/発行するNATSテーブルのサブジェクトのリスト。ワイルドカードサブジェクト `foo.*.bar` や `baz.>` をサポート。 -- `nats_format` – メッセージフォーマット。SQLの `FORMAT` 関数と同じ表記法を使用(例: `JSONEachRow`)。詳細は [Formats](../../../interfaces/formats.md) セクションを参照。 - -オプションパラメータ: - -- `nats_schema` – フォーマットがスキーマ定義を必要とする場合に使用すべきパラメータ。たとえば、[Cap'n Proto](https://capnproto.org/) はスキーマファイルのパスとルート `schema.capnp:Message` オブジェクトの名前を必要とします。 -- `nats_num_consumers` – テーブルごとの消費者数。デフォルト: `1`。1つの消費者のスループットが不十分な場合は、より多くの消費者を指定してください。 -- `nats_queue_group` – NATS購読者のキューグループ名。デフォルトはテーブル名です。 -- `nats_max_reconnect` – 廃止され、効果がありません。再接続は nats_reconnect_wait のタイムアウトで永久に行われます。 -- `nats_reconnect_wait` – 各再接続試行の間にスリープするミリ秒単位の時間。デフォルト: `5000`。 -- `nats_server_list` - 接続用のサーバーリスト。NATSクラスターに接続するために指定できます。 -- `nats_skip_broken_messages` - スキーマと互換性のないメッセージをブロックごとにスキップするNATSメッセージパーサの許容度。デフォルト: `0`。`nats_skip_broken_messages = N` の場合、エンジンは解析できない *N* NATSメッセージをスキップします(メッセージはデータの行に等しい)。 -- `nats_max_block_size` - NATSからデータをフラッシュするためにポーリングで収集された行の数。デフォルト: [max_insert_block_size](../../../operations/settings/settings.md#max_insert_block_size)。 -- `nats_flush_interval_ms` - NATSから読み取ったデータをフラッシュするためのタイムアウト。デフォルト: [stream_flush_interval_ms](/operations/settings/settings#stream_flush_interval_ms)。 -- `nats_username` - NATSユーザー名。 -- `nats_password` - NATSパスワード。 -- `nats_token` - NATS認証トークン。 -- `nats_credential_file` - NATS資格情報ファイルへのパス。 -- `nats_startup_connect_tries` - 起動時の接続試行回数。デフォルト: `5`。 -- `nats_max_rows_per_message` — 行ベースのフォーマットで1つのNATSメッセージに書き込まれる最大行数。(デフォルト: `1`)。 -- `nats_handle_error_mode` — NATSエンジンに対するエラー処理の方法。可能な値: default(メッセージの解析に失敗した場合に例外がスローされます)、stream(例外メッセージと生のメッセージが仮想カラム `_error` と `_raw_message` に保存されます)。 - -SSL接続: - -安全な接続には `nats_secure = 1` を使用します。 -使用されるライブラリのデフォルトの動作は、作成されたTLS接続が十分に安全かどうかを確認しません。証明書が期限切れ、自己署名、不足、または無効であっても、接続は単に許可されます。証明書のより厳格なチェックは将来的に実装される可能性があります。 - -NATSテーブルへの書き込み: - -テーブルが1つのサブジェクトからのみ読み取る場合、挿入は同じサブジェクトに公開されます。 -しかし、テーブルが複数のサブジェクトから読み取る場合、公開するサブジェクトを指定する必要があります。 -そのため、複数のサブジェクトを持つテーブルに挿入する際には、`stream_like_engine_insert_queue` の設定が必要です。 -テーブルが読み取るサブジェクトの1つを選択し、そこにデータを公開できます。例: - -```sql - CREATE TABLE queue ( - key UInt64, - value UInt64 - ) ENGINE = NATS - SETTINGS nats_url = 'localhost:4444', - nats_subjects = 'subject1,subject2', - nats_format = 'JSONEachRow'; - - INSERT INTO queue - SETTINGS stream_like_engine_insert_queue = 'subject2' - VALUES (1, 1); -``` - -また、フォーマット設定をnats関連の設定と一緒に追加することができます。 - -例: - -```sql - CREATE TABLE queue ( - key UInt64, - value UInt64, - date DateTime - ) ENGINE = NATS - SETTINGS nats_url = 'localhost:4444', - nats_subjects = 'subject1', - nats_format = 'JSONEachRow', - date_time_input_format = 'best_effort'; -``` - -NATSサーバーの設定はClickHouseの設定ファイルを使用して追加できます。 -具体的には、NATSエンジンのためのRedisパスワードを追加できます: - -```xml - - click - house - 
clickhouse - -``` - -## 説明 {#description} - -`SELECT` はメッセージを読み取るには特に役に立ちません(デバッグを除いて)、なぜなら各メッセージは一度だけ読むことができるからです。リアルタイムスレッドを作成するには、[マテリアライズドビュー](../../../sql-reference/statements/create/view.md)を使用するのがより実用的です。これを行うには: - -1. エンジンを使用してNATS消費者を作成し、それをデータストリームと見なします。 -2. 必要な構造のテーブルを作成します。 -3. エンジンからのデータを変換し、以前に作成したテーブルに入れるマテリアライズドビューを作成します。 - -`MATERIALIZED VIEW` がエンジンに接続すると、バックグラウンドでデータを収集し始めます。これにより、NATSからメッセージを継続的に受け取り、`SELECT`を使用して必要なフォーマットに変換することができます。 -1つのNATSテーブルには、任意の数のマテリアライズドビューを持つことができ、これらはテーブルから直接データを読み取るのではなく、新しいレコード(ブロック単位)を受け取ります。これにより、異なる詳細レベルの複数のテーブルに書き込むことができます(グループ化 - 集約ありおよびなし)。 - -例: - -```sql - CREATE TABLE queue ( - key UInt64, - value UInt64 - ) ENGINE = NATS - SETTINGS nats_url = 'localhost:4444', - nats_subjects = 'subject1', - nats_format = 'JSONEachRow', - date_time_input_format = 'best_effort'; - - CREATE TABLE daily (key UInt64, value UInt64) - ENGINE = MergeTree() ORDER BY key; - - CREATE MATERIALIZED VIEW consumer TO daily - AS SELECT key, value FROM queue; - - SELECT key, value FROM daily ORDER BY key; -``` - -ストリームデータの受信を停止したり、変換ロジックを変更したりするには、マテリアライズドビューを切り離します: - -```sql - DETACH TABLE consumer; - ATTACH TABLE consumer; -``` - -ターゲットテーブルを `ALTER` で変更したい場合は、ターゲットテーブルとビューからのデータの不一致を避けるために、マテリアルビューを無効にすることをお勧めします。 - -## 仮想カラム {#virtual-columns} - -- `_subject` - NATSメッセージのサブジェクト。データ型: `String`。 - -`nats_handle_error_mode='stream'` の場合の追加仮想カラム: - -- `_raw_message` - 正しく解析できなかった生のメッセージ。データ型: `Nullable(String)`。 -- `_error` - 解析中に発生した例外メッセージ。データ型: `Nullable(String)`。 - -注意:`_raw_message` と `_error` の仮想カラムは、解析中に例外が発生した場合のみ埋められ、メッセージが正常に解析された場合は常に `NULL` です。 - -## データフォーマットのサポート {#data-formats-support} - -NATSエンジンは、ClickHouseでサポートされているすべての [formats](../../../interfaces/formats.md) をサポートします。 -1つのNATSメッセージの行数は、フォーマットが行ベースかブロックベースかによって異なります: - -- 行ベースのフォーマットの場合、1つのNATSメッセージ内の行数は `nats_max_rows_per_message` を設定することで制御できます。 -- ブロックベースのフォーマットではブロックをより小さな部分に分割することはできませんが、1つのブロックの行数は一般的な設定 [max_block_size](/operations/settings/settings#max_block_size) で制御できます。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/nats.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/nats.md.hash deleted file mode 100644 index ffe4f8b0a0f..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/nats.md.hash +++ /dev/null @@ -1 +0,0 @@ -86108ebeea06db77 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/odbc.md b/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/odbc.md deleted file mode 100644 index 506a7a6d289..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/odbc.md +++ /dev/null @@ -1,140 +0,0 @@ ---- -description: 'Allows ClickHouse to connect to external databases via ODBC.' 
-sidebar_label: 'ODBC' -sidebar_position: 150 -slug: '/engines/table-engines/integrations/odbc' -title: 'ODBC' ---- - -import CloudNotSupportedBadge from '@theme/badges/CloudNotSupportedBadge'; - - -# ODBC - - - -ClickHouseを利用して、[ODBC](https://en.wikipedia.org/wiki/Open_Database_Connectivity)を介して外部のデータベースに接続できます。 - -ODBC接続を安全に実装するために、ClickHouseは別のプログラム`clickhouse-odbc-bridge`を使用します。ODBCドライバーが`clickhouse-server`から直接読み込まれると、ドライバーの問題によってClickHouseサーバーがクラッシュする可能性があります。ClickHouseは必要に応じて自動的に`clickhouse-odbc-bridge`を起動します。ODBCブリッジプログラムは、`clickhouse-server`と同じパッケージからインストールされます。 - -このエンジンは、[Nullable](../../../sql-reference/data-types/nullable.md)データ型をサポートしています。 - -## テーブルの作成 {#creating-a-table} - -```sql -CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] -( - name1 [type1], - name2 [type2], - ... -) -ENGINE = ODBC(connection_settings, external_database, external_table) -``` - -[CREATE TABLE](/sql-reference/statements/create/table)クエリの詳細な説明を参照してください。 - -テーブルの構造は、ソーステーブルの構造と異なる場合があります: - -- カラム名はソーステーブルと同じである必要がありますが、これらのカラムの一部を任意の順序で使用することができます。 -- カラムタイプはソーステーブルのものと異なる場合があります。ClickHouseは、値をClickHouseデータ型に[キャスト](/sql-reference/functions/type-conversion-functions#cast)しようとします。 -- [external_table_functions_use_nulls](/operations/settings/settings#external_table_functions_use_nulls)設定は、Nullableカラムの扱い方を定義します。デフォルト値は1です。0の場合、テーブル関数はNullableカラムを作成せず、nullの代わりにデフォルト値を挿入します。これは配列内のNULL値にも適用されます。 - -**エンジンパラメータ** - -- `connection_settings` — `odbc.ini`ファイル内の接続設定セクションの名前。 -- `external_database` — 外部DBMS内のデータベース名。 -- `external_table` — `external_database`内のテーブル名。 - -## 使用例 {#usage-example} - -**ODBCを介してローカルのMySQLインストールからデータを取得する** - -この例は、Ubuntu Linux 18.04およびMySQLサーバー5.7で検証されています。 - -unixODBCとMySQL Connectorがインストールされていることを確認してください。 - -デフォルトでは(パッケージからインストールされた場合)、ClickHouseはユーザー`clickhouse`として起動します。したがって、MySQLサーバー内でこのユーザーを作成し、構成する必要があります。 - -```bash -$ sudo mysql -``` - -```sql -mysql> CREATE USER 'clickhouse'@'localhost' IDENTIFIED BY 'clickhouse'; -mysql> GRANT ALL PRIVILEGES ON *.* TO 'clickhouse'@'localhost' WITH GRANT OPTION; -``` - -次に、`/etc/odbc.ini`で接続を構成します。 - -```bash -$ cat /etc/odbc.ini -[mysqlconn] -DRIVER = /usr/local/lib/libmyodbc5w.so -SERVER = 127.0.0.1 -PORT = 3306 -DATABASE = test -USER = clickhouse -PASSWORD = clickhouse -``` - -unixODBCインストールの`isql`ユーティリティを使って接続を確認できます。 - -```bash -$ isql -v mysqlconn -+-------------------------+ -| Connected! | -| | -... 
-``` - -MySQLのテーブル: - -```text -mysql> CREATE DATABASE test; -Query OK, 1 row affected (0,01 sec) - -mysql> CREATE TABLE `test`.`test` ( - -> `int_id` INT NOT NULL AUTO_INCREMENT, - -> `int_nullable` INT NULL DEFAULT NULL, - -> `float` FLOAT NOT NULL, - -> `float_nullable` FLOAT NULL DEFAULT NULL, - -> PRIMARY KEY (`int_id`)); -Query OK, 0 rows affected (0,09 sec) - -mysql> insert into test.test (`int_id`, `float`) VALUES (1,2); -Query OK, 1 row affected (0,00 sec) - -mysql> select * from test.test; -+------+----------+-----+----------+ -| int_id | int_nullable | float | float_nullable | -+------+----------+-----+----------+ -| 1 | NULL | 2 | NULL | -+------+----------+-----+----------+ -1 row in set (0,00 sec) -``` - -ClickHouse内のテーブル(MySQLテーブルからデータを取得): - -```sql -CREATE TABLE odbc_t -( - `int_id` Int32, - `float_nullable` Nullable(Float32) -) -ENGINE = ODBC('DSN=mysqlconn', 'test', 'test') -``` - -```sql -SELECT * FROM odbc_t -``` - -```text -┌─int_id─┬─float_nullable─┐ -│ 1 │ ᴺᵁᴸᴸ │ -└────────┴────────────────┘ -``` - -## 関連項目 {#see-also} - -- [ODBC辞書](/sql-reference/dictionaries#mysql) -- [ODBCテーブル関数](../../../sql-reference/table-functions/odbc.md) diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/odbc.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/odbc.md.hash deleted file mode 100644 index 73fcda8302b..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/odbc.md.hash +++ /dev/null @@ -1 +0,0 @@ -8fce48b64a9da0f4 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/postgresql.md b/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/postgresql.md deleted file mode 100644 index e08ece3ecda..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/postgresql.md +++ /dev/null @@ -1,229 +0,0 @@ ---- -description: 'The PostgreSQL engine allows `SELECT` and `INSERT` queries on data - stored on a remote PostgreSQL server.' -sidebar_label: 'PostgreSQL' -sidebar_position: 160 -slug: '/engines/table-engines/integrations/postgresql' -title: 'PostgreSQL テーブルエンジン' ---- - - - -The PostgreSQL engine allows `SELECT` and `INSERT` queries on data stored on a remote PostgreSQL server. - -:::note -現在、PostgreSQLバージョン12以上のみがサポートされています。 -::: - -:::note Replicating or migrating Postgres data with with PeerDB -> Postgresテーブルエンジンに加えて、[PeerDB](https://docs.peerdb.io/introduction) by ClickHouseを使用して、PostgresからClickHouseへの継続的なデータパイプラインを設定できます。PeerDBは、PostgresからClickHouseへのデータを変更データキャプチャ(CDC)を使用して複製するために特別に設計されたツールです。 -::: - -## Creating a Table {#creating-a-table} - -```sql -CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] -( - name1 type1 [DEFAULT|MATERIALIZED|ALIAS expr1] [TTL expr1], - name2 type2 [DEFAULT|MATERIALIZED|ALIAS expr2] [TTL expr2], - ... 
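-    -- カラムはリモートの PostgreSQL テーブルの一部だけを任意の順序で指定でき、
-    -- 型が異なる場合は ClickHouse 側の型へキャストされます(直後の説明を参照)。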
-) ENGINE = PostgreSQL({host:port, database, table, user, password[, schema, [, on_conflict]] | named_collection[, option=value [,..]]}) -``` - -[CREATE TABLE](/sql-reference/statements/create/table) クエリの詳細な説明を参照してください。 - -テーブル構造は元のPostgreSQLテーブル構造と異なる場合があります: - -- カラム名は元のPostgreSQLテーブルと同じである必要がありますが、これらのカラムの一部のみを使用し、任意の順序で使用することができます。 -- カラムタイプは元のPostgreSQLテーブルのものと異なる場合があります。ClickHouseは値をClickHouseデータ型に[キャスト](../../../engines/database-engines/postgresql.md#data_types-support)しようとします。 -- [external_table_functions_use_nulls](/operations/settings/settings#external_table_functions_use_nulls) 設定は、Nullableカラムの扱い方を定義します。デフォルト値:1。0の場合、テーブル関数はNullableカラムを作成せず、nullの代わりにデフォルト値を挿入します。これは、配列内のNULL値にも適用されます。 - -**Engine Parameters** - -- `host:port` — PostgreSQLサーバーアドレス。 -- `database` — リモートデータベース名。 -- `table` — リモートテーブル名。 -- `user` — PostgreSQLユーザー。 -- `password` — ユーザーパスワード。 -- `schema` — 非デフォルトテーブルスキーマ。オプション。 -- `on_conflict` — コンフリクト解決戦略。例:`ON CONFLICT DO NOTHING`。オプション。ただし、このオプションを追加すると、挿入効率が低下します。 - -[Named collections](/operations/named-collections.md) (バージョン21.11以降で利用可能)は、プロダクション環境での使用を推奨します。以下はその例です: - -```xml - - - localhost - 5432 - postgres - **** - schema1 - - -``` - -一部のパラメータはキー値引数として上書きできます: -```sql -SELECT * FROM postgresql(postgres_creds, table='table1'); -``` - -## Implementation Details {#implementation-details} - -PostgreSQL側の`SELECT`クエリは、読み取り専用のPostgreSQLトランザクション内で`COPY (SELECT ...) TO STDOUT`として実行され、各`SELECT`クエリの後にコミットされます。 - -`=`, `!=`, `>`, `>=`, `<`, `<=`, `IN`などの単純な`WHERE`句は、PostgreSQLサーバーで実行されます。 - -すべての結合、集計、ソート、`IN [ array ]`条件、および`LIMIT`サンプリング制約は、PostgreSQLへのクエリが終了した後にClickHouse内でのみ実行されます。 - -PostgreSQL側の`INSERT`クエリは、PostgreSQLトランザクション内で`COPY "table_name" (field1, field2, ... fieldN) FROM STDIN`として実行され、各`INSERT`ステートメントの後に自動コミットが行われます。 - -PostgreSQLの`Array`タイプはClickHouseの配列に変換されます。 - -:::note -注意 - PostgreSQLでは、`type_name[]`のように作成された配列データは、同じカラムの異なるテーブル行で異なる次元の多次元配列を含むことができます。しかし、ClickHouseでは、同じカラムのすべてのテーブル行で同じ次元数の多次元配列のみが許可されています。 -::: - -複数のレプリカをサポートしており、`|`でリストにする必要があります。たとえば: - -```sql -CREATE TABLE test_replicas (id UInt32, name String) ENGINE = PostgreSQL(`postgres{2|3|4}:5432`, 'clickhouse', 'test_replicas', 'postgres', 'mysecretpassword'); -``` - -PostgreSQL辞書ソースのためのレプリカの優先度もサポートされています。地図中の番号が大きいほど、優先度は低くなります。最も高い優先度は`0`です。 - -以下の例では、レプリカ`example01-1`が最高の優先度を持っています: - -```xml - - 5432 - clickhouse - qwerty - - example01-1 - 1 - - - example01-2 - 2 - - db_name - table_name
- <where>id=10</where> - <invalidate_query>SQL_QUERY</invalidate_query> -
- -``` - -## Usage Example {#usage-example} - -### Table in PostgreSQL {#table-in-postgresql} - -```text -postgres=# CREATE TABLE "public"."test" ( -"int_id" SERIAL, -"int_nullable" INT NULL DEFAULT NULL, -"float" FLOAT NOT NULL, -"str" VARCHAR(100) NOT NULL DEFAULT '', -"float_nullable" FLOAT NULL DEFAULT NULL, -PRIMARY KEY (int_id)); - -CREATE TABLE - -postgres=# INSERT INTO test (int_id, str, "float") VALUES (1,'test',2); -INSERT 0 1 - -postgresql> SELECT * FROM test; - int_id | int_nullable | float | str | float_nullable - --------+--------------+-------+------+---------------- - 1 | | 2 | test | - (1 row) -``` - -### Creating Table in ClickHouse, and connecting to PostgreSQL table created above {#creating-table-in-clickhouse-and-connecting-to--postgresql-table-created-above} - -この例では、[PostgreSQLテーブルエンジン](/engines/table-engines/integrations/postgresql.md)を使用して、ClickHouseテーブルが上記のPostgreSQLテーブルに接続され、SELECTとINSERTステートメントの両方をPostgreSQLデータベースに対して使用します: - -```sql -CREATE TABLE default.postgresql_table -( - `float_nullable` Nullable(Float32), - `str` String, - `int_id` Int32 -) -ENGINE = PostgreSQL('localhost:5432', 'public', 'test', 'postgres_user', 'postgres_password'); -``` - -### Inserting initial data from PostgreSQL table into ClickHouse table, using a SELECT query {#inserting-initial-data-from-postgresql-table-into-clickhouse-table-using-a-select-query} - -[postgresqlテーブル関数](/sql-reference/table-functions/postgresql.md)は、データをPostgreSQLからClickHouseにコピーします。これは、PostgreSQLではなくClickHouseでデータのクエリや分析を行うことでクエリパフォーマンスを向上させるためによく使用されるか、PostgreSQLからClickHouseへのデータ移行にも使用できます。PostgreSQLからClickHouseへデータをコピーするため、ClickHouseでMergeTreeテーブルエンジンを使用し、これをpostgresql_copyと呼びます: - -```sql -CREATE TABLE default.postgresql_copy -( - `float_nullable` Nullable(Float32), - `str` String, - `int_id` Int32 -) -ENGINE = MergeTree -ORDER BY (int_id); -``` - -```sql -INSERT INTO default.postgresql_copy -SELECT * FROM postgresql('localhost:5432', 'public', 'test', 'postgres_user', 'postgres_password'); -``` - -### Inserting incremental data from PostgreSQL table into ClickHouse table {#inserting-incremental-data-from-postgresql-table-into-clickhouse-table} - -初期の挿入の後、PostgreSQLテーブルとClickHouseテーブルの間で継続的な同期を行う場合、ClickHouseでWHERE句を使用して、タイムスタンプまたはユニークなシーケンスIDに基づいてPostgreSQLに追加されたデータのみを挿入できます。 - -これには、以前に追加された最大IDまたはタイムスタンプを追跡する必要があります。たとえば、以下のようにします: - -```sql -SELECT max(`int_id`) AS maxIntID FROM default.postgresql_copy; -``` - -その後、最大より大きいPostgreSQLテーブルから値を挿入します。 - -```sql -INSERT INTO default.postgresql_copy -SELECT * FROM postgresql('localhost:5432', 'public', 'test', 'postges_user', 'postgres_password'); -WHERE int_id > maxIntID; -``` - -### Selecting data from the resulting ClickHouse table {#selecting-data-from-the-resulting-clickhouse-table} - -```sql -SELECT * FROM postgresql_copy WHERE str IN ('test'); -``` - -```text -┌─float_nullable─┬─str──┬─int_id─┐ -│ ᴺᵁᴸᴸ │ test │ 1 │ -└────────────────┴──────┴────────┘ -``` - -### Using Non-default Schema {#using-non-default-schema} - -```text -postgres=# CREATE SCHEMA "nice.schema"; - -postgres=# CREATE TABLE "nice.schema"."nice.table" (a integer); - -postgres=# INSERT INTO "nice.schema"."nice.table" SELECT i FROM generate_series(0, 99) as t(i) -``` - -```sql -CREATE TABLE pg_table_schema_with_dots (a UInt32) - ENGINE PostgreSQL('localhost:5432', 'clickhouse', 'nice.table', 'postgrsql_user', 'password', 'nice.schema'); -``` - -**See Also** - -- [The `postgresql` table function](../../../sql-reference/table-functions/postgresql.md) -- [Using PostgreSQL as a dictionary 
source](/sql-reference/dictionaries#mysql) - -## Related content {#related-content} - -- Blog: [ClickHouse and PostgreSQL - a match made in data heaven - part 1](https://clickhouse.com/blog/migrating-data-between-clickhouse-postgres) -- Blog: [ClickHouse and PostgreSQL - a Match Made in Data Heaven - part 2](https://clickhouse.com/blog/migrating-data-between-clickhouse-postgres-part-2) diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/postgresql.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/postgresql.md.hash deleted file mode 100644 index 5b1b485c36f..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/postgresql.md.hash +++ /dev/null @@ -1 +0,0 @@ -7e25de1837234afc diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/rabbitmq.md b/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/rabbitmq.md deleted file mode 100644 index 59df20be7f4..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/rabbitmq.md +++ /dev/null @@ -1,215 +0,0 @@ ---- -description: 'This engine allows integrating ClickHouse with RabbitMQ.' -sidebar_label: 'RabbitMQ' -sidebar_position: 170 -slug: '/engines/table-engines/integrations/rabbitmq' -title: 'RabbitMQ Engine' ---- - - - - -# RabbitMQ エンジン - -このエンジンは、ClickHouse と [RabbitMQ](https://www.rabbitmq.com) を統合することを可能にします。 - -`RabbitMQ` を利用すると: - -- データフローを発行または購読できます。 -- 流れが利用可能になると、それを処理できます。 - -## テーブルの作成 {#creating-a-table} - -```sql -CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] -( - name1 [type1], - name2 [type2], - ... -) ENGINE = RabbitMQ SETTINGS - rabbitmq_host_port = 'host:port' [or rabbitmq_address = 'amqp(s)://guest:guest@localhost/vhost'], - rabbitmq_exchange_name = 'exchange_name', - rabbitmq_format = 'data_format'[,] - [rabbitmq_exchange_type = 'exchange_type',] - [rabbitmq_routing_key_list = 'key1,key2,...',] - [rabbitmq_secure = 0,] - [rabbitmq_schema = '',] - [rabbitmq_num_consumers = N,] - [rabbitmq_num_queues = N,] - [rabbitmq_queue_base = 'queue',] - [rabbitmq_deadletter_exchange = 'dl-exchange',] - [rabbitmq_persistent = 0,] - [rabbitmq_skip_broken_messages = N,] - [rabbitmq_max_block_size = N,] - [rabbitmq_flush_interval_ms = N,] - [rabbitmq_queue_settings_list = 'x-dead-letter-exchange=my-dlx,x-max-length=10,x-overflow=reject-publish',] - [rabbitmq_queue_consume = false,] - [rabbitmq_address = '',] - [rabbitmq_vhost = '/',] - [rabbitmq_username = '',] - [rabbitmq_password = '',] - [rabbitmq_commit_on_select = false,] - [rabbitmq_max_rows_per_message = 1,] - [rabbitmq_handle_error_mode = 'default'] -``` - -必要なパラメータ: - -- `rabbitmq_host_port` – host:port (例: `localhost:5672`)。 -- `rabbitmq_exchange_name` – RabbitMQ のエクスチェンジ名。 -- `rabbitmq_format` – メッセージフォーマット。SQL の `FORMAT` 関数と同じ記法を使用します。例えば、`JSONEachRow`。詳細については、[Formats](../../../interfaces/formats.md) セクションを参照してください。 - -オプションのパラメータ: - -- `rabbitmq_exchange_type` – RabbitMQ のエクスチェンジのタイプ:`direct`, `fanout`, `topic`, `headers`, `consistent_hash`。デフォルト:`fanout`。 -- `rabbitmq_routing_key_list` – カンマ区切りのルーティングキーのリスト。 -- `rabbitmq_schema` – フォーマットがスキーマ定義を必要とする場合に使用するパラメータ。例えば、[Cap'n Proto](https://capnproto.org/) はスキーマファイルのパスとルートの `schema.capnp:Message` オブジェクトの名前を必要とします。 -- `rabbitmq_num_consumers` – テーブルごとの消費者の数。一つの消費者のスループットが不足している場合はより多くの消費者を指定してください。デフォルト:`1`。 -- `rabbitmq_num_queues` – 
キューの総数。この数を増やすことでパフォーマンスが大幅に向上する可能性があります。デフォルト:`1`。 -- `rabbitmq_queue_base` - キュー名のヒントを指定します。この設定の使用事例は以下に記載されています。 -- `rabbitmq_deadletter_exchange` - [デッドレターエクスチェンジ](https://www.rabbitmq.com/dlx.html) の名前を指定します。このエクスチェンジ名で別のテーブルを作成し、メッセージを収集できます。デフォルトではデッドレターエクスチェンジは指定されていません。 -- `rabbitmq_persistent` - 1 (true) に設定すると、挿入クエリの配信モードが 2 に設定されます(メッセージを 'persistent' とマークします)。デフォルト:`0`。 -- `rabbitmq_skip_broken_messages` – スキーマ不適合のメッセージのブロックごとの RabbitMQ メッセージパーサーの許容度。`rabbitmq_skip_broken_messages = N` の場合、エンジンは解析できない *N* の RabbitMQ メッセージをスキップします(メッセージはデータの行に相当します)。デフォルト:`0`。 -- `rabbitmq_max_block_size` - RabbitMQ からデータをフラッシュする前に収集される行の数。デフォルト:[max_insert_block_size](../../../operations/settings/settings.md#max_insert_block_size)。 -- `rabbitmq_flush_interval_ms` - RabbitMQ からデータをフラッシュするためのタイムアウト。デフォルト:[stream_flush_interval_ms](/operations/settings/settings#stream_flush_interval_ms)。 -- `rabbitmq_queue_settings_list` - キュー作成時に RabbitMQ 設定を設定するために使用されます。利用可能な設定:`x-max-length`, `x-max-length-bytes`, `x-message-ttl`, `x-expires`, `x-priority`, `x-max-priority`, `x-overflow`, `x-dead-letter-exchange`, `x-queue-type`。キューの `durable` 設定は自動的に有効になります。 -- `rabbitmq_address` - 接続のためのアドレス。この設定または `rabbitmq_host_port` を使用します。 -- `rabbitmq_vhost` - RabbitMQ の vhost。デフォルト: `'\''`。 -- `rabbitmq_queue_consume` - ユーザー定義のキューを使用し、RabbitMQ の設定を行わない(エクスチェンジ、キュー、バインディングを宣言しない)。デフォルト:`false`。 -- `rabbitmq_username` - RabbitMQ のユーザー名。 -- `rabbitmq_password` - RabbitMQ のパスワード。 -- `reject_unhandled_messages` - エラーが発生した場合にメッセージを拒否します(RabbitMQ に否定確認を送信します)。この設定は、`rabbitmq_queue_settings_list` に `x-dead-letter-exchange` が定義されている場合、自動的に有効になります。 -- `rabbitmq_commit_on_select` - セレクトクエリが実行されたときにメッセージをコミットします。デフォルト:`false`。 -- `rabbitmq_max_rows_per_message` — 行ベースフォーマットにおける一つの RabbitMQ メッセージあたりの最大行数。デフォルト : `1`。 -- `rabbitmq_empty_queue_backoff_start` — RabbitMQ キューが空のときにリードを再スケジュールするための開始バックオフポイント。 -- `rabbitmq_empty_queue_backoff_end` — RabbitMQ キューが空のときにリードを再スケジュールするための終了バックオフポイント。 -- `rabbitmq_handle_error_mode` — RabbitMQ エンジンのエラー処理方法。可能な値:default(メッセージの解析に失敗した場合に例外がスローされる)、stream(例外メッセージと生のメッセージが仮想カラム `_error` と `_raw_message` に保存される)。 - -* [ ] SSL 接続: - -`rabbitmq_secure = 1` または接続アドレスに `amqps` を使用します: `rabbitmq_address = 'amqps://guest:guest@localhost/vhost'`。 -使用されるライブラリのデフォルトの動作は、生成された TLS 接続が十分に安全であることを確認しないことです。証明書が期限切れ、自己署名、存在しない、または無効である場合でも、接続は単に許可されます。証明書の厳格なチェックは、将来的に実装される可能性があります。 - -また、rabbitmq 関連の設定と一緒にフォーマット設定を追加することもできます。 - -例: - -```sql - CREATE TABLE queue ( - key UInt64, - value UInt64, - date DateTime - ) ENGINE = RabbitMQ SETTINGS rabbitmq_host_port = 'localhost:5672', - rabbitmq_exchange_name = 'exchange1', - rabbitmq_format = 'JSONEachRow', - rabbitmq_num_consumers = 5, - date_time_input_format = 'best_effort'; -``` - -RabbitMQ サーバーの設定は、ClickHouse の設定ファイルを使用して追加する必要があります。 - -必要な設定: - -```xml - - root - clickhouse - -``` - -追加の設定: - -```xml - - clickhouse - -``` - -## 説明 {#description} - -`SELECT` はメッセージを読むためには特に有用ではありません(デバッグを除く)、なぜなら各メッセージは一度しか読み取れないからです。リアルタイムスレッドを作成することがより実用的です。それには、[materialized views](../../../sql-reference/statements/create/view.md) を使用します。そのためには: - -1. エンジンを利用して RabbitMQ のコンシューマーを作成し、それをデータストリームとみなします。 -2. 希望する構造を持つテーブルを作成します。 -3. 
エンジンからデータを変換し、以前に作成したテーブルに挿入する Materialized View を作成します。 - -`MATERIALIZED VIEW` がエンジンと結合すると、バックグラウンドでデータの収集を開始します。これにより、RabbitMQ からメッセージを継続的に受信し、`SELECT` を使用して必要なフォーマットに変換できます。 -一つの RabbitMQ テーブルは、好きなだけの Materialized View を持つことができます。 - -データは `rabbitmq_exchange_type` と指定された `rabbitmq_routing_key_list` に基づいてチャネルされることがあります。 -テーブルごとにエクスチェンジは 1 つまでしか存在できません。1 つのエクスチェンジは複数のテーブル間で共有でき、複数のテーブルへのルーティングを同時に可能にします。 - -エクスチェンジタイプのオプション: - -- `direct` - ルーティングはキーの正確な一致に基づいています。例:テーブルキーリスト:`key1,key2,key3,key4,key5`、メッセージキーはそれらのいずれかに等しいことができます。 -- `fanout` - キーに関わらず、すべてのテーブルにルーティング(エクスチェンジ名が同じ場合)。 -- `topic` - ルーティングはドットで区切られたキーのパターンに基づいています。例:`*.logs`, `records.*.*.2020`, `*.2018,*.2019,*.2020`。 -- `headers` - ルーティングは `key=value` の一致に基づき、設定 `x-match=all` または `x-match=any` があります。例:テーブルキーリスト:`x-match=all,format=logs,type=report,year=2020`。 -- `consistent_hash` - データはすべてのバウンドテーブル間で均等に分配されます(エクスチェンジ名が同じ場合)。このエクスチェンジタイプは RabbitMQ プラグイン `rabbitmq-plugins enable rabbitmq_consistent_hash_exchange` を使って有効化する必要があります。 - -`rabbitmq_queue_base` を設定することで次のようなケースで使用できます: - -- 異なるテーブルがキューを共有できるようにし、複数の消費者が同じキューに登録できるようにします。これによりパフォーマンスが向上します。 `rabbitmq_num_consumers` および/または `rabbitmq_num_queues` 設定を使用する場合、これらのパラメータが同じであればキューが正確に一致します。 -- 全てのメッセージが正常に消費されなかった場合に、特定の耐久性キューからの読み取りを復元できるようにします。特定のキューからの消費を再開するには、その名前を `rabbitmq_queue_base` 設定に設定し、`rabbitmq_num_consumers` および `rabbitmq_num_queues` を指定しないでください(デフォルトは 1)。特定のテーブルに宣言された全てのキューからの消費を再開したい場合は、同じ設定:`rabbitmq_queue_base`, `rabbitmq_num_consumers`, `rabbitmq_num_queues` を指定してください。デフォルトでは、キュー名はテーブルに固有のものになります。 -- キューが耐久性であり、自動的に削除されないため、再利用できます。(RabbitMQ CLI ツールを使用して削除できます。) - -パフォーマンスを向上させるため、受信したメッセージは [max_insert_block_size](/operations/settings/settings#max_insert_block_size) のサイズのブロックにグループ化されます。ブロックが [stream_flush_interval_ms](../../../operations/server-configuration-parameters/settings.md) ミリ秒以内で形成されなかった場合、データはブロックの完全性に関係なく、テーブルにフラッシュされます。 - -`rabbitmq_num_consumers` および/または `rabbitmq_num_queues` 設定が `rabbitmq_exchange_type` とともに指定された場合: - -- `rabbitmq-consistent-hash-exchange` プラグインを有効にする必要があります。 -- 発行されたメッセージの `message_id` プロパティを指定する必要があります(各メッセージ/バッチに対して一意)。 - -挿入クエリには、各発行されたメッセージに対して追加されるメッセージメタデータがあります: `messageID` と `republished` フラグ(再発行された場合は true) - メッセージヘッダーを介してアクセスできます。 - -挿入と Materialized View に同じテーブルを使用しないでください。 - -例: - -```sql - CREATE TABLE queue ( - key UInt64, - value UInt64 - ) ENGINE = RabbitMQ SETTINGS rabbitmq_host_port = 'localhost:5672', - rabbitmq_exchange_name = 'exchange1', - rabbitmq_exchange_type = 'headers', - rabbitmq_routing_key_list = 'format=logs,type=report,year=2020', - rabbitmq_format = 'JSONEachRow', - rabbitmq_num_consumers = 5; - - CREATE TABLE daily (key UInt64, value UInt64) - ENGINE = MergeTree() ORDER BY key; - - CREATE MATERIALIZED VIEW consumer TO daily - AS SELECT key, value FROM queue; - - SELECT key, value FROM daily ORDER BY key; -``` - -## 仮想カラム {#virtual-columns} - -- `_exchange_name` - RabbitMQ エクスチェンジ名。データ型: `String`。 -- `_channel_id` - メッセージを受信したコンシューマーが宣言された ChannelID。データ型: `String`。 -- `_delivery_tag` - 受信したメッセージの DeliveryTag。チャネルごとにスコープが設定されています。データ型: `UInt64`。 -- `_redelivered` - メッセージの `redelivered` フラグ。データ型: `UInt8`。 -- `_message_id` - 受信したメッセージの messageID;発行時に設定されていれば非空です。データ型: `String`。 -- `_timestamp` - 受信したメッセージのタイムスタンプ;発行時に設定されていれば非空です。データ型: `UInt64`。 - -`kafka_handle_error_mode='stream'` の場合の追加の仮想カラム: - -- `_raw_message` - 正しく解析できなかった生のメッセージ。データ型: `Nullable(String)`。 -- `_error` - 解析に失敗したときに発生した例外メッセージ。データ型: `Nullable(String)`。 - -注意: `_raw_message` と `_error` 
の仮想カラムは、解析中に例外が発生した場合のみ埋められ、メッセージが正常に解析された場合は常に `NULL` です。 - -## 注意点 {#caveats} - -[デフォルトカラム式](/sql-reference/statements/create/table.md/#default_values)(`DEFAULT`、`MATERIALIZED`、`ALIAS` など)をテーブル定義に指定することができますが、これらは無視されます。その代わり、カラムはそれぞれの型のデフォルト値で埋められます。 - -## データフォーマットのサポート {#data-formats-support} - -RabbitMQ エンジンは、ClickHouse でサポートされているすべての [フォーマット](../../../interfaces/formats.md) をサポートしています。 -一つの RabbitMQ メッセージ内の行数は、フォーマットが行ベースかブロックベースかに依存します: - -- 行ベースフォーマットの場合、一つの RabbitMQ メッセージ内の行数は `rabbitmq_max_rows_per_message` を設定することで制御できます。 -- ブロックベースフォーマットの場合、ブロックを小さな部分に分割することはできませんが、ブロック内の行数は一般設定 [max_block_size](/operations/settings/settings#max_block_size) によって制御できます。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/rabbitmq.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/rabbitmq.md.hash deleted file mode 100644 index f6c567e35dd..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/rabbitmq.md.hash +++ /dev/null @@ -1 +0,0 @@ -a71238cbd01305b7 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/redis.md b/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/redis.md deleted file mode 100644 index 83f5d6ccdb9..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/redis.md +++ /dev/null @@ -1,161 +0,0 @@ ---- -description: 'This engine allows integrating ClickHouse with Redis.' -sidebar_label: 'Redis' -sidebar_position: 175 -slug: '/engines/table-engines/integrations/redis' -title: 'Redis' ---- - -import CloudNotSupportedBadge from '@theme/badges/CloudNotSupportedBadge'; - - -# Redis - - - -このエンジンは、ClickHouseを[Redis](https://redis.io/)と統合することを可能にします。Redisはkvモデルを使用するため、`where k=xx`や`where k in (xx, xx)`のようにポイントでのみクエリを実行することを強く推奨します。 - -## テーブルの作成 {#creating-a-table} - -```sql -CREATE TABLE [IF NOT EXISTS] [db.]table_name -( - name1 [type1], - name2 [type2], - ... 
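-    -- PRIMARY KEY に指定できるのは 1 カラムのみで、その値が Redis のキーとしてシリアル化されます。
-    -- それ以外のカラムは、定義順に Redis の値としてシリアル化されます(下記の注記を参照)。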
-) ENGINE = Redis({host:port[, db_index[, password[, pool_size]]] | named_collection[, option=value [,..]] }) -PRIMARY KEY(primary_key_name); -``` - -**エンジンパラメータ** - -- `host:port` — Redisサーバーのアドレス。ポートを無視することができ、デフォルトのRedisポート6379が使用されます。 -- `db_index` — Redisのdbインデックスは0から15の範囲、デフォルトは0です。 -- `password` — ユーザーパスワード、デフォルトは空文字列です。 -- `pool_size` — Redisの最大接続プールサイズ、デフォルトは16です。 -- `primary_key_name` - カラムリストの任意のカラム名。 - -:::note シリアル化 -`PRIMARY KEY`は1つのカラムのみをサポートします。プライマリキーはRedisキーとしてバイナリにシリアル化されます。 -プライマリキー以外のカラムは対応する順序でRedis値としてバイナリにシリアル化されます。 -::: - -引数は[named collections](/operations/named-collections.md)を使用して渡すこともできます。この場合、`host`と`port`は別々に指定する必要があります。このアプローチは、本番環境で推奨されます。この時点で、named collectionsを使用してRedisに渡されるすべてのパラメータは必須です。 - -:::note フィルタリング -`key equals`または`in filtering`を伴うクエリは、Redisからの複数キーのルックアップに最適化されます。フィルタリングキーなしのクエリでは、全テーブルスキャンが発生し、これは重い操作です。 -::: - -## 使用例 {#usage-example} - -プレーン引数を使用して`Redis`エンジンでClickHouseにテーブルを作成します: - -```sql -CREATE TABLE redis_table -( - `key` String, - `v1` UInt32, - `v2` String, - `v3` Float32 -) -ENGINE = Redis('redis1:6379') PRIMARY KEY(key); -``` - -もしくは[named collections](/operations/named-collections.md)を使用して: - -```xml - - - localhost - 6379 - **** - 16 - s0 - - -``` - -```sql -CREATE TABLE redis_table -( - `key` String, - `v1` UInt32, - `v2` String, - `v3` Float32 -) -ENGINE = Redis(redis_creds) PRIMARY KEY(key); -``` - -挿入: - -```sql -INSERT INTO redis_table Values('1', 1, '1', 1.0), ('2', 2, '2', 2.0); -``` - -クエリ: - -```sql -SELECT COUNT(*) FROM redis_table; -``` - -```text -┌─count()─┐ -│ 2 │ -└─────────┘ -``` - -```sql -SELECT * FROM redis_table WHERE key='1'; -``` - -```text -┌─key─┬─v1─┬─v2─┬─v3─┐ -│ 1 │ 1 │ 1 │ 1 │ -└─────┴────┴────┴────┘ -``` - -```sql -SELECT * FROM redis_table WHERE v1=2; -``` - -```text -┌─key─┬─v1─┬─v2─┬─v3─┐ -│ 2 │ 2 │ 2 │ 2 │ -└─────┴────┴────┴────┘ -``` - -更新: - -プライマリキーは更新できないことに注意してください。 - -```sql -ALTER TABLE redis_table UPDATE v1=2 WHERE key='1'; -``` - -削除: - -```sql -ALTER TABLE redis_table DELETE WHERE key='1'; -``` - -トランケート: - -Redis dbを非同期でフラッシュします。また、`Truncate`はSYNCモードをサポートしています。 - -```sql -TRUNCATE TABLE redis_table SYNC; -``` - -結合: - -他のテーブルと結合します。 - -```sql -SELECT * FROM redis_table JOIN merge_tree_table ON merge_tree_table.key=redis_table.key; -``` - -## 制限事項 {#limitations} - -Redisエンジンは、`where k > xx`のようなスキャンクエリもサポートしていますが、いくつかの制限があります: -1. スキャンクエリは、リハッシュ中に非常にまれに重複したキーを生成する可能性があります。詳細は[Redis Scan](https://github.com/redis/redis/blob/e4d183afd33e0b2e6e8d1c79a832f678a04a7886/src/dict.c#L1186-L1269)を参照してください。 -2. 
スキャン中にキーが作成され、削除される可能性があるため、結果のデータセットは有効な時点を表さないことがあります。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/redis.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/redis.md.hash deleted file mode 100644 index 7d52647fa51..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/redis.md.hash +++ /dev/null @@ -1 +0,0 @@ -7a8900a5b7fcee3e diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/s3.md b/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/s3.md deleted file mode 100644 index af3fbdfbede..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/s3.md +++ /dev/null @@ -1,365 +0,0 @@ ---- -description: 'このエンジンは、Amazon S3 エコシステムとの統合を提供します。HDFS エンジンと同様ですが、S3 固有の機能を提供します。' -sidebar_label: 'S3' -sidebar_position: 180 -slug: '/engines/table-engines/integrations/s3' -title: 'S3 テーブルエンジン' ---- - - - - -# S3 テーブルエンジン - -このエンジンは、[Amazon S3](https://aws.amazon.com/s3/) エコシステムとの統合を提供します。このエンジンは、[HDFS](/engines/table-engines/integrations/hdfs) エンジンと似ていますが、S3 特有の機能を提供します。 - -## 例 {#example} - -```sql -CREATE TABLE s3_engine_table (name String, value UInt32) - ENGINE=S3('https://clickhouse-public-datasets.s3.amazonaws.com/my-test-bucket-768/test-data.csv.gz', 'CSV', 'gzip') - SETTINGS input_format_with_names_use_header = 0; - -INSERT INTO s3_engine_table VALUES ('one', 1), ('two', 2), ('three', 3); - -SELECT * FROM s3_engine_table LIMIT 2; -``` - -```text -┌─name─┬─value─┐ -│ one │ 1 │ -│ two │ 2 │ -└──────┴───────┘ -``` - -## テーブルの作成 {#creating-a-table} - -```sql -CREATE TABLE s3_engine_table (name String, value UInt32) - ENGINE = S3(path [, NOSIGN | aws_access_key_id, aws_secret_access_key,] format, [compression]) - [PARTITION BY expr] - [SETTINGS ...] -``` - -### エンジンパラメータ {#parameters} - -- `path` — バケット URL とファイルのパスです。読み取り専用モードで以下のワイルドカードをサポートします: `*`, `**`, `?`, `{abc,def}` および `{N..M}` ここで `N`, `M` は数字、`'abc'`, `'def'` は文字列です。詳細については [以下](#wildcards-in-path) を参照してください。 -- `NOSIGN` - このキーワードが認証情報の代わりに提供された場合、すべてのリクエストは署名されません。 -- `format` — ファイルの[フォーマット](/sql-reference/formats#formats-overview)です。 -- `aws_access_key_id`, `aws_secret_access_key` - [AWS](https://aws.amazon.com/) アカウントユーザーの長期認証情報です。これらを使用してリクエストを認証できます。パラメータはオプションです。認証情報が指定されていない場合、設定ファイルから使用されます。詳細については [S3 をデータストレージとして使用する](../mergetree-family/mergetree.md#table_engine-mergetree-s3) を参照してください。 -- `compression` — 圧縮タイプです。サポートされている値: `none`, `gzip/gz`, `brotli/br`, `xz/LZMA`, `zstd/zst`。パラメータはオプションです。デフォルトでは、ファイル拡張子によって圧縮を自動検出します。 - -### データキャッシュ {#data-cache} - -`S3` テーブルエンジンは、ローカルディスクへのデータキャッシュをサポートしています。この[セクション](/operations/storing-data.md/#using-local-cache)でファイルシステムキャッシュの設定オプションと使用方法を参照してください。キャッシュは、ストレージオブジェクトのパスとETagに基づいて作成されるため、ClickHouseは古いキャッシュバージョンを読み取ることがありません。 - -キャッシュを有効にするには、設定 `filesystem_cache_name = ''` と `enable_filesystem_cache = 1` を使用します。 - -```sql -SELECT * -FROM s3('http://minio:10000/clickhouse//test_3.csv', 'minioadmin', 'minioadminpassword', 'CSV') -SETTINGS filesystem_cache_name = 'cache_for_s3', enable_filesystem_cache = 1; -``` - -設定ファイルでキャッシュを定義する方法は2つあります。 - -1. ClickHouse 設定ファイルに次のセクションを追加します: - -```xml - - - - キャッシュディレクトリへのパス - 10Gi - - - -``` - -2. 
ClickHouse `storage_configuration` セクションからキャッシュ設定(したがってキャッシュストレージ)を再利用します。[ここで説明されています](/operations/storing-data.md/#using-local-cache) - -### PARTITION BY {#partition-by} - -`PARTITION BY` — オプションです。ほとんどの場合、パーティションキーは必要ありません。必要な場合でも、通常は月毎のパーティションキーを使用します。パーティショニングは、クエリを高速化しません(ORDER BY 式とは対照的に)。過度に詳細なパーティションニングを使用するべきではありません。クライアント識別子や名前でデータをパーティショニングしないでください(代わりに、クライアント識別子や名前を ORDER BY 式の最初のカラムにしてください)。 - -月別にパーティショニングするには、`toYYYYMM(date_column)` 式を使用します。ここで `date_column` は [Date](/sql-reference/data-types/date.md) 型の日付を持つカラムです。パーティション名は `"YYYYMM"` フォーマットになります。 - -### パーティション化されたデータのクエリ {#querying-partitioned-data} - -この例では、ClickHouse と MinIO を統合した[docker composeレシピ](https://github.com/ClickHouse/examples/tree/5fdc6ff72f4e5137e23ea075c88d3f44b0202490/docker-compose-recipes/recipes/ch-and-minio-S3)を使用しています。エンドポイントと認証情報を置き換えることで、S3 を使用して同じクエリを再現できるはずです。 - -`ENGINE` 設定内の S3 エンドポイントでは、パラメータトークン `{_partition_id}` が S3 オブジェクト(ファイル名)の一部として使用されており、SELECT クエリはその結果として生成されるオブジェクト名(例: `test_3.csv`)に対して選択します。 - -:::note -例に示されているように、パーティション化された S3 テーブルからのクエリは現在直接サポートされていませんが、S3 テーブル関数を使用して個々のパーティションをクエリすることで実現できます。 - -S3 にパーティション化されたデータを書き込む主なユースケースは、そのデータを別の ClickHouse システムに転送することを可能にすることです(たとえば、オンプレミスシステムから ClickHouse Cloud に移動する)。ClickHouse データセットは非常に大きいことが多く、ネットワークの信頼性が時々不完全であるため、データセットを部分セットで転送することが理にかなっています。したがって、パーティション化された書き込みが行われます。 -::: - -#### テーブルを作成する {#create-the-table} -```sql -CREATE TABLE p -( - `column1` UInt32, - `column2` UInt32, - `column3` UInt32 -) -ENGINE = S3( --- highlight-next-line - 'http://minio:10000/clickhouse//test_{_partition_id}.csv', - 'minioadmin', - 'minioadminpassword', - 'CSV') -PARTITION BY column3 -``` - -#### データを挿入する {#insert-data} -```sql -insert into p values (1, 2, 3), (3, 2, 1), (78, 43, 45) -``` - -#### パーティション 3 から選択 {#select-from-partition-3} - -:::tip -このクエリは s3 テーブル関数を使用します -::: - -```sql -SELECT * -FROM s3('http://minio:10000/clickhouse//test_3.csv', 'minioadmin', 'minioadminpassword', 'CSV') -``` -```response -┌─c1─┬─c2─┬─c3─┐ -│ 1 │ 2 │ 3 │ -└────┴────┴────┘ -``` - -#### パーティション 1 から選択 {#select-from-partition-1} -```sql -SELECT * -FROM s3('http://minio:10000/clickhouse//test_1.csv', 'minioadmin', 'minioadminpassword', 'CSV') -``` -```response -┌─c1─┬─c2─┬─c3─┐ -│ 3 │ 2 │ 1 │ -└────┴────┴────┘ -``` - -#### パーティション 45 から選択 {#select-from-partition-45} -```sql -SELECT * -FROM s3('http://minio:10000/clickhouse//test_45.csv', 'minioadmin', 'minioadminpassword', 'CSV') -``` -```response -┌─c1─┬─c2─┬─c3─┐ -│ 78 │ 43 │ 45 │ -└────┴────┴────┘ -``` - -#### 制限 {#limitation} - -`Select * from p` を実行しようとするかもしれませんが、上記のように、このクエリは失敗します。前のクエリを使用してください。 - -```sql -SELECT * FROM p -``` -```response -サーバーから受信した例外 (バージョン 23.4.1): -コード: 48. 
DB::Exception: localhost:9000 から受信。DB::Exception: パーティション化された S3 ストレージからの読み取りはまだ実装されていません。 (NOT_IMPLEMENTED) -``` - -## データの挿入 {#inserting-data} - -新しいファイルにのみ行を挿入できることに注意してください。マージサイクルやファイル分割操作はありません。ファイルが書き込まれた後、追加の挿入は失敗します。これを回避するには、`s3_truncate_on_insert` と `s3_create_new_file_on_insert` 設定を使用できます。詳細は [こちら](/integrations/s3#inserting-data) を参照してください。 - -## 仮想カラム {#virtual-columns} - -- `_path` — ファイルへのパス。タイプ: `LowCardinality(String)`。 -- `_file` — ファイルの名前。タイプ: `LowCardinality(String)`。 -- `_size` — ファイルのサイズ(バイト単位)。タイプ: `Nullable(UInt64)`。サイズが不明な場合、値は `NULL` です。 -- `_time` — ファイルの最終修正時間。タイプ: `Nullable(DateTime)`。時間が不明な場合、値は `NULL` です。 -- `_etag` — ファイルのETag。タイプ: `LowCardinality(String)`。etagが不明な場合、値は `NULL` です。 - -仮想カラムの詳細については [こちら](../../../engines/table-engines/index.md#table_engines-virtual_columns) を参照してください。 - -## 実装の詳細 {#implementation-details} - -- 読み取りと書き込みは並列に行うことができます。 -- サポートされていない: - - `ALTER` および `SELECT...SAMPLE` 操作。 - - インデックス。 - - [ゼロコピー](../../../operations/storing-data.md#zero-copy) レプリケーションは可能ですが、サポートされていません。 - - :::note ゼロコピー レプリケーションは本番用ではありません - ゼロコピー レプリケーションは ClickHouse バージョン 22.8 以降でデフォルトで無効になっています。この機能は本番環境での使用には推奨されません。 - ::: - -## パス内のワイルドカード {#wildcards-in-path} - -`path` 引数は、Bash のようなワイルドカードを使用して複数のファイルを指定できます。処理されるファイルは存在し、完全なパスパターンに一致する必要があります。ファイルのリストは `SELECT` 時に決定されます(`CREATE` 時ではありません)。 - -- `*` — `/` を含む任意の文字数と任意の文字に置き換え、空文字も含めます。 -- `**` — `/` を含む任意の文字数と任意の文字に置き換え、空文字も含めます。 -- `?` — 任意の単一文字に置き換えます。 -- `{some_string,another_string,yet_another_one}` — 文字列 `'some_string', 'another_string', 'yet_another_one'` のいずれかに置き換えます。 -- `{N..M}` — N から M までの範囲内の任意の数字に置き換えます。N と M は先頭ゼロを持つことができ、例: `000..078`。 - -`{}` を含む構文は、[リモート](../../../sql-reference/table-functions/remote.md) テーブル関数に似ています。 - -:::note -ファイルのリストに先頭ゼロを持つ数値範囲が含まれている場合は、各桁を個別に中括弧で囲むか、`?` を使用してください。 -::: - -**ワイルドカードを使った例 1** - -`file-000.csv`, `file-001.csv`, ... , `file-999.csv` という名前のファイルを持つテーブルを作成します: - -```sql -CREATE TABLE big_table (name String, value UInt32) - ENGINE = S3('https://clickhouse-public-datasets.s3.amazonaws.com/my-bucket/my_folder/file-{000..999}.csv', 'CSV'); -``` - -**ワイルドカードを使った例 2** - -次のURIのCSV形式のファイルが複数あるとします: - -- 'https://clickhouse-public-datasets.s3.amazonaws.com/my-bucket/some_folder/some_file_1.csv' -- 'https://clickhouse-public-datasets.s3.amazonaws.com/my-bucket/some_folder/some_file_2.csv' -- 'https://clickhouse-public-datasets.s3.amazonaws.com/my-bucket/some_folder/some_file_3.csv' -- 'https://clickhouse-public-datasets.s3.amazonaws.com/my-bucket/another_folder/some_file_1.csv' -- 'https://clickhouse-public-datasets.s3.amazonaws.com/my-bucket/another_folder/some_file_2.csv' -- 'https://clickhouse-public-datasets.s3.amazonaws.com/my-bucket/another_folder/some_file_3.csv' - -これらの6つのファイルから構成されるテーブルを作成するには、いくつかの方法があります。 - -1. ファイル接尾辞の範囲を指定する: - -```sql -CREATE TABLE table_with_range (name String, value UInt32) - ENGINE = S3('https://clickhouse-public-datasets.s3.amazonaws.com/my-bucket/{some,another}_folder/some_file_{1..3}', 'CSV'); -``` - -2. `some_file_` プレフィックスを持つすべてのファイルを取得する(両フォルダーにはそのようなプレフィックスの余分なファイルがない必要があります): - -```sql -CREATE TABLE table_with_question_mark (name String, value UInt32) - ENGINE = S3('https://clickhouse-public-datasets.s3.amazonaws.com/my-bucket/{some,another}_folder/some_file_?', 'CSV'); -``` - -3. 
両方のフォルダー内のすべてのファイルを取得する(すべてのファイルはクエリで記述された形式およびスキーマを満たす必要があります): - -```sql -CREATE TABLE table_with_asterisk (name String, value UInt32) - ENGINE = S3('https://clickhouse-public-datasets.s3.amazonaws.com/my-bucket/{some,another}_folder/*', 'CSV'); -``` - -## ストレージ設定 {#storage-settings} - -- [s3_truncate_on_insert](/operations/settings/settings.md#s3_truncate_on_insert) - 挿入前にファイルを切り詰めることを許可します。デフォルトでは無効です。 -- [s3_create_new_file_on_insert](/operations/settings/settings.md#s3_create_new_file_on_insert) - フォーマットにサフィックスがある場合、各挿入に対して新しいファイルを作成することを許可します。デフォルトでは無効です。 -- [s3_skip_empty_files](/operations/settings/settings.md#s3_skip_empty_files) - 読み取り中に空のファイルをスキップすることを許可します。デフォルトで有効です。 - -## S3 に関連する設定 {#settings} - -以下の設定は、クエリの実行前に設定するか、設定ファイルに配置することができます。 - -- `s3_max_single_part_upload_size` — シングルパートアップロードを使用して S3 にアップロードするオブジェクトの最大サイズ。デフォルト値は `32Mb` です。 -- `s3_min_upload_part_size` — マルチパートアップロード中にアップロードするパートの最小サイズ。[S3 マルチパートアップロード](https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html) へのデフォルト値は `16Mb` です。 -- `s3_max_redirects` — 許可される S3 リダイレクトホップの最大数。デフォルト値は `10` です。 -- `s3_single_read_retries` — シングルリード中の最大再試行回数。デフォルト値は `4` です。 -- `s3_max_put_rps` — スロットリングがかかる前の最大 PUT リクエスト毎秒レート。デフォルト値は `0`(無制限)です。 -- `s3_max_put_burst` — リクエスト毎秒制限に達する前に同時に発行できる最大リクエスト数。デフォルト(`0` 値)では `s3_max_put_rps` と等しくなります。 -- `s3_max_get_rps` — スロットリングがかかる前の最大 GET リクエスト毎秒レート。デフォルト値は `0`(無制限)です。 -- `s3_max_get_burst` — リクエスト毎秒制限に達する前に同時に発行できる最大リクエスト数。デフォルト(`0` 値)では `s3_max_get_rps` と等しくなります。 -- `s3_upload_part_size_multiply_factor` - `s3_min_upload_part_size` をこのファクターで掛け算し、`s3_multiply_parts_count_threshold` パーツが S3 にアップロードされるたびにそれを適用します。デフォルト値は `2` です。 -- `s3_upload_part_size_multiply_parts_count_threshold` - この数のパーツが S3 にアップロードされるたびに、`s3_min_upload_part_size` は `s3_upload_part_size_multiply_factor` で掛け算されます。デフォルト値は `500` です。 -- `s3_max_inflight_parts_for_one_file` - 1つのオブジェクトに対して同時に実行できる PUT リクエストの数を制限します。その数は制限すべきです。値が `0` の場合は無制限です。デフォルト値は `20` です。各インフライトパーツには、最初の `s3_upload_part_size_multiply_factor` パーツに対して `s3_min_upload_part_size` サイズのバッファがあり、ファイルが十分に大きい場合はより大きくなります。デフォルトの設定では、アップロードされたファイルは、8G未満のファイルに対して `320Mb` を超えません。大きなファイルの消費は増加します。 - -セキュリティ考慮事項: 悪意のあるユーザーが任意の S3 URL を指定できる場合、`s3_max_redirects` はゼロに設定して [SSRF](https://en.wikipedia.org/wiki/Server-side_request_forgery) 攻撃を回避する必要があります。または、サーバー設定で `remote_host_filter` を指定する必要もあります。 - -## エンドポイントベースの設定 {#endpoint-settings} - -次の設定は、設定ファイルの指定されたエンドポイントに対して指定できます(URL の完全なプレフィックスによって一致します): - -- `endpoint` — エンドポイントのプレフィックスを指定します。必須です。 -- `access_key_id` と `secret_access_key` — 指定されたエンドポイントで使用する認証情報を指定します。オプションです。 -- `use_environment_credentials` — `true` に設定されている場合、S3 クライアントは指定されたエンドポイントに対して環境変数および [Amazon EC2](https://en.wikipedia.org/wiki/Amazon_Elastic_Compute_Cloud) メタデータから資格情報を取得しようとします。オプションで、デフォルト値は `false` です。 -- `region` — S3 リージョン名を指定します。オプションです。 -- `use_insecure_imds_request` — `true` に設定されている場合、S3 クライアントは Amazon EC2 メタデータから資格情報を取得する際に非安全な IMDS リクエストを使用します。オプションで、デフォルト値は `false` です。 -- `expiration_window_seconds` — 有効期限に基づく認証情報が失効したかどうかをチェックするための許容期間。オプションで、デフォルト値は `120` です。 -- `no_sign_request` - すべての認証情報を無視してリクエストを署名しないようにします。パブリックバケットへのアクセスに便利です。 -- `header` — 指定された HTTP ヘッダーを指定されたエンドポイントへのリクエストに追加します。オプションで、複数回指定できます。 -- `access_header` - 別のソースからの他の認証情報がない場合に、指定された HTTP ヘッダーを指定されたエンドポイントへのリクエストに追加します。 -- `server_side_encryption_customer_key_base64` — 指定されている場合、S3 オブジェクトに対する SSE-C 暗号化アクセスに必要なヘッダーが設定されます。オプションです。 -- `server_side_encryption_kms_key_id` - 指定されている場合、[SSE-KMS 
暗号化](https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingKMSEncryption.html) で S3 オブジェクトにアクセスするために必要なヘッダーが設定されます。空の文字列が指定された場合、AWS 管理の S3 キーが使用されます。オプションです。 -- `server_side_encryption_kms_encryption_context` - `server_side_encryption_kms_key_id` と一緒に指定された場合、SSE-KMS のための暗号化コンテキストヘッダーが設定されます。オプションです。 -- `server_side_encryption_kms_bucket_key_enabled` - `server_side_encryption_kms_key_id` とともに指定された場合、SSE-KMS のための S3 バケットキーを有効にするヘッダーが設定されます。オプションで、`true` または `false` で指定できます。デフォルトでは何も設定されず(バケットレベルの設定によく合います)。 -- `max_single_read_retries` — シングル読み取り時の最大再試行回数。デフォルト値は `4` です。オプションです。 -- `max_put_rps`, `max_put_burst`, `max_get_rps` および `max_get_burst` - クエリごとの代わりに特定のエンドポイントで使用されるスロットリング設定(上記の説明を参照)。オプションです。 - -**例:** - -```xml - - - https://clickhouse-public-datasets.s3.amazonaws.com/my-test-bucket-768/ - - - - - - - - - - - - - - - -``` - -## アーカイブの操作 {#working-with-archives} - -次のURIのいくつかのアーカイブファイルがあるとします: - -- 'https://s3-us-west-1.amazonaws.com/umbrella-static/top-1m-2018-01-10.csv.zip' -- 'https://s3-us-west-1.amazonaws.com/umbrella-static/top-1m-2018-01-11.csv.zip' -- 'https://s3-us-west-1.amazonaws.com/umbrella-static/top-1m-2018-01-12.csv.zip' - -これらのアーカイブからデータを抽出することは、:: を使用して可能です。グロブは、URL 部分とアーカイブ内のファイル名に該当する部分の両方で使用できます。 - -```sql -SELECT * -FROM s3( - 'https://s3-us-west-1.amazonaws.com/umbrella-static/top-1m-2018-01-1{0..2}.csv.zip :: *.csv' -); -``` - -:::note -ClickHouse は次の3つのアーカイブ形式をサポートしています: -ZIP -TAR -7Z -ZIP および TAR アーカイブには、サポートされているストレージの場所からアクセスできますが、7Z アーカイブは ClickHouse がインストールされているローカルファイルシステムからのみ読み取ることができます。 -::: - - -## パブリックバケットへのアクセス {#accessing-public-buckets} - -ClickHouse はさまざまなタイプのソースから資格情報を取得しようとします。時々、パブリックなバケットへのアクセス時に問題が発生することがあり、クライアントが `403` エラーコードを返すことがあります。この問題は、`NOSIGN` キーワードを使用して、クライアントがすべての認証情報を無視し、リクエストを署名しないように強制することで回避できます。 - -```sql -CREATE TABLE big_table (name String, value UInt32) - ENGINE = S3('https://datasets-documentation.s3.eu-west-3.amazonaws.com/aapl_stock.csv', NOSIGN, 'CSVWithNames'); -``` - -## パフォーマンスの最適化 {#optimizing-performance} - -s3 関数のパフォーマンスを最適化する詳細については、[こちらの詳細ガイド](/integrations/s3/performance) を参照してください。 - -## 参照 {#see-also} - -- [s3 テーブル関数](../../../sql-reference/table-functions/s3.md) -- [ClickHouse と S3 の統合](/integrations/s3) diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/s3.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/s3.md.hash deleted file mode 100644 index b236b4404a0..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/s3.md.hash +++ /dev/null @@ -1 +0,0 @@ -f6c87801dd861bf6 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/s3queue.md b/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/s3queue.md deleted file mode 100644 index 24d92868d03..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/s3queue.md +++ /dev/null @@ -1,411 +0,0 @@ ---- -description: 'This engine provides integration with the Amazon S3 ecosystem and - allows streaming imports. Similar to the Kafka and RabbitMQ engines, but provides - S3-specific features.' 
-sidebar_label: 'S3Queue' -sidebar_position: 181 -slug: '/engines/table-engines/integrations/s3queue' -title: 'S3Queue テーブルエンジン' ---- - -import ScalePlanFeatureBadge from '@theme/badges/ScalePlanFeatureBadge' - - -# S3Queue テーブルエンジン - -このエンジンは [Amazon S3](https://aws.amazon.com/s3/) エコシステムと統合されており、ストリーミングインポートを可能にします。このエンジンは [Kafka](../../../engines/table-engines/integrations/kafka.md) や [RabbitMQ](../../../engines/table-engines/integrations/rabbitmq.md) エンジンに似ていますが、S3固有の機能を提供します。 - -## テーブルの作成 {#creating-a-table} - -```sql -CREATE TABLE s3_queue_engine_table (name String, value UInt32) - ENGINE = S3Queue(path, [NOSIGN, | aws_access_key_id, aws_secret_access_key,] format, [compression], [headers]) - [SETTINGS] - [mode = '',] - [after_processing = 'keep',] - [keeper_path = '',] - [loading_retries = 0,] - [processing_threads_num = 16,] - [parallel_inserts = false,] - [enable_logging_to_queue_log = true,] - [last_processed_path = "",] - [tracked_files_limit = 1000,] - [tracked_file_ttl_sec = 0,] - [polling_min_timeout_ms = 1000,] - [polling_max_timeout_ms = 10000,] - [polling_backoff_ms = 0,] - [cleanup_interval_min_ms = 10000,] - [cleanup_interval_max_ms = 30000,] - [buckets = 0,] - [list_objects_batch_size = 1000,] - [enable_hash_ring_filtering = 0,] - [max_processed_files_before_commit = 100,] - [max_processed_rows_before_commit = 0,] - [max_processed_bytes_before_commit = 0,] - [max_processing_time_sec_before_commit = 0,] -``` - -:::warning -`24.7`未満では、`mode`、`after_processing`、`keeper_path`を除くすべての設定に`s3queue_`プレフィックスを使用する必要があります。 -::: - -**エンジンパラメータ** - -`S3Queue`のパラメータは、`S3`テーブルエンジンがサポートするものと同じです。パラメータセクションの詳細は[こちら](../../../engines/table-engines/integrations/s3.md#parameters)を参照してください。 - -**例** - -```sql -CREATE TABLE s3queue_engine_table (name String, value UInt32) -ENGINE=S3Queue('https://clickhouse-public-datasets.s3.amazonaws.com/my-test-bucket-768/*', 'CSV', 'gzip') -SETTINGS - mode = 'unordered'; -``` - -名前付きコレクションを使用する場合: - -```xml - - - - 'https://clickhouse-public-datasets.s3.amazonaws.com/my-test-bucket-768/* - test - test - - - -``` - -```sql -CREATE TABLE s3queue_engine_table (name String, value UInt32) -ENGINE=S3Queue(s3queue_conf, format = 'CSV', compression_method = 'gzip') -SETTINGS - mode = 'ordered'; -``` - -## 設定 {#settings} - -テーブルに設定された設定のリストを取得するには、`system.s3_queue_settings`テーブルを使用します。`24.10`から利用可能です。 - -### mode {#mode} - -可能な値: - -- unordered — 順序が保証されないモードでは、すべての処理済みファイルの集合がZooKeeper内の永続ノードで追跡されます。 -- ordered — 順序付きモードでは、ファイルは字典順序で処理されます。つまり、ファイル名が'BBB'のファイルがある時、それに後から追加されたファイル名'AA'は無視されます。成功裏に消費されたファイルの最大名(字典順に意味する)と、処理に失敗し再試行されるファイルの名前のみがZooKeeperに保存されます。 - -デフォルト値: `ordered` は24.6未満のバージョンでは。24.6以降ではデフォルト値はなく、手動で指定する必要があります。以前のバージョンで作成されたテーブルのデフォルト値は互換性のため`Ordered`のままです。 - -### after_processing {#after_processing} - -成功裏に処理した後にファイルを削除するか保持するか。 -可能な値: - -- keep. -- delete. - -デフォルト値: `keep`. - -### keeper_path {#keeper_path} - -ZooKeeper内のパスは、テーブルエンジンの設定として指定するか、グローバル設定から提供されたパスとテーブルのUUIDから形成されたデフォルトパスを使用できます。 -可能な値: - -- String. - -デフォルト値: `/`. - -### s3queue_loading_retries {#loading_retries} - -指定した回数までファイルの読み込みを再試行します。デフォルトでは、リトライはありません。 -可能な値: - -- 正の整数。 - -デフォルト値: `0`. 
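-
-ここまでの `mode`、`after_processing`、`keeper_path`、`loading_retries` を組み合わせた最小の例を以下に示します(バケットの URL と Keeper のパスは説明用の架空のもので、`s3queue_` プレフィックスなしの設定名はバージョン `24.7` 以降を想定しています):
-
-```sql
-CREATE TABLE s3queue_with_settings (name String, value UInt32)
-ENGINE = S3Queue('https://my-example-bucket.s3.amazonaws.com/data/*.csv', 'CSV')
-SETTINGS
-    mode = 'unordered',                            -- 処理順序を保証しない
-    after_processing = 'delete',                   -- 処理に成功したファイルを削除する
-    keeper_path = '/clickhouse/s3queue/my_queue',  -- ZooKeeper 上のメタデータの保存先
-    loading_retries = 3;                           -- 読み込み失敗時に最大 3 回再試行する
-```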
- -### s3queue_processing_threads_num {#processing_threads_num} - -処理を行うスレッドの数。`Unordered`モードのみに適用されます。 - -デフォルト値: CPUの数または16。 - -### s3queue_parallel_inserts {#parallel_inserts} - -デフォルトでは`processing_threads_num`は1つの`INSERT`を生成しますので、ファイルをダウンロードし、複数のスレッドで解析することしかしません。 -しかし、これにより並列性が制限されるので、より良いスループットのためには`parallel_inserts=true`を使用すると、データを並行して挿入できるようになります(ただし、これによりMergeTreeファミリーの生成されるデータパーツの数が増えることに注意してください)。 - -:::note -`INSERT`は`max_process*_before_commit`設定に従って生成されます。 -::: - -デフォルト値: `false`. - -### s3queue_enable_logging_to_s3queue_log {#enable_logging_to_s3queue_log} - -`system.s3queue_log`へのログ記録を有効にします。 - -デフォルト値: `0`. - -### s3queue_polling_min_timeout_ms {#polling_min_timeout_ms} - -ClickHouseが次のポーリング試行を行う前に待機する最小時間(ミリ秒)を指定します。 - -可能な値: - -- 正の整数。 - -デフォルト値: `1000`. - -### s3queue_polling_max_timeout_ms {#polling_max_timeout_ms} - -ClickHouseが次のポーリング試行を開始する前に待機する最大時間(ミリ秒)を定義します。 - -可能な値: - -- 正の整数。 - -デフォルト値: `10000`. - -### s3queue_polling_backoff_ms {#polling_backoff_ms} - -新しいファイルが見つからないときに、前回のポーリング間隔に追加される待機時間を決定します。次のポーリングは、前回の間隔とこのバックオフ値の合計、または最大間隔のうち、いずれか低い方の後に発生します。 - -可能な値: - -- 正の整数。 - -デフォルト値: `0`. - -### s3queue_tracked_files_limit {#tracked_files_limit} - -'unordered'モードの場合、ZooKeeperノードの数を制限できます。'ordered'モードでは何もしません。 -制限に達した場合、最も古い処理済みファイルがZooKeeperノードから削除され、再処理されます。 - -可能な値: - -- 正の整数。 - -デフォルト値: `1000`. - -### s3queue_tracked_file_ttl_sec {#tracked_file_ttl_sec} - -'unordered'モードの場合、ZooKeeperノード内で処理済みファイルを保存する最大秒数(デフォルトでは無限)で、'ordered'モードでは何もしません。 -指定した秒数が経過した後、ファイルは再インポートされます。 - -可能な値: - -- 正の整数。 - -デフォルト値: `0`. - -### s3queue_cleanup_interval_min_ms {#cleanup_interval_min_ms} - -'Ordered'モードの場合。バックグラウンドタスクの再スケジュール間隔の最小境界を定義します。このタスクは、追跡されたファイルのTTLと最大追跡ファイルセットを維持する役割を果たします。 - -デフォルト値: `10000`. - -### s3queue_cleanup_interval_max_ms {#cleanup_interval_max_ms} - -'Ordered'モードの場合。バックグラウンドタスクの再スケジュール間隔の最大境界を定義します。このタスクは、追跡されたファイルのTTLと最大追跡ファイルセットを維持する役割を果たします。 - -デフォルト値: `30000`. - -### s3queue_buckets {#buckets} - -'Ordered'モードの場合。`24.6`以降から利用可能です。S3Queueテーブルの複数のレプリカがあり、いずれも同じメタデータディレクトリを保持している場合、`s3queue_buckets`の値は少なくともレプリカの数と同じである必要があります。`s3queue_processing_threads`設定も使用する場合は、`s3queue_buckets`の設定値をさらに増加させることが合理的です。これは、`S3Queue`の処理の実際の並行性を定義します。 - -## S3関連の設定 {#s3-settings} - -エンジンはすべてのS3関連の設定をサポートしています。S3設定の詳細については[こちら](../../../engines/table-engines/integrations/s3.md)を参照してください。 - -## S3 ロールベースアクセス {#s3-role-based-access} - - - -s3Queueテーブルエンジンはロールベースのアクセスをサポートしています。 -バケットにアクセスするためのロールを設定する手順については、[こちらのドキュメント](/cloud/security/secure-s3)を参照してください。 - -ロールが設定されると、`roleARN`を下記のように`extra_credentials`パラメータを介して渡すことができます: -```sql -CREATE TABLE s3_table -( - ts DateTime, - value UInt64 -) -ENGINE = S3Queue( - 'https:///*.csv', - extra_credentials(role_arn = 'arn:aws:iam::111111111111:role/') - ,'CSV') -SETTINGS - ... 
-``` - -## S3Queue オーダーモード {#ordered-mode} - -`S3Queue`処理モードは、ZooKeeper内のメタデータをより少なく保存することを可能にしますが、時間的に後から追加されたファイルがアルファベット順に大きい名前を持つ必要があるという制限があります。 - -`S3Queue`の`ordered`モードは、`unordered`モードと同様に`(s3queue_)processing_threads_num`設定をサポートしています(`s3queue_`プレフィックスはオプショナルです)。この設定により、サーバー上で`S3`ファイルの処理を行うスレッドの数を制御できます。 -さらに、`ordered`モードは`(s3queue_)buckets`と呼ばれる別の設定も導入しています。これは「論理スレッド」を意味します。これは分散シナリオでのことで、`S3Queue`テーブルのレプリカが複数のサーバー上に存在し、この設定が処理ユニットの数を定義します。例として、各`S3Queue`レプリカの各処理スレッドが特定のファイル処理のために特定の`bucket`をロックしようとします。各`bucket`はファイル名のハッシュによって特定のファイルに割り当てられます。したがって、分散シナリオにおいては、`(s3queue_)buckets`設定がレプリカの数と同じ、またはそれ以上であることが強く推奨されます。この設定はレプリカの数よりも多くても問題ありません。最も最適なシナリオは、`(s3queue_)buckets`設定が`number_of_replicas`と`(s3queue_)processing_threads_num`の掛け算に等しいことです。 -`(s3queue_)processing_threads_num`設定はバージョン`24.6`以前では使用が推奨されていません。 -`(s3queue_)buckets`設定はバージョン`24.6`以降から利用可能です。 - -## 説明 {#description} - -`SELECT`はストリーミングインポートにはそれほど有用ではありません(デバッグを除く)、なぜなら各ファイルは一度だけインポートできるからです。したがって、指定されたS3のパスからデータストリームとして消費するためのテーブルを作成するのがより実用的です。 -1. エンジンを使用してS3内の指定パスから消費するためのテーブルを作成し、それをデータストリームと見なします。 -2. 必要な構造でテーブルを作成します。 -3. エンジンからデータを変換し、事前に作成されたテーブルに格納するマテリアライズドビューを作成します。 - -`MATERIALIZED VIEW`がエンジンと接続すると、バックグラウンドでデータを収集し始めます。 - -例: - -```sql - CREATE TABLE s3queue_engine_table (name String, value UInt32) - ENGINE=S3Queue('https://clickhouse-public-datasets.s3.amazonaws.com/my-test-bucket-768/*', 'CSV', 'gzip') - SETTINGS - mode = 'unordered'; - - CREATE TABLE stats (name String, value UInt32) - ENGINE = MergeTree() ORDER BY name; - - CREATE MATERIALIZED VIEW consumer TO stats - AS SELECT name, value FROM s3queue_engine_table; - - SELECT * FROM stats ORDER BY name; -``` - -## 仮想カラム {#virtual-columns} - -- `_path` — ファイルへのパス。 -- `_file` — ファイル名。 - -仮想カラムに関する詳細は[こちら](../../../engines/table-engines/index.md#table_engines-virtual_columns)を参照してください。 - -## パス内のワイルドカード {#wildcards-in-path} - -`path`引数は、bash風のワイルドカードを使用して複数のファイルを指定できます。処理されるファイルは存在し、全体のパスパターンと一致している必要があります。ファイルのリストは`SELECT`の際に決定され(`CREATE`時ではありません)。 - -- `*` — `/`を除く任意の文字の数を表し、空文字列も含まれます。 -- `**` — `/`を含む任意の字符の数を表し、空文字列も含まれます。 -- `?` — 任意の単一文字を表します。 -- `{some_string,another_string,yet_another_one}` — 任意の文字列`'some_string', 'another_string', 'yet_another_one'`を表します。 -- `{N..M}` — NからMまでの範囲内の任意の数を表し、両端を含みます。NおよびMには先頭ゼロを含めることができます(例: `000..078`)。 - -`{}`を使用した構文は、[remote](../../../sql-reference/table-functions/remote.md)テーブル関数に似ています。 - -## 制限事項 {#limitations} - -1. 重複行が発生する可能性がある理由: - -- ファイル処理の途中で解析中に例外が発生し、リトライが`s3queue_loading_retries`で有効になっている場合。 - -- `S3Queue`が複数のサーバーで設定されており、同じパスのZooKeeperを指している場合、処理されたファイルのコミットが完了する前にキーパーセッションが期限切れになり、別のサーバーがファイル処理を引き継ぐことにより、最初のサーバーによって部分的または完全に処理されたファイルの処理が行われる可能性があります。 - -- サーバーの異常終了。 - -2. `S3Queue`が複数のサーバーで設定され、同じパスのZooKeeperを指している場合、`Ordered`モードが使用されると、`s3queue_loading_retries`は機能しません。これはすぐに修正される予定です。 - -## 内部構造の把握 {#introspection} - -内部構造を把握するには、`system.s3queue`のステートレステーブルと`system.s3queue_log`の永続テーブルを使用します。 - -1. 
`system.s3queue`。このテーブルは永続的でなく、現在処理中の`S3Queue`のメモリ内状態を表示します:現在どのファイルが処理中か、どのファイルが処理済みまたは失敗したか。 - -```sql -┌─statement──────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┐ -│ CREATE TABLE system.s3queue -( - `database` String, - `table` String, - `file_name` String, - `rows_processed` UInt64, - `status` String, - `processing_start_time` Nullable(DateTime), - `processing_end_time` Nullable(DateTime), - `ProfileEvents` Map(String, UInt64), - `exception` String -) -ENGINE = SystemS3Queue -COMMENT 'Contains in-memory state of S3Queue metadata and currently processed rows per file.' │ -└────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┘ -``` - -例: - -```sql - -SELECT * -FROM system.s3queue - -Row 1: -────── -zookeeper_path: /clickhouse/s3queue/25ea5621-ae8c-40c7-96d0-cec959c5ab88/3b3f66a1-9866-4c2e-ba78-b6bfa154207e -file_name: wikistat/original/pageviews-20150501-030000.gz -rows_processed: 5068534 -status: Processed -processing_start_time: 2023-10-13 13:09:48 -processing_end_time: 2023-10-13 13:10:31 -ProfileEvents: {'ZooKeeperTransactions':3,'ZooKeeperGet':2,'ZooKeeperMulti':1,'SelectedRows':5068534,'SelectedBytes':198132283,'ContextLock':1,'S3QueueSetFileProcessingMicroseconds':2480,'S3QueueSetFileProcessedMicroseconds':9985,'S3QueuePullMicroseconds':273776,'LogTest':17} -exception: -``` - -2. `system.s3queue_log`。永続テーブル。`system.s3queue`と同じ情報を持ちますが、`processed`および`failed`ファイルについてです。 - -テーブルは以下の構造を持っています: - -```sql -SHOW CREATE TABLE system.s3queue_log - -Query id: 0ad619c3-0f2a-4ee4-8b40-c73d86e04314 - -┌─statement──────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┐ -│ CREATE TABLE system.s3queue_log -( - `event_date` Date, - `event_time` DateTime, - `table_uuid` String, - `file_name` String, - `rows_processed` UInt64, - `status` Enum8('Processed' = 0, 'Failed' = 1), - `processing_start_time` Nullable(DateTime), - `processing_end_time` Nullable(DateTime), - `ProfileEvents` Map(String, UInt64), - `exception` String -) -ENGINE = MergeTree -PARTITION BY toYYYYMM(event_date) -ORDER BY (event_date, event_time) -SETTINGS index_granularity = 8192 │ -└────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┘ -``` - -`system.s3queue_log`を使用するためには、その設定をサーバーの設定ファイルに定義する必要があります: - -```xml - - system - s3queue_log
-</s3queue_log>
-``` - -例: - -```sql -SELECT * -FROM system.s3queue_log - -Row 1: -────── -event_date: 2023-10-13 -event_time: 2023-10-13 13:10:12 -table_uuid: -file_name: wikistat/original/pageviews-20150501-020000.gz -rows_processed: 5112621 -status: Processed -processing_start_time: 2023-10-13 13:09:48 -processing_end_time: 2023-10-13 13:10:12 -ProfileEvents: {'ZooKeeperTransactions':3,'ZooKeeperGet':2,'ZooKeeperMulti':1,'SelectedRows':5112621,'SelectedBytes':198577687,'ContextLock':1,'S3QueueSetFileProcessingMicroseconds':1934,'S3QueueSetFileProcessedMicroseconds':17063,'S3QueuePullMicroseconds':5841972,'LogTest':17} -exception: -``` diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/s3queue.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/s3queue.md.hash deleted file mode 100644 index f094dbd2c1f..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/s3queue.md.hash +++ /dev/null @@ -1 +0,0 @@ -f5fbb18ef240a13a diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/sqlite.md b/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/sqlite.md deleted file mode 100644 index 83d49b7699b..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/sqlite.md +++ /dev/null @@ -1,68 +0,0 @@ ---- -description: 'The engine allows to import and export data to SQLite and supports - queries to SQLite tables directly from ClickHouse.' -sidebar_label: 'SQLite' -sidebar_position: 185 -slug: '/engines/table-engines/integrations/sqlite' -title: 'SQLite' ---- - -import CloudNotSupportedBadge from '@theme/badges/CloudNotSupportedBadge'; - - -# SQLite - - - -このエンジンは、SQLiteへのデータのインポートおよびエクスポートを可能にし、ClickHouseからSQLiteテーブルへのクエリをサポートします。 - -## テーブルの作成 {#creating-a-table} - -```sql - CREATE TABLE [IF NOT EXISTS] [db.]table_name - ( - name1 [type1], - name2 [type2], ... 
- ) ENGINE = SQLite('db_path', 'table') -``` - -**エンジンパラメータ** - -- `db_path` — データベースを持つSQLiteファイルのパス。 -- `table` — SQLiteデータベース内のテーブルの名前。 - -## 使用例 {#usage-example} - -SQLiteテーブルを作成するクエリを示します: - -```sql -SHOW CREATE TABLE sqlite_db.table2; -``` - -```text -CREATE TABLE SQLite.table2 -( - `col1` Nullable(Int32), - `col2` Nullable(String) -) -ENGINE = SQLite('sqlite.db','table2'); -``` - -テーブルからデータを返します: - -```sql -SELECT * FROM sqlite_db.table2 ORDER BY col1; -``` - -```text -┌─col1─┬─col2──┐ -│ 1 │ text1 │ -│ 2 │ text2 │ -│ 3 │ text3 │ -└──────┴───────┘ -``` - -**関連情報** - -- [SQLite](../../../engines/database-engines/sqlite.md) エンジン -- [sqlite](../../../sql-reference/table-functions/sqlite.md) テーブル関数 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/sqlite.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/sqlite.md.hash deleted file mode 100644 index 3ef046bf39e..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/sqlite.md.hash +++ /dev/null @@ -1 +0,0 @@ -39c98bbcdf7cebc3 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/time-series.md b/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/time-series.md deleted file mode 100644 index 795ae30c2f7..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/time-series.md +++ /dev/null @@ -1,307 +0,0 @@ ---- -description: 'A table engine storing time series, i.e. a set of values associated - with timestamps and tags (or labels).' -sidebar_label: 'TimeSeries' -sidebar_position: 60 -slug: '/engines/table-engines/special/time_series' -title: 'TimeSeries Engine' ---- - -import ExperimentalBadge from '@theme/badges/ExperimentalBadge'; -import CloudNotSupportedBadge from '@theme/badges/CloudNotSupportedBadge'; - - -# TimeSeries Engine - - - - -タイムシリーズ、すなわちタイムスタンプとタグ(またはラベル)に関連付けられた値のセットを格納するテーブルエンジン: - -```sql -metric_name1[tag1=value1, tag2=value2, ...] = {timestamp1: value1, timestamp2: value2, ...} -metric_name2[...] = ... -``` - -:::info -これは実験的な機能であり、将来のリリースで後方互換性のない方法で変更される可能性があります。 -[allow_experimental_time_series_table](/operations/settings/settings#allow_experimental_time_series_table) 設定を使用して、TimeSeriesテーブルエンジンの使用を有効にします。 -コマンド `set allow_experimental_time_series_table = 1` を入力します。 -::: - -## Syntax {#syntax} - -```sql -CREATE TABLE name [(columns)] ENGINE=TimeSeries -[SETTINGS var1=value1, ...] 
-[DATA db.data_table_name | DATA ENGINE data_table_engine(arguments)] -[TAGS db.tags_table_name | TAGS ENGINE tags_table_engine(arguments)] -[METRICS db.metrics_table_name | METRICS ENGINE metrics_table_engine(arguments)] -``` - -## Usage {#usage} - -すべてがデフォルトで設定される状態から始める方が簡単です(カラムのリストを指定せずに `TimeSeries` テーブルを作成することが許可されます): - -```sql -CREATE TABLE my_table ENGINE=TimeSeries -``` - -その後、このテーブルは以下のプロトコルで使用できます(ポートはサーバー設定で割り当てる必要があります): -- [prometheus remote-write](../../../interfaces/prometheus.md#remote-write) -- [prometheus remote-read](../../../interfaces/prometheus.md#remote-read) - -## Target tables {#target-tables} - -`TimeSeries` テーブルは独自のデータを持っておらず、すべてのデータはターゲットテーブルに保存されています。 -これは [materialized view](../../../sql-reference/statements/create/view#materialized-view) の動作に似ていますが、 -materialized view は1つのターゲットテーブルであるのに対し、`TimeSeries` テーブルは [data](#data-table)、[tags](#tags-table)、および [metrics](#metrics-table) という名前の3つのターゲットテーブルを持っています。 - -ターゲットテーブルは `CREATE TABLE` クエリで明示的に指定することもでき、 -`TimeSeries` テーブルエンジンは内部のターゲットテーブルを自動的に生成することもできます。 - -ターゲットテーブルは以下です: - -### Data table {#data-table} - -_data_ テーブルは、特定の識別子に関連付けられたタイムシリーズを含みます。 - -_data_ テーブルは次のカラムを持つ必要があります: - -| Name | Mandatory? | Default type | Possible types | Description | -|---|---|---|---|---| -| `id` | [x] | `UUID` | いずれでもよい | メトリック名とタグの組み合わせを識別します | -| `timestamp` | [x] | `DateTime64(3)` | `DateTime64(X)` | 時間ポイント | -| `value` | [x] | `Float64` | `Float32` または `Float64` | `timestamp` に関連付けられた値 | - - -### Tags table {#tags-table} - -_tags_ テーブルは、メトリック名とタグの組み合わせごとに計算された識別子を含んでいます。 - -_tags_ テーブルは次のカラムを持つ必要があります: - -| Name | Mandatory? | Default type | Possible types | Description | -|---|---|---|---|---| -| `id` | [x] | `UUID` | いずれでもよい([data](#data-table) テーブルの `id` の型と一致する必要があります) | `id` はメトリック名とタグの組み合わせを識別します。DEFAULT式はそのような識別子を計算する方法を指定します。 | -| `metric_name` | [x] | `LowCardinality(String)` | `String` または `LowCardinality(String)` | メトリックの名前 | -| `` | [ ] | `String` | `String` または `LowCardinality(String)` または `LowCardinality(Nullable(String))` | 特定のタグの値、タグの名前と対応するカラムの名前は [tags_to_columns](#settings) 設定で指定されます | -| `tags` | [x] | `Map(LowCardinality(String), String)` | `Map(String, String)` または `Map(LowCardinality(String), String)` または `Map(LowCardinality(String), LowCardinality(String))` | `__name__` タグを除くメトリックの名前を含むタグのマップ、[tags_to_columns](#settings) 設定で列挙された名前のタグを除外します | -| `all_tags` | [ ] | `Map(String, String)` | `Map(String, String)` または `Map(LowCardinality(String), String)` または `Map(LowCardinality(String), LowCardinality(String))` | 一時カラム、各行はメトリックの名前を含むすべてのタグのマップです。このカラムの唯一の目的は `id` を計算する際に使用されることです。 | -| `min_time` | [ ] | `Nullable(DateTime64(3))` | `DateTime64(X)` または `Nullable(DateTime64(X))` | その `id` を持つタイムシリーズの最小タイムスタンプ。このカラムは [store_min_time_and_max_time](#settings) が `true` の場合に作成されます。 | -| `max_time` | [ ] | `Nullable(DateTime64(3))` | `DateTime64(X)` または `Nullable(DateTime64(X))` | その `id` を持つタイムシリーズの最小タイムスタンプ。このカラムは [store_min_time_and_max_time](#settings) が `true` の場合に作成されます。 | - -### Metrics table {#metrics-table} - -_metrics_ テーブルは、収集されたメトリックについての情報、メトリックの種類、およびその説明を含みます。 - -_metrics_ テーブルは次のカラムを持つ必要があります: - -| Name | Mandatory? 
| Default type | Possible types | Description | -|---|---|---|---|---| -| `metric_family_name` | [x] | `String` | `String` または `LowCardinality(String)` | メトリックファミリの名前 | -| `type` | [x] | `String` | `String` または `LowCardinality(String)` | メトリックファミリのタイプ、"counter"、"gauge"、"summary"、"stateset"、"histogram"、"gaugehistogram" のいずれか | -| `unit` | [x] | `String` | `String` または `LowCardinality(String)` | メトリックで使用される単位 | -| `help` | [x] | `String` | `String` または `LowCardinality(String)` | メトリックの説明 | - -`TimeSeries` テーブルに挿入されたすべての行は、実際にはこれらの3つのターゲットテーブルに格納されます。 -`TimeSeries` テーブルには、[data](#data-table)、[tags](#tags-table)、[metrics](#metrics-table) テーブルからこれらのすべてのカラムが含まれます。 - -## Creation {#creation} - -`TimeSeries` テーブルエンジンを使用してテーブルを作成する方法はいくつかあります。 -最も簡単なステートメントは次の通りです。 - -```sql -CREATE TABLE my_table ENGINE=TimeSeries -``` - -実際には、以下のテーブルが作成されます(`SHOW CREATE TABLE my_table` を実行すると確認できます): - -```sql -CREATE TABLE my_table -( - `id` UUID DEFAULT reinterpretAsUUID(sipHash128(metric_name, all_tags)), - `timestamp` DateTime64(3), - `value` Float64, - `metric_name` LowCardinality(String), - `tags` Map(LowCardinality(String), String), - `all_tags` Map(String, String), - `min_time` Nullable(DateTime64(3)), - `max_time` Nullable(DateTime64(3)), - `metric_family_name` String, - `type` String, - `unit` String, - `help` String -) -ENGINE = TimeSeries -DATA ENGINE = MergeTree ORDER BY (id, timestamp) -DATA INNER UUID '01234567-89ab-cdef-0123-456789abcdef' -TAGS ENGINE = AggregatingMergeTree PRIMARY KEY metric_name ORDER BY (metric_name, id) -TAGS INNER UUID '01234567-89ab-cdef-0123-456789abcdef' -METRICS ENGINE = ReplacingMergeTree ORDER BY metric_family_name -METRICS INNER UUID '01234567-89ab-cdef-0123-456789abcdef' -``` - -したがって、カラムは自動的に生成され、また、このステートメントには作成された各内部ターゲットテーブルに対する1つの内部UUIDが含まれています。 -(内部UUIDは通常、設定された場合を除いて表示されません。 -[show_table_uuid_in_table_create_query_if_not_nil](../../../operations/settings/settings#show_table_uuid_in_table_create_query_if_not_nil) が設定されている場合。) - -内部ターゲットテーブルの名前は、`.inner_id.data.xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx`、`.inner_id.tags.xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx`、`.inner_id.metrics.xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx` のようになり、 -各ターゲットテーブルには、その主な `TimeSeries` テーブルのカラムのサブセットが含まれます: - -```sql -CREATE TABLE default.`.inner_id.data.xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx` -( - `id` UUID, - `timestamp` DateTime64(3), - `value` Float64 -) -ENGINE = MergeTree -ORDER BY (id, timestamp) -``` - -```sql -CREATE TABLE default.`.inner_id.tags.xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx` -( - `id` UUID DEFAULT reinterpretAsUUID(sipHash128(metric_name, all_tags)), - `metric_name` LowCardinality(String), - `tags` Map(LowCardinality(String), String), - `all_tags` Map(String, String) EPHEMERAL, - `min_time` SimpleAggregateFunction(min, Nullable(DateTime64(3))), - `max_time` SimpleAggregateFunction(max, Nullable(DateTime64(3))) -) -ENGINE = AggregatingMergeTree -PRIMARY KEY metric_name -ORDER BY (metric_name, id) -``` - -```sql -CREATE TABLE default.`.inner_id.metrics.xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx` -( - `metric_family_name` String, - `type` String, - `unit` String, - `help` String -) -ENGINE = ReplacingMergeTree -ORDER BY metric_family_name -``` - -## Adjusting types of columns {#adjusting-column-types} - -内部ターゲットテーブルのほとんどのカラムの型を、メインテーブルを定義する際に明示的に指定することによって調整できます。 -たとえば、 - -```sql -CREATE TABLE my_table -( - timestamp DateTime64(6) -) ENGINE=TimeSeries -``` - -は、内部の [data](#data-table) テーブルがミリ秒ではなくマイクロ秒でタイムスタンプを格納するようにします: - -```sql -CREATE TABLE 
default.`.inner_id.data.xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx` -( - `id` UUID, - `timestamp` DateTime64(6), - `value` Float64 -) -ENGINE = MergeTree -ORDER BY (id, timestamp) -``` - -## The `id` column {#id-column} - -`id` カラムは識別子を含み、各識別子はメトリック名とタグの組み合わせのために計算されます。 -`id` カラムのデフォルト式は、そのような識別子を計算するために使用される式です。 -`id` カラムの型ともその式は、明示的に指定することによって調整できます: - -```sql -CREATE TABLE my_table -( - id UInt64 DEFAULT sipHash64(metric_name, all_tags) -) ENGINE=TimeSeries -``` - -## The `tags` and `all_tags` columns {#tags-and-all-tags} - -`tags` と `all_tags` の2つのカラムがあります。これらはタグのマップを含みます。この例では同じ意味ですが、 -`tags_to_columns` 設定が使用される場合には異なることがあります。この設定は、特定のタグをマップ内に格納する代わりに、別のカラムに格納することを指定できます: - -```sql -CREATE TABLE my_table ENGINE=TimeSeries SETTINGS = {'instance': 'instance', 'job': 'job'} -``` - -このステートメントは、両方の `my_table` とその内部 [tags](#tags-table) ターゲットテーブルの定義に次のカラムを追加します。 -```sql - `instance` String, - `job` String -``` -この場合、`tags` カラムには `instance` と `job` タグは含まれませんが、`all_tags` カラムには含まれます。`all_tags` カラムは一時的なもので、その唯一の目的は `id` カラムのデフォルト式で使用されることです。 - -カラムの型は明示的に指定することによって調整できます: - -```sql -CREATE TABLE my_table (instance LowCardinality(String), job LowCardinality(Nullable(String))) -ENGINE=TimeSeries SETTINGS = {'instance': 'instance', 'job': 'job'} -``` - -## Table engines of inner target tables {#inner-table-engines} - -デフォルトでは、内部ターゲットテーブルは以下のテーブルエンジンを使用します: -- [data](#data-table) テーブルは [MergeTree](../mergetree-family/mergetree) を使用します。 -- [tags](#tags-table) テーブルは [AggregatingMergeTree](../mergetree-family/aggregatingmergetree) を使用します。これは、同じデータがこのテーブルに何度も挿入されるため、重複を削除する方法が必要であり、また `min_time` および `max_time` カラムの集計を行うために必要です。 -- [metrics](#metrics-table) テーブルは [ReplacingMergeTree](../mergetree-family/replacingmergetree) を使用します。これは、同じデータがこのテーブルに何度も挿入されるため、重複を削除する方法が必要です。 - -他のテーブルエンジンも、明示的に指定すれば内部ターゲットテーブルで使用できます: - -```sql -CREATE TABLE my_table ENGINE=TimeSeries -DATA ENGINE=ReplicatedMergeTree -TAGS ENGINE=ReplicatedAggregatingMergeTree -METRICS ENGINE=ReplicatedReplacingMergeTree -``` - -## External target tables {#external-target-tables} - -手動で作成したテーブルを使用する `TimeSeries` テーブルを作成することも可能です: - -```sql -CREATE TABLE data_for_my_table -( - `id` UUID, - `timestamp` DateTime64(3), - `value` Float64 -) -ENGINE = MergeTree -ORDER BY (id, timestamp); - -CREATE TABLE tags_for_my_table ... - -CREATE TABLE metrics_for_my_table ... 
- -CREATE TABLE my_table ENGINE=TimeSeries DATA data_for_my_table TAGS tags_for_my_table METRICS metrics_for_my_table; -``` - -## Settings {#settings} - -ここに、`TimeSeries` テーブルを定義する際に指定できる設定のリストがあります: - -| Name | Type | Default | Description | -|---|---|---|---| -| `tags_to_columns` | Map | {} | 特定のタグを [tags](#tags-table) テーブルの別々のカラムに入れるべきかを指定するマップ。構文: `{'tag1': 'column1', 'tag2' : column2, ...}` | -| `use_all_tags_column_to_generate_id` | Bool | true | タイムシリーズの識別子を計算するための式を生成する際、このフラグは `all_tags` カラムをその計算に使用することを有効にします。 | -| `store_min_time_and_max_time` | Bool | true | `true` に設定すると、テーブルは各タイムシリーズの `min_time` と `max_time` を保存します。 | -| `aggregate_min_time_and_max_time` | Bool | true | 内部ターゲット `tags` テーブルを作成する際に、このフラグは `min_time` カラムの型として `SimpleAggregateFunction(min, Nullable(DateTime64(3)))` 使用することを可能にします。同様に `max_time` カラムにも適用されます。 | -| `filter_by_min_time_and_max_time` | Bool | true | `true` に設定すると、テーブルはタイムシリーズのフィルタリングに `min_time` および `max_time` カラムを使用します。 | - - -# Functions {#functions} - -以下は、`TimeSeries` テーブルを引数としてサポートする関数のリストです: -- [timeSeriesData](../../../sql-reference/table-functions/timeSeriesData.md) -- [timeSeriesTags](../../../sql-reference/table-functions/timeSeriesTags.md) -- [timeSeriesMetrics](../../../sql-reference/table-functions/timeSeriesMetrics.md) diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/time-series.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/time-series.md.hash deleted file mode 100644 index c40945511d2..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/time-series.md.hash +++ /dev/null @@ -1 +0,0 @@ -47ca1152facf1ab3 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/log-family/index.md b/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/log-family/index.md deleted file mode 100644 index aa7e971de30..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/log-family/index.md +++ /dev/null @@ -1,56 +0,0 @@ ---- -description: 'Log Engine Familyのドキュメント' -sidebar_label: 'ログファミリー' -sidebar_position: 20 -slug: '/engines/table-engines/log-family/' -title: 'Log Engine Family' ---- - - - - -# Log Engine Family - -これらのエンジンは、多くの小さなテーブル(約100万行まで)を迅速に書き込み、その後全体として読み取る必要があるシナリオ向けに開発されました。 - -ファミリーのエンジン: - -| Log Engines | -|---------------------------------------------------------------------| -| [StripeLog](/engines/table-engines/log-family/stripelog.md) | -| [Log](/engines/table-engines/log-family/log.md) | -| [TinyLog](/engines/table-engines/log-family/tinylog.md) | - -`Log`ファミリーのテーブルエンジンは、[HDFS](/engines/table-engines/integrations/hdfs)または[S3](/engines/table-engines/mergetree-family/mergetree.md/#table_engine-mergetree-s3)の分散ファイルシステムにデータを保存できます。 - -:::warning このエンジンはログデータ用ではありません。 -名前に反して、*Logテーブルエンジンはログデータの保存を目的としたものではありません。 迅速に書き込む必要がある小規模なボリュームにのみ使用するべきです。 -::: - -## Common Properties {#common-properties} - -エンジンの特性: - -- ディスクにデータを保存します。 - -- 書き込み時にファイルの末尾にデータを追加します。 - -- 同時データアクセスのためのロックをサポートしています。 - - `INSERT`クエリの間、テーブルはロックされ、他のデータの読み書きクエリはテーブルのロック解除を待機します。データ書き込みクエリがない場合、任意の数のデータ読み込みクエリを同時に実行できます。 - -- [ミューテーション](/sql-reference/statements/alter#mutations)をサポートしていません。 - -- インデックスをサポートしていません。 - - これは、データの範囲に対する`SELECT`クエリが効率的でないことを意味します。 - -- データを原子性で書き込みません。 - - 書き込み操作が破損した場合(例えば、異常なサーバーシャットダウン)、破損したデータを持つテーブルが得られる可能性があります。 - -## Differences {#differences} - 
-`TinyLog`エンジンはファミリーの中で最も単純で、最も機能が限られ、効率が低いです。`TinyLog`エンジンは、単一のクエリ内で複数のスレッドによる並列データ読み込みをサポートしていません。データを読む速度は、単一のクエリからの並列読み込みをサポートしているファミリーの他のエンジンよりも遅く、各カラムを別々のファイルに保存するため、`Log`エンジンとほぼ同じ数のファイルディスクリプタを使用します。単純なシナリオでのみ使用してください。 - -`Log`および`StripeLog`エンジンは並列データ読み込みをサポートしています。データを読み取る際、ClickHouseは複数のスレッドを使用します。各スレッドは別々のデータブロックを処理します。`Log`エンジンはテーブルの各カラムに対して別々のファイルを使用します。`StripeLog`はすべてのデータを1つのファイルに保存します。その結果、`StripeLog`エンジンはファイルディスクリプタの数が少なくなりますが、データを読み込む際の効率は`Log`エンジンの方が高いです。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/log-family/index.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/log-family/index.md.hash deleted file mode 100644 index e657cc8536d..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/log-family/index.md.hash +++ /dev/null @@ -1 +0,0 @@ -26a2ea3004cc8f4a diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/log-family/log.md b/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/log-family/log.md deleted file mode 100644 index 9c4fba4f35f..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/log-family/log.md +++ /dev/null @@ -1,103 +0,0 @@ ---- -description: 'Logのドキュメント' -slug: '/engines/table-engines/log-family/log' -toc_priority: 33 -toc_title: 'Log' -title: 'Log' ---- - - - - -# Log - -このエンジンは `Log` エンジンファミリーに属しています。 `Log` エンジンの一般的なプロパティと、[Log Engine Family](../../../engines/table-engines/log-family/index.md) 記事におけるその違いを参照してください。 - -`Log` は、[TinyLog](../../../engines/table-engines/log-family/tinylog.md) と異なり、カラムファイルと共に小さなファイルの「マーク」が存在します。これらのマークは各データブロックに書き込まれ、指定された行数をスキップするためにファイルを読み始めるオフセットを含んでいます。これにより、複数のスレッドでテーブルデータを読み取ることが可能になります。 -同時データアクセスの場合、読み取り操作は同時に行うことができ、書き込み操作は読み取りや他の書き込みをブロックします。 -`Log` エンジンはインデックスをサポートしていません。同様に、テーブルへの書き込みが失敗した場合、テーブルは壊れ、そこからの読み取りはエラーを返します。`Log` エンジンは、一時的データ、一度書き込みテーブル、またはテストやデモ目的に適しています。 - -## テーブルの作成 {#table_engines-log-creating-a-table} - -```sql -CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] -( - column1_name [type1] [DEFAULT|MATERIALIZED|ALIAS expr1], - column2_name [type2] [DEFAULT|MATERIALIZED|ALIAS expr2], - ... -) ENGINE = Log -``` - -[CREATE TABLE](/sql-reference/statements/create/table) クエリの詳細な説明を参照してください。 - -## データの書き込み {#table_engines-log-writing-the-data} - -`Log` エンジンは、各カラムをそれぞれのファイルに書き込むことによってデータを効率的に格納します。各テーブルについて、Log エンジンは指定されたストレージパスに次のファイルを作成します: - -- `.bin`: 各カラムのデータファイルで、シリアライズされた圧縮データを含んでいます。 -`__marks.mrk`: 各データブロックに挿入されたオフセットと行数を格納するマークファイルです。マークは、エンジンが不必要なデータブロックをスキップして効率的にクエリを実行できるようにするために使用されます。 - -### 書き込みプロセス {#writing-process} - -`Log` テーブルにデータが書き込まれる際は、次の手順が行われます: - -1. データがブロックにシリアライズされて圧縮されます。 -2. 各カラムについて、圧縮データがそれぞれの `.bin` ファイルに追加されます。 -3. 
新しく挿入されたデータのオフセットと行数を記録するために、`__marks.mrk` ファイルに対応するエントリが追加されます。 - -## データの読み取り {#table_engines-log-reading-the-data} - -マークのあるファイルにより、ClickHouse はデータの並行読み取りを実現します。つまり、`SELECT` クエリは予測できない順序で行を返します。`ORDER BY` 句を使用して行をソートしてください。 - -## 使用例 {#table_engines-log-example-of-use} - -テーブルの作成: - -```sql -CREATE TABLE log_table -( - timestamp DateTime, - message_type String, - message String -) -ENGINE = Log -``` - -データの挿入: - -```sql -INSERT INTO log_table VALUES (now(),'REGULAR','The first regular message') -INSERT INTO log_table VALUES (now(),'REGULAR','The second regular message'),(now(),'WARNING','The first warning message') -``` - -私たちは二つの `INSERT` クエリを使用して、`.bin` ファイル内に二つのデータブロックを作成しました。 - -ClickHouse は、データを選択する際に複数のスレッドを使用します。各スレッドが独立して結果行を返すため、出力の行のブロックの順序は、入力の同じブロックの順序と一致しない場合があります。例: - -```sql -SELECT * FROM log_table -``` - -```text -┌───────────timestamp─┬─message_type─┬─message────────────────────┐ -│ 2019-01-18 14:27:32 │ REGULAR │ The second regular message │ -│ 2019-01-18 14:34:53 │ WARNING │ The first warning message │ -└─────────────────────┴──────────────┴────────────────────────────┘ -┌───────────timestamp─┬─message_type─┬─message───────────────────┐ -│ 2019-01-18 14:23:43 │ REGULAR │ The first regular message │ -└─────────────────────┴──────────────┴───────────────────────────┘ -``` - -結果をソートする(デフォルトでは昇順): - -```sql -SELECT * FROM log_table ORDER BY timestamp -``` - -```text -┌───────────timestamp─┬─message_type─┬─message────────────────────┐ -│ 2019-01-18 14:23:43 │ REGULAR │ The first regular message │ -│ 2019-01-18 14:27:32 │ REGULAR │ The second regular message │ -│ 2019-01-18 14:34:53 │ WARNING │ The first warning message │ -└─────────────────────┴──────────────┴────────────────────────────┘ -``` diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/log-family/log.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/log-family/log.md.hash deleted file mode 100644 index a32fa19f370..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/log-family/log.md.hash +++ /dev/null @@ -1 +0,0 @@ -0e206bcbab741ace diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/log-family/stripelog.md b/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/log-family/stripelog.md deleted file mode 100644 index cedc5fe3610..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/log-family/stripelog.md +++ /dev/null @@ -1,97 +0,0 @@ ---- -description: 'StripeLog のドキュメント' -slug: '/engines/table-engines/log-family/stripelog' -toc_priority: 32 -toc_title: 'StripeLog' -title: 'StripeLog' ---- - - - - -# StripeLog - -このエンジンはログエンジンのファミリーに属します。ログエンジンの一般的な特性とその違いについては、[Log Engine Family](../../../engines/table-engines/log-family/index.md)の記事を参照してください。 - -このエンジンは、少量のデータ(1百万行未満)で多くのテーブルを書き込む必要があるシナリオで使用します。たとえば、このテーブルは原子的な処理が必要な変換のために、着信データバッチを保存するのに使用できます。ClickHouseサーバーには100kインスタンスのこのテーブルタイプが適しており、高数のテーブルが必要な場合には[Log](./log.md)よりもこのテーブルエンジンを選択するべきです。これは読み込み効率を犠牲にします。 - -## テーブルの作成 {#table_engines-stripelog-creating-a-table} - -```sql -CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] -( - column1_name [type1] [DEFAULT|MATERIALIZED|ALIAS expr1], - column2_name [type2] [DEFAULT|MATERIALIZED|ALIAS expr2], - ... 
-) ENGINE = StripeLog -``` - -[CREATE TABLE](/sql-reference/statements/create/table) クエリの詳細な説明を参照してください。 - -## データの書き込み {#table_engines-stripelog-writing-the-data} - -`StripeLog`エンジンは、すべてのカラムを1つのファイルに格納します。各`INSERT`クエリに対して、ClickHouseはテーブルファイルの末尾にデータブロックを追加し、カラムを1つずつ書き込みます。 - -各テーブルに対してClickHouseは次のファイルを作成します: - -- `data.bin` — データファイル。 -- `index.mrk` — マークファイル。マークには、挿入された各データブロックの各カラムのオフセットが含まれています。 - -`StripeLog`エンジンは`ALTER UPDATE`および`ALTER DELETE`操作をサポートしていません。 - -## データの読み込み {#table_engines-stripelog-reading-the-data} - -マークファイルにより、ClickHouseはデータの読み込みを並列化できます。これにより、`SELECT`クエリは予測不可能な順序で行を返します。行をソートするには、`ORDER BY`句を使用します。 - -## 使用例 {#table_engines-stripelog-example-of-use} - -テーブルの作成: - -```sql -CREATE TABLE stripe_log_table -( - timestamp DateTime, - message_type String, - message String -) -ENGINE = StripeLog -``` - -データの挿入: - -```sql -INSERT INTO stripe_log_table VALUES (now(),'REGULAR','最初の通常メッセージ') -INSERT INTO stripe_log_table VALUES (now(),'REGULAR','2番目の通常メッセージ'),(now(),'WARNING','最初の警告メッセージ') -``` - -私たちは2つの`INSERT`クエリを使用して、`data.bin`ファイル内に2つのデータブロックを作成しました。 - -ClickHouseはデータ選択時に複数のスレッドを使用します。各スレッドは別々のデータブロックを読み込み、終了するたびに結果の行を独立して返します。そのため、出力の行のブロックの順序は、通常、入力の同じブロックの順序と一致しません。たとえば: - -```sql -SELECT * FROM stripe_log_table -``` - -```text -┌───────────timestamp─┬─message_type─┬─message────────────────────┐ -│ 2019-01-18 14:27:32 │ REGULAR │ 2番目の通常メッセージ │ -│ 2019-01-18 14:34:53 │ WARNING │ 最初の警告メッセージ │ -└─────────────────────┴──────────────┴────────────────────────────┘ -┌───────────timestamp─┬─message_type─┬─message───────────────────┐ -│ 2019-01-18 14:23:43 │ REGULAR │ 最初の通常メッセージ │ -└─────────────────────┴──────────────┴───────────────────────────┘ -``` - -結果のソート(デフォルトでは昇順): - -```sql -SELECT * FROM stripe_log_table ORDER BY timestamp -``` - -```text -┌───────────timestamp─┬─message_type─┬─message────────────────────┐ -│ 2019-01-18 14:23:43 │ REGULAR │ 最初の通常メッセージ │ -│ 2019-01-18 14:27:32 │ REGULAR │ 2番目の通常メッセージ │ -│ 2019-01-18 14:34:53 │ WARNING │ 最初の警告メッセージ │ -└─────────────────────┴──────────────┴────────────────────────────┘ -``` diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/log-family/stripelog.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/log-family/stripelog.md.hash deleted file mode 100644 index 6a58cede329..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/log-family/stripelog.md.hash +++ /dev/null @@ -1 +0,0 @@ -288f1e7c2c04bccd diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/log-family/tinylog.md b/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/log-family/tinylog.md deleted file mode 100644 index 8f66a02966c..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/log-family/tinylog.md +++ /dev/null @@ -1,86 +0,0 @@ ---- -description: 'TinyLogのドキュメント' -slug: '/engines/table-engines/log-family/tinylog' -toc_priority: 34 -toc_title: 'TinyLog' -title: 'TinyLog' ---- - - - - -# TinyLog - -このエンジンは、ログエンジンファミリーに属します。ログエンジンの共通の特性や違いについては、[Log Engine Family](../../../engines/table-engines/log-family/index.md) を参照してください。 - -このテーブルエンジンは、一般的に書き込み一回のメソッドで使用されます:データを書き込んだら、必要に応じて何度でも読み取ります。例えば、`TinyLog`タイプのテーブルを、少量バッチで処理される中間データに使用できます。小さなテーブルを多数保持することは非効率であることに注意してください。 - -クエリは単一のストリームで実行されます。言い換えれば、このエンジンは比較的に小さなテーブル(約1,000,000行まで)を想定しています。多くの小さなテーブルを持っている場合には、このテーブルエンジンを使用するのが理にかなっています。なぜなら、[Log](../../../engines/table-engines/log-family/log.md)エンジンよりも簡単で(開く必要のあるファイルが少ないため)、管理が容易だからです。 - 
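-例えば、以下は「小さなバッチの中間データを一度書き込み、変換して別のテーブルへ移した後に破棄する」という使い方を想定した最小のスケッチです(`staging_batch` と `final_table` は説明用の架空の名前で、`final_table` は既存の MergeTree テーブルを想定しています):
-
-```sql
-CREATE TABLE staging_batch
-(
-    id UInt64,
-    payload String
-)
-ENGINE = TinyLog;
-
-INSERT INTO staging_batch VALUES (1, 'a'), (2, 'b');  -- 少量バッチを一度だけ書き込む
-
-INSERT INTO final_table                               -- 変換して永続テーブルへ移す
-SELECT id, upper(payload) FROM staging_batch;
-
-DROP TABLE staging_batch;                             -- 役目を終えた中間テーブルを破棄する
-```
-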
-## Characteristics {#characteristics} - -- **シンプルな構造**: Logエンジンとは異なり、TinyLogはマークファイルを使用しません。これにより複雑さが軽減されますが、大規模データセットのパフォーマンス最適化が制限されます。 -- **単一ストリームクエリ**: TinyLogテーブルに対するクエリは単一のストリームで実行され、通常は1,000,000行までの比較的小さなテーブルに適しています。 -- **小規模テーブルに対する効率性**: TinyLogエンジンのシンプルさは、多くの小さなテーブルを管理する際に有利であり、Logエンジンと比べてファイル操作が少なくて済みます。 - -Logエンジンとは異なり、TinyLogはマークファイルを使用しません。これにより複雑さが軽減されますが、大規模データセットのパフォーマンス最適化が制限されます。 - -## Creating a Table {#table_engines-tinylog-creating-a-table} - -```sql -CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] -( - column1_name [type1] [DEFAULT|MATERIALIZED|ALIAS expr1], - column2_name [type2] [DEFAULT|MATERIALIZED|ALIAS expr2], - ... -) ENGINE = TinyLog -``` - -[CREATE TABLE](/sql-reference/statements/create/table)クエリの詳細な説明を参照してください。 - -## Writing the Data {#table_engines-tinylog-writing-the-data} - -`TinyLog`エンジンは、すべてのカラムを1つのファイルに保存します。各`INSERT`クエリに対して、ClickHouseはデータブロックをテーブルファイルの末尾に追加し、カラムを1つずつ書き込みます。 - -ClickHouseは各テーブルに対して次のファイルを書きます: - -- `.bin`: シリアライズされ圧縮されたデータを含む各カラム用のデータファイル。 - -`TinyLog`エンジンは、`ALTER UPDATE`および`ALTER DELETE`操作をサポートしていません。 - -## Example of Use {#table_engines-tinylog-example-of-use} - -テーブルの作成: - -```sql -CREATE TABLE tiny_log_table -( - timestamp DateTime, - message_type String, - message String -) -ENGINE = TinyLog -``` - -データの挿入: - -```sql -INSERT INTO tiny_log_table VALUES (now(),'REGULAR','The first regular message') -INSERT INTO tiny_log_table VALUES (now(),'REGULAR','The second regular message'),(now(),'WARNING','The first warning message') -``` - -私たちは、`INSERT`クエリを2つ使用して、`.bin`ファイル内に2つのデータブロックを作成しました。 - -ClickHouseはデータを選択する際に単一のストリームを使用します。その結果、出力内の行ブロックの順序は、入力内の同じブロックの順序と一致します。例えば: - -```sql -SELECT * FROM tiny_log_table -``` - -```text -┌───────────timestamp─┬─message_type─┬─message────────────────────┐ -│ 2024-12-10 13:11:58 │ REGULAR │ The first regular message │ -│ 2024-12-10 13:12:12 │ REGULAR │ The second regular message │ -│ 2024-12-10 13:12:12 │ WARNING │ The first warning message │ -└─────────────────────┴──────────────┴────────────────────────────┘ -``` diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/log-family/tinylog.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/log-family/tinylog.md.hash deleted file mode 100644 index 980f3425f7e..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/log-family/tinylog.md.hash +++ /dev/null @@ -1 +0,0 @@ -4f43b4e1a44fdfce diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/mergetree-family/aggregatingmergetree.md b/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/mergetree-family/aggregatingmergetree.md deleted file mode 100644 index 04dad5b8055..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/mergetree-family/aggregatingmergetree.md +++ /dev/null @@ -1,170 +0,0 @@ ---- -description: '同じ主キー(あるいは正確には同じ[ソーティングキー](../../../engines/table-engines/mergetree-family/mergetree.md)を使用している行)を、1つのデータ部分内に集計関数の状態の組み合わせを格納する単一の行に置き換えます。' -sidebar_label: 'AggregatingMergeTree' -sidebar_position: 60 -slug: '/engines/table-engines/mergetree-family/aggregatingmergetree' -title: 'AggregatingMergeTree' ---- - - - - -# AggregatingMergeTree - -エンジンは [MergeTree](/engines/table-engines/mergetree-family/versionedcollapsingmergetree) から継承され、データパーツのマージロジックが変更されます。ClickHouseは、同じ主キーを持つすべての行(正確には、同じ [ソートキー](../../../engines/table-engines/mergetree-family/mergetree.md) を持つ行)を、集約関数の状態の組み合わせを保存する単一の行(単一のデータパーツ内)に置き換えます。 - 
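-この動作(同じソートキーを持つ行が、集約関数の状態を保持する 1 行にまとめられること)は、マテリアライズドビューを使わなくても次のような最小スケッチで確認できます(テーブル名と値は説明用の架空のものです)。挿入には `-State` サフィックス、読み出しには `-Merge` サフィックス付きの集約関数を使います:
-
-```sql
-CREATE TABLE agg_example
-(
-    key String,
-    total AggregateFunction(sum, UInt64)
-)
-ENGINE = AggregatingMergeTree
-ORDER BY key;
-
-INSERT INTO agg_example SELECT 'a', sumState(toUInt64(1));  -- 集約状態として挿入する
-INSERT INTO agg_example SELECT 'a', sumState(toUInt64(2));
-
-SELECT key, sumMerge(total) AS total                        -- 読み出し時は -Merge と GROUP BY を使う
-FROM agg_example
-GROUP BY key;                                               -- 結果: ('a', 3)
-```
-
-マージ(またはクエリの `GROUP BY`)により、同じ `key` の 2 つの集約状態が 1 つに結合されます。
-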
-`AggregatingMergeTree` テーブルを使用して、インクリメンタルデータ集約を行うことができます。これには集約されたマテリアライズドビューも含まれます。 - -以下のビデオで、AggregatingMergeTree と Aggregate 関数の使用例を確認できます: -
- -エンジンは、次のタイプのすべてのカラムを処理します: - -## [AggregateFunction](../../../sql-reference/data-types/aggregatefunction.md) {#aggregatefunction} -## [SimpleAggregateFunction](../../../sql-reference/data-types/simpleaggregatefunction.md) {#simpleaggregatefunction} - -行数がオーダーで削減される場合は、`AggregatingMergeTree` を使用することが適切です。 - -## テーブルの作成 {#creating-a-table} - -```sql -CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] -( - name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1], - name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2], - ... -) ENGINE = AggregatingMergeTree() -[PARTITION BY expr] -[ORDER BY expr] -[SAMPLE BY expr] -[TTL expr] -[SETTINGS name=value, ...] -``` - -リクエストパラメータの説明については、[リクエストの説明](../../../sql-reference/statements/create/table.md) を参照してください。 - -**クエリ句** - -`AggregatingMergeTree` テーブルを作成する場合は、`MergeTree` テーブルを作成する際と同様の [句](../../../engines/table-engines/mergetree-family/mergetree.md) が必要です。 - -
- -テーブルを作成するための非推奨メソッド - -:::note -新しいプロジェクトでこのメソッドを使用せず、可能であれば古いプロジェクトを上記のメソッドに切り替えてください。 -::: - -```sql -CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] -( - name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1], - name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2], - ... -) ENGINE [=] AggregatingMergeTree(date-column [, sampling_expression], (primary, key), index_granularity) -``` - -すべてのパラメータは `MergeTree` のものと同じ意味を持ちます。 -
- -## SELECT および INSERT {#select-and-insert} - -データを挿入するには、集約 -State- 関数を用いた [INSERT SELECT](../../../sql-reference/statements/insert-into.md) クエリを使用します。`AggregatingMergeTree` テーブルからデータを選択する際は、`GROUP BY` 句を使用し、データ挿入時と同じ集約関数を使用しますが、`-Merge` サフィックスを使用します。 - -`SELECT` クエリの結果では、`AggregateFunction` 型の値が、すべての ClickHouse 出力形式のために実装依存のバイナリ表現を持ちます。例えば、`TabSeparated` 形式にデータをダンプする場合、そのダンプは `INSERT` クエリを使用して再ロードできます。 - -## 集約されたマテリアライズドビューの例 {#example-of-an-aggregated-materialized-view} - -以下の例では、`test` という名前のデータベースがあると想定していますので、存在しない場合は作成してください: - -```sql -CREATE DATABASE test; -``` - -次に、生データを含むテーブル `test.visits` を作成します: - -```sql -CREATE TABLE test.visits - ( - StartDate DateTime64 NOT NULL, - CounterID UInt64, - Sign Nullable(Int32), - UserID Nullable(Int32) -) ENGINE = MergeTree ORDER BY (StartDate, CounterID); -``` - -次に、訪問の総数とユニークユーザー数を追跡する `AggregationFunction` を保存する `AggregatingMergeTree` テーブルを作成する必要があります。 - -`test.visits` テーブルを監視し、`AggregateFunction` 型を使用する `AggregatingMergeTree` マテリアライズドビューを作成します: - -```sql -CREATE TABLE test.agg_visits ( - StartDate DateTime64 NOT NULL, - CounterID UInt64, - Visits AggregateFunction(sum, Nullable(Int32)), - Users AggregateFunction(uniq, Nullable(Int32)) -) -ENGINE = AggregatingMergeTree() ORDER BY (StartDate, CounterID); -``` - -`test.visits` から `test.agg_visits` にデータを入力するマテリアライズドビューを作成します: - -```sql -CREATE MATERIALIZED VIEW test.visits_mv TO test.agg_visits -AS SELECT - StartDate, - CounterID, - sumState(Sign) AS Visits, - uniqState(UserID) AS Users -FROM test.visits -GROUP BY StartDate, CounterID; -``` - -`test.visits` テーブルにデータを挿入します: - -```sql -INSERT INTO test.visits (StartDate, CounterID, Sign, UserID) - VALUES (1667446031000, 1, 3, 4), (1667446031000, 1, 6, 3); -``` - -データは `test.visits` と `test.agg_visits` の両方に挿入されます。 - -集約データを取得するには、マテリアライズドビュー `test.visits_mv` から `SELECT ... 
GROUP BY ...` のようなクエリを実行します: - -```sql -SELECT - StartDate, - sumMerge(Visits) AS Visits, - uniqMerge(Users) AS Users -FROM test.visits_mv -GROUP BY StartDate -ORDER BY StartDate; -``` - -```text -┌───────────────StartDate─┬─Visits─┬─Users─┐ -│ 2022-11-03 03:27:11.000 │ 9 │ 2 │ -└─────────────────────────┴────────┴───────┘ -``` - -`test.visits` に別のレコードを追加しますが、今回は異なるタイムスタンプを使用してみてください: - -```sql -INSERT INTO test.visits (StartDate, CounterID, Sign, UserID) - VALUES (1669446031000, 2, 5, 10), (1667446031000, 3, 7, 5); -``` - -再度 `SELECT` クエリを実行すると、次の出力が返されます: - -```text -┌───────────────StartDate─┬─Visits─┬─Users─┐ -│ 2022-11-03 03:27:11.000 │ 16 │ 3 │ -│ 2022-11-26 07:00:31.000 │ 5 │ 1 │ -└─────────────────────────┴────────┴───────┘ -``` - -## 関連コンテンツ {#related-content} - -- ブログ: [ClickHouse における集約コンビネータの利用](https://clickhouse.com/blog/aggregate-functions-combinators-in-clickhouse-for-arrays-maps-and-states) diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/mergetree-family/aggregatingmergetree.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/mergetree-family/aggregatingmergetree.md.hash deleted file mode 100644 index 6cc8b7697c4..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/mergetree-family/aggregatingmergetree.md.hash +++ /dev/null @@ -1 +0,0 @@ -93aecd45d93d1f25 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/mergetree-family/annindexes.md b/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/mergetree-family/annindexes.md deleted file mode 100644 index 925dd961e56..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/mergetree-family/annindexes.md +++ /dev/null @@ -1,454 +0,0 @@ ---- -description: 'Documentation for Exact and Approximate Nearest Neighbor Search' -keywords: -- 'vector similarity search' -- 'ann' -- 'knn' -- 'hnsw' -- 'indices' -- 'index' -- 'nearest neighbor' -sidebar_label: 'Exact and Approximate Nearest Neighbor Search' -slug: '/engines/table-engines/mergetree-family/annindexes' -title: 'Exact and Approximate Nearest Neighbor Search' ---- - -import BetaBadge from '@theme/badges/BetaBadge'; - - - -# 正確および近似最近傍検索 - -与えられた点に対して多次元(ベクトル)空間内のN個の最も近い点を見つける問題は、[最近傍検索](https://en.wikipedia.org/wiki/Nearest_neighbor_search)として知られています。 -最近傍検索を解決するための2つの一般的なアプローチがあります: -- 正確な最近傍検索は、与えられた点とベクトル空間内のすべての点との距離を計算します。これにより、最高の精度、すなわち返された点が実際の最近傍であることが保証されます。ベクトル空間を徹底的に探索するため、正確な最近傍検索は実世界での使用には遅すぎる場合があります。 -- 近似最近傍検索は、結果をはるかに速く計算する技術(例えば、グラフやランダムフォレストなどの特殊なデータ構造)を指します。結果の精度は通常、「実用的には十分な」レベルです。多くの近似技術は、結果の精度と検索時間の間のトレードオフを調整するためのパラメータを提供します。 - -最近傍検索(正確または近似)は、次のようにSQLで記述できます: - -```sql -WITH [...] AS reference_vector -SELECT [...] -FROM table -WHERE [...] 
-- WHERE句はオプションです -ORDER BY (vectors, reference_vector) -LIMIT -``` - -ベクトル空間内の点は、配列型のカラム `vectors` に格納されています。例えば、 [Array(Float64)](../../../sql-reference/data-types/array.md)、[Array(Float32)](../../../sql-reference/data-types/array.md)、または [Array(BFloat16)](../../../sql-reference/data-types/array.md) のいずれかです。 -参照ベクトルは定数配列であり、共通テーブル式として与えられます。 -`` は、参照点とすべての格納された点との間の距離を計算します。 -そのために使用できる [距離関数](/sql-reference/functions/distance-functions) のいずれかを使用できます。 -`` は、返されるべき隣接点の数を指定します。 - -## 正確な最近傍検索 {#exact-nearest-neighbor-search} - -正確な最近傍検索は、上記のSELECTクエリをそのまま使用して実行できます。 -そのようなクエリの実行時間は、一般に格納されたベクトルの数と次元、すなわち配列要素の数に比例します。 -また、ClickHouseはすべてのベクトルをブルートフォーススキャンするため、実行時間はクエリによるスレッド数にも依存します(設定 [max_threads](../../../operations/settings/settings.md#max_threads) を参照)。 - -正確な最近傍検索を高速化するための一般的なアプローチの1つは、低精度の [floatデータ型](../../../sql-reference/data-types/float.md) を使用することです。 -例えば、ベクトルが `Array(BFloat16)` として格納されている場合、`Array(Float32)` の代わりに、データサイズは半分にカットされ、クエリの実行時間も半分に減少すると予想されます。 -この方法は量子化として知られており、すべてのベクトルの徹底的なスキャンにもかかわらず、結果の精度を低下させる可能性があります。 -精度の損失が許容できるかどうかは使用ケースによりますが、通常は実験を要します。 - -### 例 {#exact-nearest-neighbor-search-example} - -```sql -CREATE TABLE tab(id Int32, vec Array(Float32)) ENGINE = MergeTree ORDER BY id; - -INSERT INTO tab VALUES (0, [1.0, 0.0]), (1, [1.1, 0.0]), (2, [1.2, 0.0]), (3, [1.3, 0.0]), (4, [1.4, 0.0]), (5, [1.5, 0.0]), (6, [0.0, 2.0]), (7, [0.0, 2.1]), (8, [0.0, 2.2]), (9, [0.0, 2.3]), (10, [0.0, 2.4]), (11, [0.0, 2.5]); - -WITH [0., 2.] AS reference_vec -SELECT id, vec -FROM tab -ORDER BY L2Distance(vec, reference_vec) ASC -LIMIT 3; -``` - -は以下を返します: - -```result - ┌─id─┬─vec─────┐ -1. │ 6 │ [0,2] │ -2. │ 7 │ [0,2.1] │ -3. │ 8 │ [0,2.2] │ - └────┴─────────┘ -``` - -## 近似最近傍検索 {#approximate-nearest-neighbor-search} - - - -ClickHouseは、近似最近傍検索を実行するための特別な「ベクトル類似性」インデックスを提供します。 - -:::note -ベクトル類似性インデックスは現在実験的です。 -それを有効にするには、最初に `SET allow_experimental_vector_similarity_index = 1` を実行してください。 -問題が発生した場合は、[ClickHouseリポジトリ](https://github.com/clickhouse/clickhouse/issues) で問題を報告してください。 -::: - -### ベクトル類似性インデックスの作成 {#creating-a-vector-similarity-index} - -新しいテーブルにベクトル類似性インデックスを作成するには、次のようにします: - -```sql -CREATE TABLE table -( - [...], - vectors Array(Float*), - INDEX vectors TYPE vector_similarity(, , ) [GRANULARITY ] -) -ENGINE = MergeTree -ORDER BY [...] 
-``` - -既存のテーブルにベクトル類似性インデックスを追加するには: - -```sql -ALTER TABLE table ADD INDEX <index_name> vectors TYPE vector_similarity(<type>, <distance_function>, <dimensions>) [GRANULARITY <N>]; -``` - -ベクトル類似性インデックスは、特別な種類のスキッピングインデックスです([こちら](mergetree.md#table_engine-mergetree-data_skipping-indexes) および [こちら](../../../optimize/skipping-indexes)を参照)。 -そのため、上記の `ALTER TABLE` 文は、テーブルに新しく挿入されたデータに対してのみインデックスを作成します。 -既存のデータにインデックスを作成するには、インデックスをマテリアライズする必要があります: - -```sql -ALTER TABLE table MATERIALIZE INDEX <index_name> SETTINGS mutations_sync = 2; -``` - -関数 `<distance_function>` は次のいずれかでなければなりません: -- `L2Distance`、[ユークリッド距離](https://en.wikipedia.org/wiki/Euclidean_distance)で、ユークリッド空間内の二つの点間の直線の長さを表します、または -- `cosineDistance`、[コサイン距離](https://en.wikipedia.org/wiki/Cosine_similarity#Cosine_distance)で、二つの非零ベクトル間の角度を表します。 - -正規化されたデータの場合、`L2Distance` が通常の最適選択です。そうでない場合は、スケールを補正するために `cosineDistance` を推奨します。 - -`<dimensions>` は、基になるカラムにおける配列の基数(要素の数)を指定します。 -ClickHouseがインデックス作成中に異なる基数の配列を見つけた場合、インデックスは破棄され、エラーが返されます。 - -オプションのGRANULARITYパラメータ `<N>` は、インデックス粒度のサイズを指します([こちら](../../../optimize/skipping-indexes)を参照)。 -デフォルト値は1億で、ほとんどの使用ケースでは合理的にうまく機能しますが、調整も可能です。 -調整は、その影響を理解している高度なユーザーのみが行うべきです([以下](#differences-to-regular-skipping-indexes)を参照)。 - -ベクトル類似性インデックスは、異なる近似検索方法に対応できる汎用性を持っています。 -実際に使用される方法は、パラメータ `<type>` で指定されます。 -現在のところ、唯一の利用可能な方法はHNSW([学術論文](https://arxiv.org/abs/1603.09320))で、階層的近接グラフに基づく近似ベクトル検索のための人気のある最先端の技術です。 -タイプとしてHNSWが使用される場合、ユーザーはさらにHNSW専用のパラメータを任意で指定できます: - -```sql -CREATE TABLE table -( - [...], - vectors Array(Float*), - INDEX index_name vectors TYPE vector_similarity('hnsw', <distance_function>, <dimensions>[, <quantization>, <hnsw_max_connections_per_layer>, <hnsw_candidate_list_size_for_construction>]) [GRANULARITY N] -) -ENGINE = MergeTree -ORDER BY [...] -``` - -これらのHNSW専用パラメータには次のものがあります: -- `<quantization>` は、近接グラフ内のベクトルの量子化を制御します。可能な値は `f64`、`f32`、`f16`、`bf16`、または `i8` です。デフォルト値は `bf16` です。このパラメータは基盤となるカラム内のベクトルの表現に影響を与えません。 -- `<hnsw_max_connections_per_layer>` は、グラフノードごとの隣接点の数を制御します。これはHNSWのハイパーパラメータ `M` でも知られています。デフォルト値は `32` です。値 `0` はデフォルト値を使用することを意味します。 -- `<hnsw_candidate_list_size_for_construction>` は、HNSWグラフ構築時の動的候補リストのサイズを制御します。これはHNSWのハイパーパラメータ `ef_construction` でも知られています。デフォルト値は `128` です。値 `0` はデフォルト値を使用することを意味します。 - -すべてのHNSW専用パラメータのデフォルト値は、ほとんどの使用ケースで合理的にうまく機能します。 -したがって、HNSW専用パラメータをカスタマイズすることはお勧めしません。 - -さらに、以下の制限が適用されます: -- ベクトル類似性インデックスは、[Array(Float32)](../../../sql-reference/data-types/array.md)、[Array(Float64)](../../../sql-reference/data-types/array.md)、または [Array(BFloat16)](../../../sql-reference/data-types/array.md) 型のカラムでのみ作成できます。`Array(Nullable(Float32))` や `Array(LowCardinality(Float32))` のようなNullableや低基数のfloatの配列は許可されていません。 -- ベクトル類似性インデックスは、単一のカラムにのみ作成できます。 -- ベクトル類似性インデックスは計算式に対しても作成できます(例:`INDEX index_name arraySort(vectors) TYPE vector_similarity([...])`)が、そのようなインデックスは後で近似近傍検索に使用できません。 -- ベクトル類似性インデックスは、基になるカラムのすべての配列が `<dimensions>` 個の要素を持っていることを必要とします - これはインデックス作成時に確認されます。この要件の違反を早く検出するために、ユーザーはベクトルカラムに制約を追加できます。例えば、`CONSTRAINT same_length CHECK length(vectors) = 256` のようにします。 -- 同様に、基になるカラムの配列値は空(`[]`)であってはならず、デフォルト値(同じく `[]`)を持つこともできません。 - -### ベクトル類似性インデックスの使用 {#using-a-vector-similarity-index} - -:::note -ベクトル類似性インデックスを使用するには、設定 [compatibility](../../../operations/settings/settings.md) を `''`(デフォルト値)または `'25.1'` 以上にする必要があります。 -::: - -ベクトル類似性インデックスは、次の形式のSELECTクエリをサポートしています: - -```sql -WITH [...] AS reference_vector -SELECT [...] -FROM table -WHERE [...]
-- WHERE句はオプションです -ORDER BY (vectors, reference_vector) -LIMIT -``` - -ClickHouseのクエリオプティマイザは、上記のクエリテンプレートに一致させ、利用可能なベクトル類似性インデックスを使用しようとします。 -クエリは、SELECTクエリの距離関数がインデックス定義内の距離関数と同じである場合にのみ、ベクトル類似性インデックスを使用できます。 - -高度なユーザーは、設定 [hnsw_candidate_list_size_for_search](../../../operations/settings/settings.md#hnsw_candidate_list_size_for_search)(HNSWのハイパーパラメータ「ef_search」としても知られています)に対するカスタム値を提供して、検索中の候補リストのサイズを調整することができます(例:`SELECT [...] SETTINGS hnsw_candidate_list_size_for_search = `)。 -デフォルト値の設定256はほとんどの使用ケースでうまく機能します。 -設定値を大きくするほど、パフォーマンスが遅くなる代わりに精度が向上します。 - -クエリがベクトル類似性インデックスを使用できる場合、ClickHouseはSELECTクエリで提供されたLIMIT `` が合理的な範囲内であることを確認します。 -具体的には、LIMIT `` がデフォルト値100の設定 [max_limit_for_vector_search_queries](../../../operations/settings/settings.md#max_limit_for_vector_search_queries) よりも大きい場合、エラーが返されます。 -過度に大きなLIMIT値は検索を遅くし、通常は使用エラーを示します。 - -SELECTクエリがベクトル類似性インデックスを使用しているかどうかを確認するには、クエリの先頭に `EXPLAIN indexes = 1` を追加します。 - -例えば、クエリ - -```sql -EXPLAIN indexes = 1 -WITH [0.462, 0.084, ..., -0.110] AS reference_vec -SELECT id, vec -FROM tab -ORDER BY L2Distance(vec, reference_vec) ASC -LIMIT 10; -``` - -は次のように返される場合があります: - -```result - ┌─explain─────────────────────────────────────────────────────────────────────────────────────────┐ - 1. │ Expression (Project names) │ - 2. │ Limit (preliminary LIMIT (without OFFSET)) │ - 3. │ Sorting (Sorting for ORDER BY) │ - 4. │ Expression ((Before ORDER BY + (Projection + Change column names to column identifiers))) │ - 5. │ ReadFromMergeTree (default.tab) │ - 6. │ Indexes: │ - 7. │ PrimaryKey │ - 8. │ Condition: true │ - 9. │ Parts: 1/1 │ -10. │ Granules: 575/575 │ -11. │ Skip │ -12. │ Name: idx │ -13. │ Description: vector_similarity GRANULARITY 100000000 │ -14. │ Parts: 1/1 │ -15. │ Granules: 10/575 │ - └─────────────────────────────────────────────────────────────────────────────────────────────────┘ -``` - -この例では、[dbpediaデータセット](https://huggingface.co/datasets/KShivendu/dbpedia-entities-openai-1M) に含まれる100万のベクトルが、各1536次元で575のグラニュールに格納されています。クエリは10個の近傍を求めており、ベクトル類似性インデックスはこれら10個の近傍を10個の異なるグラニュールで見つけます。 -これら10のグラニュールは、クエリ実行中に読み込まれます。 - -出力に `Skip` とベクトルインデックスの名前とタイプ(この例では `idx` と `vector_similarity`)が含まれている場合、ベクトル類似性インデックスが使用されたことを示します。 -この場合、ベクトル類似性インデックスは4つのグラニュールのうち2つをスキップしました。すなわち、データの50%を削減しました。 -より多くのグラニュールを削除できるほど、インデックスの使用が効果的になります。 - -:::tip -インデックスの使用を強制するには、設定 [force_data_skipping_indexes](../../../operations/settings/settings#force_data_skipping_indices) を使用してSELECTクエリを実行できます(設定値としてインデックス名を指定してください)。 -::: - -**ポストフィルタリングおよびプレフィルタリング** - -ユーザーは、SELECTクエリに追加のフィルタ条件を指定するために `WHERE` 句をオプションで指定できます。 -ClickHouseはこれらのフィルタ条件をポストフィルタリングまたはプレフィルタリング戦略を使用して評価します。 -簡単に言えば、両方の戦略は、フィルタが評価される順序を決定します: -- ポストフィルタリングは、最初にベクトル類似性インデックスが評価され、その後ClickHouseが `WHERE` 句で指定された追加のフィルタを評価します。 -- プレフィルタリングは、フィルタ評価の順序がその逆になります。 - -これらの戦略には異なるトレードオフがあります: -- ポストフィルタリングには、`LIMIT ` 句で要求された行数未満を返す可能性があるという一般的な問題があります。この状況は、ベクトル類似性インデックスによって返された1つ以上の結果行が追加フィルタを満たさないときに発生します。 -- プレフィルタリングは、一般的に未解決の問題です。特定の専門化されたベクトルデータベースは、プレフィルタリングアルゴリズムを提供しますが、ほとんどのリレーショナルデータベース(ClickHouseを含む)は、正確な隣接検索、すなわちインデックスなしのブルートフォーススキャンに戻ります。 - -使用される戦略は、フィルタ条件によって決まります。 - -*追加のフィルタはパーティションキーの一部* - -追加のフィルタ条件がパーティションキーの一部である場合、ClickHouseはパーティションプルーニングを適用します。 -例えば、テーブルが列 `year` で範囲パーティションされていて、次のクエリが実行される場合を考えます: - -```sql -WITH [0., 2.] 
AS reference_vec -SELECT id, vec -FROM tab -WHERE year = 2025 -ORDER BY L2Distance(vec, reference_vec) ASC -LIMIT 3; -``` - -ClickHouseは2025年のパーティションを除いてすべてのパーティションをプルーニングします。 - -*追加のフィルタはインデックスを使用して評価できません* - -追加のフィルタ条件がインデックス(主キーインデックス、スキッピングインデックス)を使用して評価できない場合、ClickHouseはポストフィルタリングを適用します。 - -*追加のフィルタは主キーインデックスを使用して評価できます* - -追加のフィルタ条件が[主キー](mergetree.md#primary-key)を使用して評価できる場合(つまり、主キーのプレフィックスを形成する場合)、 -- フィルタ条件がパート内の少なくとも1行を除外する場合、ClickHouseはパート内の「生き残った」範囲に対してプレフィルタリングに戻ります。 -- フィルタ条件がパート内の行を除外しない場合、ClickHouseはそのパートに対してポストフィルタリングを実行します。 - -実際の使用ケースでは、後者のケースはほとんど考えられません。 - -*追加のフィルタはスキッピングインデックスを使用して評価できます* - -追加のフィルタ条件が[スキッピングインデックス](mergetree.md#table_engine-mergetree-data_skipping-indexes)を使用して評価できる場合(最小最大インデックス、セットインデックスなど)、ClickHouseはポストフィルタリングを実行します。 -そのような場合、ベクトル類似性インデックスは、他のスキッピングインデックスと比較して最も多くの行を削除すると期待されるため、最初に評価されます。 - -ポストフィルタリングとプレフィルタリングの細かな制御には、2つの設定が使用できます: - -設定 [vector_search_filter_strategy](../../../operations/settings/settings#vector_search_filter_strategy)(デフォルト:`auto`、上記のヒューリスティックスを実装)は `prefilter` に設定できます。 -これは、追加のフィルタ条件が非常に選択的な場合、プレフィルタリングを強制するために便利です。 -例えば、次のクエリはプレフィルタリングから利益を得る可能性があります: - -```sql -SELECT bookid, author, title -FROM books -WHERE price < 2.00 -ORDER BY cosineDistance(book_vector, getEmbedding('Books on ancient Asian empires')) -LIMIT 10 -``` - -もし2ドル未満の本が非常に少数しか存在しないと仮定すると、ポストフィルタリングは結果としてゼロ行を返す可能性があります。なぜなら、ベクトルインデックスが返す上位10の一致がすべて2ドル以上の価格だった可能性があるからです。 -プレフィルタリングを強制することで(クエリに`SETTINGS vector_search_filter_strategy = 'prefilter'`を追加)、ClickHouseは価格が2ドル未満のすべての本をまず見つけ、その後で発見した本に対してブルートフォースベクトル検索を実行します。 - -上記の問題を解決するための別のアプローチとして、設定 [vector_search_postfilter_multiplier](../../../operations/settings/settings.md#vector_search_postfilter_multiplier)(デフォルト:`1.0`)を `1.0` より大きい値に設定することができます(例:`2.0`)。 -ベクトルインデックスから取得する最近傍の数が設定値で乗算され、その後、それらの行に追加フィルタが適用されて LIMIT 件の行が返されます。 -例えば、もう一度クエリを実行して、乗算子を `3.0` に設定してみましょう: - -```sql -SELECT bookid, author, title -FROM books -WHERE price < 2.00 -ORDER BY cosineDistance(book_vector, getEmbedding('Books on ancient Asian empires')) -LIMIT 10 -SETTINGS vector_search_postfilter_multiplier = 3.0; -``` - -ClickHouseは各パートから 3.0 x 10 = 30 の最近傍をベクトルインデックスから取得し、その後に追加のフィルタを評価します。 -最も近い10の隣接点のみが返されます。 -`vector_search_postfilter_multiplier` 設定は問題を軽減できますが、極端なケース(非常に選択的なWHERE条件)では、返される行数がN未満である可能性が依然として残ります。 - -### パフォーマンス調整 {#performance-tuning} - -**圧縮の調整** - -ほとんどの使用ケースでは、基盤となるカラム内のベクトルは密であり、圧縮しづらいです。 -その結果、[圧縮](/sql-reference/statements/create/table.md#column_compression_codec)は、ベクトルカラムへの挿入や読み込みを遅くします。 -したがって、圧縮を無効にすることをお勧めします。 -そのためには、ベクトルカラムに `CODEC(NONE)` を指定します: - -```sql -CREATE TABLE tab(id Int32, vec Array(Float32) CODEC(NONE), INDEX idx vec TYPE vector_similarity('hnsw', 'L2Distance', 2)) ENGINE = MergeTree ORDER BY id; -``` - -**インデックス作成の調整** - -ベクトル類似性インデックスのライフサイクルは、パーツのライフサイクルに関連しています。 -言い換えれば、定義されたベクトル類似性インデックスがある新しいパートが作成されるたびに、そのインデックスも作成されます。 -これは通常、データが[挿入](https://clickhouse.com/docs/guides/inserting-data)されたときや、[マージ](https://clickhouse.com/docs/merges)中に発生します。 -残念ながら、HNSWは長いインデックス作成時間で知られており、これが挿入やマージを著しく遅くすることがあります。 -ベクトル類似性インデックスは、データが不変またはめったに変更されない場合にのみ理想的に使用されるべきです。 - -インデックス作成を高速化するために、次の技術を使用できます: - -まず、インデックス作成を並列化できます。 -インデックス作成スレッドの最大数は、サーバーの設定 [max_build_vector_similarity_index_thread_pool_size](/operations/server-configuration-parameters/settings#max_build_vector_similarity_index_thread_pool_size) を使用して構成できます。 -最適なパフォーマンスを得るには、設定値をCPUコア数に構成することが推奨されます。 - -次に、INSERT文の速度向上のために、セッション設定 [materialize_skip_indexes_on_insert](../../../operations/settings/settings.md#materialize_skip_indexes_on_insert)
を使用して新しく挿入されたパーツでのスキッピングインデックスの作成を無効にできます。 -そのようなパーツに対するSELECTクエリは、正確な検索に戻ります。 -挿入されたパーツは通常、テーブル全体に対して小さいため、その影響は微小であると予想されます。 - -第三に、マージを高速化するために、セッション設定 [materialize_skip_indexes_on_merge](../../../operations/settings/merge-tree-settings.md#materialize_skip_indexes_on_merge)を使用してマージされたパーツでのスキッピングインデックスの作成を無効にできます。 -これは、ステートメント [ALTER TABLE \[...\] MATERIALIZE INDEX \[...\]](../../../sql-reference/statements/alter/skipping-index.md#materialize-index) と組み合わせることで、ベクトル類似性インデックスのライフサイクルを明示的に制御します。 -例えば、インデックス作成をすべてのデータが取り込まれるまで、またはシステムの負荷が少ない期間(土曜日など)まで延期できます。 - -**インデックスの使用調整** - -SELECTクエリは、ベクトル類似性インデックスを使用するために、それをメインメモリにロードする必要があります。 -同じベクトル類似性インデックスが繰り返しメインメモリにロードされないようにするため、ClickHouseはそのようなインデックス用の専用インメモリキャッシュを提供しています。 -このキャッシュのサイズが大きいほど、不要なロードは少なくなります。 -最大キャッシュサイズは、サーバー設定 [vector_similarity_index_cache_size](../../../operations/server-configuration-parameters/settings.md#vector_similarity_index_cache_size) を使用して構成できます。 -デフォルトでは、キャッシュは最大5GBに成長できます。 - -ベクトル類似性インデックスキャッシュの現在のサイズは、[system.metrics](../../../operations/system-tables/metrics.md) で表示されます: - -```sql -SELECT metric, value -FROM system.metrics -WHERE metric = 'VectorSimilarityIndexCacheSize' -``` - -特定のクエリIDに対するクエリのキャッシュヒットおよびミスは、[system.query_log](../../../operations/system-tables/query_log.md) から取得できます: - -```sql -SYSTEM FLUSH LOGS query_log; - -SELECT ProfileEvents['VectorSimilarityIndexCacheHits'], ProfileEvents['VectorSimilarityIndexCacheMisses'] -FROM system.query_log -WHERE type = 'QueryFinish' AND query_id = '<...>' -ORDER BY event_time_microseconds; -``` - -本番使用ケースでは、すべてのベクトルインデックスが常にメモリ内に保持されるようにキャッシュのサイズを大きくすることをお勧めします。 - -### 管理と監視 {#administration} - -ディスク上のベクトル類似性インデックスのサイズは、[system.data_skipping_indices](../../../operations/system-tables/data_skipping_indices) から取得できます: - -```sql -SELECT database, table, name, formatReadableSize(data_compressed_bytes) -FROM system.data_skipping_indices -WHERE type = 'vector_similarity'; -``` - -出力例: - -```result -┌─database─┬─table─┬─name─┬─formatReadab⋯ssed_bytes)─┐ -│ default │ tab │ idx │ 348.00 MB │ -└──────────┴───────┴──────┴──────────────────────────┘ -``` - -### 通常のスキッピングインデックスとの違い {#differences-to-regular-skipping-indexes} - -すべての通常の[スキッピングインデックス](/optimize/skipping-indexes)と同様に、ベクトル類似性インデックスはグラニュールに対して構築され、各インデックスブロックは `GRANULARITY = [N]` のグラニュールから構成されています(通常のスキッピングインデックスのデフォルトは `[N] = 1`)。 -例えば、テーブルの主インデックス粒度が8192(設定 `index_granularity = 8192`)で、`GRANULARITY = 2` の場合、各インデックスブロックは16384行を含みます。 -しかし、近似隣接検索のためのデータ構造やアルゴリズムは本質的に行指向です。 -それらは行のセットのコンパクトな表現を格納し、またベクトル検索クエリのために行を返します。 -これは、通常のスキッピングインデックスに比べて、ベクトル類似性インデックスの動作にいくつかの非常に直感的でない違いを引き起こします。 - -ユーザーがカラムにベクトル類似性インデックスを定義すると、ClickHouseは内部的に各インデックスブロックに対してベクトル類似性「サブインデックス」を作成します。 -サブインデックスは、その含まれるインデックスブロック内の行についてのみ知識を持つ「ローカル」なものです。 -前述の例で、カラムが65536行を有する場合、4つのインデックスブロック(8つのグラニュールをまたがっています)が得られ、各インデックスブロックに対してベクトル類似性サブインデックスが作成されます。 -サブインデックスは理論上、そのインデックスブロック内で最も近いN個のポイントを直接返すことができます。 -しかし、ClickHouseはグラニュールの粒度でディスクからメモリにデータをロードするため、サブインデックスは一致する行をグラニュールの粒度まで外挿します。 -これは、インデックスブロックの粒度でデータをスキップする通常のスキッピングインデックスとは異なります。 - -`GRANULARITY` パラメータは、どのくらいの数のベクトル類似性サブインデックスが作成されるかを決定します。 -大きな `GRANULARITY` 値は、ベクトル類似性サブインデックスの数を減らし、逆に大きくします。 -その結果、サブインデックスが1つしか持たなくなるまでになります。そうすると、そのサブインデックスは全てのカラム行に対して「グローバル」な見方を持つことになり、関連する行を持つカラム(部分)の全グラニュールを直接返すことができます(関連する行を持つグラニュールはせいぜい `LIMIT [N]` までです)。 -次のステップでは、ClickHouseがこれらのグラニュールをロードし、グラニュール内のすべての行に対してブルートフォース距離計算を実行し、実際に最も良い行を特定します。 -小さな `GRANULARITY` 値では、各サブインデックスが最大 `LIMIT N` 個のグラニュールを返します。 -その結果、より多くのグラニュールをロードして、ポストフィルタリングを実行する必要があります。 -両ケースで検索精度は同等に良好ですが、処理性能が異なります。 -近似検索には一般的に大きな 
`GRANULARITY` を使用することが推奨され、ベクトル類似性構造体が過剰にメモリを消費する場合にのみ小さな `GRANULARITY` 値に戻ります。 -ベクトル類似性インデックスに対して `GRANULARITY` が指定されていない場合、デフォルト値は1億です。 - -### 例 {#approximate-nearest-neighbor-search-example} - -```sql -CREATE TABLE tab(id Int32, vec Array(Float32), INDEX idx vec TYPE vector_similarity('hnsw', 'L2Distance', 2)) ENGINE = MergeTree ORDER BY id; - -INSERT INTO tab VALUES (0, [1.0, 0.0]), (1, [1.1, 0.0]), (2, [1.2, 0.0]), (3, [1.3, 0.0]), (4, [1.4, 0.0]), (5, [1.5, 0.0]), (6, [0.0, 2.0]), (7, [0.0, 2.1]), (8, [0.0, 2.2]), (9, [0.0, 2.3]), (10, [0.0, 2.4]), (11, [0.0, 2.5]); - -WITH [0., 2.] AS reference_vec -SELECT id, vec -FROM tab -ORDER BY L2Distance(vec, reference_vec) ASC -LIMIT 3; -``` - -は以下を返します: - -```result - ┌─id─┬─vec─────┐ -1. │ 6 │ [0,2] │ -2. │ 7 │ [0,2.1] │ -3. │ 8 │ [0,2.2] │ - └────┴─────────┘ -``` - -## 参考文献 {#references} - -ブログ: -- [ClickHouseによるベクトル検索 - パート1](https://clickhouse.com/blog/vector-search-clickhouse-p1) -- [ClickHouseによるベクトル検索 - パート2](https://clickhouse.com/blog/vector-search-clickhouse-p2) diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/mergetree-family/annindexes.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/mergetree-family/annindexes.md.hash deleted file mode 100644 index b4d7086fc64..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/mergetree-family/annindexes.md.hash +++ /dev/null @@ -1 +0,0 @@ -0717911fec5b2286 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/mergetree-family/collapsingmergetree.md b/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/mergetree-family/collapsingmergetree.md deleted file mode 100644 index e8cc1f61aba..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/mergetree-family/collapsingmergetree.md +++ /dev/null @@ -1,327 +0,0 @@ ---- -description: 'MergeTree から継承され、マージプロセス中に行を折り畳むロジックが追加されています。' -keywords: -- 'updates' -- 'collapsing' -sidebar_label: 'CollapsingMergeTree' -sidebar_position: 70 -slug: '/engines/table-engines/mergetree-family/collapsingmergetree' -title: 'CollapsingMergeTree' ---- - - - - -# CollapsingMergeTree - -## Description {#description} - -`CollapsingMergeTree` エンジンは [MergeTree](../../../engines/table-engines/mergetree-family/mergetree.md) から継承され、マージプロセス中に行を統合するためのロジックを追加します。 `CollapsingMergeTree` テーブルエンジンは、すべてのフィールドがソートキー (`ORDER BY`) で等価で、特別なフィールド `Sign` の値が `1` または `-1` の場合に、対になる行を非同期的に削除 (統合) します。 対になる値の `Sign` を持たない行は保持されます。 - -詳細については、ドキュメントの [Collapsing](#table_engine-collapsingmergetree-collapsing) セクションを参照してください。 - -:::note -このエンジンはストレージのボリュームを大幅に削減し、その結果、`SELECT` クエリの効率を高める可能性があります。 -::: - -## Parameters {#parameters} - -`Sign` パラメータを除く、このテーブルエンジンのすべてのパラメータは、[`MergeTree`](/engines/table-engines/mergetree-family/mergetree) と同じ意味を持ちます。 - -- `Sign` — `1` が「状態」行、`-1` が「キャンセル」行を持つ行のタイプのカラムに与えられた名前。タイプ: [Int8](/sql-reference/data-types/int-uint)。 - -## Creating a Table {#creating-a-table} - -```sql -CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] -( - name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1], - name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2], - ... -) -ENGINE = CollapsingMergeTree(Sign) -[PARTITION BY expr] -[ORDER BY expr] -[SAMPLE BY expr] -[SETTINGS name=value, ...] -``` - -
- -Deprecated Method for Creating a Table - -:::note -以下の手法は新しいプロジェクトでの使用が推奨されません。 可能であれば、古いプロジェクトを新しい手法に更新することをお勧めします。 -::: - -```sql -CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] -( - name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1], - name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2], - ... -) -ENGINE [=] CollapsingMergeTree(date-column [, sampling_expression], (primary, key), index_granularity, Sign) -``` - -`Sign` — `1` が「状態」行、`-1` が「キャンセル」行を持つ行のタイプのカラムに与えられた名前。 [Int8](/sql-reference/data-types/int-uint)。 - -
- -- クエリパラメータの説明については [query description](../../../sql-reference/statements/create/table.md) を参照してください。 -- `CollapsingMergeTree` テーブルを作成する際には、`MergeTree` テーブルを作成する際と同様の [クエリ句](../../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-creating-a-table) が必要です。 - -## Collapsing {#table_engine-collapsingmergetree-collapsing} - -### Data {#data} - -ある特定のオブジェクトのために継続的に変化するデータを保存する必要があるとしましょう。 1つの行をオブジェクトごとに持ち、何かが変わるたびに更新するのが論理的に思えるかもしれませんが、更新操作はコストが高く、遅いため、ストレージ上のデータを再書き込みする必要があります。 データを書き込むために迅速な処理が必要な場合、大量の更新を行うことは受け入れられませんが、常にオブジェクトの変更を順次記録することができます。 これを行うために、特別なカラム `Sign` を利用します。 - -- `Sign` = `1` の場合、それは行が「状態」行であることを意味します: _現在の有効な状態を表すフィールドを含む行_。 -- `Sign` = `-1` の場合、それは行が「キャンセル」行であることを意味します: _同じ属性を持つオブジェクトの状態をキャンセルするために使用される行_。 - -例えば、私たちはユーザーがあるウェブサイトでチェックしたページ数とその訪問期間を計算したいとします。 ある時点で、ユーザー活動の状態を持つ次の行を書き込みます: - -```text -┌──────────────UserID─┬─PageViews─┬─Duration─┬─Sign─┐ -│ 4324182021466249494 │ 5 │ 146 │ 1 │ -└─────────────────────┴───────────┴──────────┴──────┘ -``` - -後のタイミングで、ユーザー活動の変化を記録し、次の2行で書き込みます: - -```text -┌──────────────UserID─┬─PageViews─┬─Duration─┬─Sign─┐ -│ 4324182021466249494 │ 5 │ 146 │ -1 │ -│ 4324182021466249494 │ 6 │ 185 │ 1 │ -└─────────────────────┴───────────┴──────────┴──────┘ -``` - -最初の行はオブジェクトの以前の状態をキャンセルします (この場合、ユーザーを表現)。 それは「キャンセル」された行のすべてのソートキーのフィールドを `Sign` を除いてコピーする必要があります。 上の2行目は現在の状態を含みます。 - -我々はユーザー活動の最後の状態のみを必要とするため、元の「状態」行と挿入した「キャンセル」行は以下のように削除される可能性があります。無効(古い)状態のオブジェクトを統合します: - -```text -┌──────────────UserID─┬─PageViews─┬─Duration─┬─Sign─┐ -│ 4324182021466249494 │ 5 │ 146 │ 1 │ -- 古い "状態" 行は削除可能 -│ 4324182021466249494 │ 5 │ 146 │ -1 │ -- "キャンセル" 行は削除可能 -│ 4324182021466249494 │ 6 │ 185 │ 1 │ -- 新しい "状態" 行は残る -└─────────────────────┴───────────┴──────────┴──────┘ -``` - -`CollapsingMergeTree` はデータパーツのマージ時にこの_統合_の動作を正確に実行します。 - -:::note -各変更に2行が必要な理由は、[Algorithm](#table_engine-collapsingmergetree-collapsing-algorithm) の段落でさらに説明されています。 -::: - -**そのようなアプローチの特異性** - -1. データを書き込むプログラムは、キャンセルできるようにオブジェクトの状態を記憶しておく必要があります。「キャンセル」行には「状態」行のソートキーのフィールドのコピーと反対の `Sign` を含む必要があります。これにより、初期のストレージサイズは増加しますが、迅速にデータを書き込むことが可能になります。 -2. カラム内の長い成長配列は、書き込みの負荷が増加するため、エンジンの効率を低下させます。データがシンプルであればあるほど、効率は高くなります。 -3. 
`SELECT` 結果はオブジェクト変更履歴の整合性に大きく依存します。挿入用にデータを準備する際は正確であることが大切です。整合性のないデータでは予測不可能な結果を得ることがあります。例えば、セッション深度などの非負メトリクスに対する負の値です。 - -### Algorithm {#table_engine-collapsingmergetree-collapsing-algorithm} - -ClickHouseがデータ [parts](/concepts/glossary#parts) をマージする際、同じソートキー (`ORDER BY`) を持つ連続した行の各グループは、最大で2行(`Sign` = `1` の「状態」行と `Sign` = `-1` の「キャンセル」行)に削減されます。 言い換えれば、ClickHouseエントリは統合されます。 - -各結果データパートについて ClickHouse は次を保存します: - -| | | -|--|-------------------------------------------------------------------------------------------------------------------------------------| -|1.| 「状態」行と「キャンセル」行の数が一致し、最後の行が「状態」行である場合に、最初の「キャンセル」行と最後の「状態」行。 | -|2.| 「キャンセル」行の数が「状態」行の数より少ない場合、最後の「状態」行。 | -|3.| 「状態」行の数が「キャンセル」行の数より少ない場合、最初の「キャンセル」行。 | -|4.| その他のすべてのケースでは、行は何も保存されません。 | - -さらに、「状態」行が「キャンセル」行よりも少なくとも2本多い場合や、「キャンセル」行が「状態」行よりも少なくとも2本多い場合は、マージが続行されます。ただし、ClickHouseはこの状況を論理エラーと見なし、サーバーログに記録します。このエラーは、同じデータを複数回挿入した場合に発生する可能性があります。したがって、統合は統計計算の結果を変更してはなりません。変更は徐々に統合され、最終的にはほぼすべてのオブジェクトの最新の状態のみが残ります。 - -`Sign` カラムが必要なのは、マージアルゴリズムが同じソートキーを持つすべての行が同じ結果データパートにあり、同じ物理サーバーにもいると保証しないからです。 ClickHouseは複数のスレッドで `SELECT` クエリを処理し、結果の行の順序を予測することができません。 - -完全に「統合」されたデータを `CollapsingMergeTree` テーブルから取得する必要がある場合は、集約が必要です。 統合を最終化するために、`GROUP BY` 句と `Sign` を考慮した集約関数を持つクエリを書きます。 例えば、数量を計算するには `count()` の代わりに `sum(Sign)` を使用します。 何かの合計を計算するには `sum(Sign * x)` を使用し、 `HAVING sum(Sign) > 0` と組み合わせて `sum(x)` の代わりに使用します。以下の [example](#example-of-use) 参照。 - -集計 `count`, `sum` および `avg` はこのように計算できます。オブジェクトに少なくとも1つの非統合状態がある場合、集約 `uniq` を計算できます。集計 `min` および `max` は計算できません、なぜなら `CollapsingMergeTree` は統合されている状態の履歴を保存しないからです。 - -:::note -集約なしでデータを抽出する必要がある場合(例えば、最新の値が特定の条件に一致する行が存在するかどうかを確認するため)、`FROM` 句の [`FINAL`](../../../sql-reference/statements/select/from.md#final-modifier) 修飾子を使用できます。これにより、結果を返す前にデータがマージされます。 -CollapsingMergeTree では、各キーの最新の状態行のみが返されます。 -::: - -## Examples {#examples} - -### Example of Use {#example-of-use} - -次の例データを考えてみましょう: - -```text -┌──────────────UserID─┬─PageViews─┬─Duration─┬─Sign─┐ -│ 4324182021466249494 │ 5 │ 146 │ 1 │ -│ 4324182021466249494 │ 5 │ 146 │ -1 │ -│ 4324182021466249494 │ 6 │ 185 │ 1 │ -└─────────────────────┴───────────┴──────────┴──────┘ -``` - -`CollapsingMergeTree` を使用してテーブル `UAct` を作成しましょう: - -```sql -CREATE TABLE UAct -( - UserID UInt64, - PageViews UInt8, - Duration UInt8, - Sign Int8 -) -ENGINE = CollapsingMergeTree(Sign) -ORDER BY UserID -``` - -次に、データを挿入します: - -```sql -INSERT INTO UAct VALUES (4324182021466249494, 5, 146, 1) -``` - -```sql -INSERT INTO UAct VALUES (4324182021466249494, 5, 146, -1),(4324182021466249494, 6, 185, 1) -``` - -2つの `INSERT` クエリを使用して、2つの異なるデータパーツを作成します。 - -:::note -単一のクエリでデータを挿入した場合、ClickHouseは1つのデータパートのみを作成し、マージは行われません。 -::: - -データを選択するには: - -```sql -SELECT * FROM UAct -``` - -```text -┌──────────────UserID─┬─PageViews─┬─Duration─┬─Sign─┐ -│ 4324182021466249494 │ 5 │ 146 │ -1 │ -│ 4324182021466249494 │ 6 │ 185 │ 1 │ -└─────────────────────┴───────────┴──────────┴──────┘ -┌──────────────UserID─┬─PageViews─┬─Duration─┬─Sign─┐ -│ 4324182021466249494 │ 5 │ 146 │ 1 │ -└─────────────────────┴───────────┴──────────┴──────┘ -``` - -返されたデータを見て、統合が行われたかどうか確認しましょう... 
2つの `INSERT` クエリで、2つのデータパーツを作成しました。 `SELECT` クエリは2つのスレッドで実行され、行の順序はランダムになりました。 しかし、統合は **行われませんでした** なぜなら、データパーツのマージはまだ行われておらず、ClickHouseは未知の瞬間にバックグラウンドでデータパーツをマージするからです。 - -したがって、集約が必要です。 これは、[`sum`](/sql-reference/aggregate-functions/reference/sum) 集約関数と [`HAVING`](/sql-reference/statements/select/having) 句を使用して実行します: - -```sql -SELECT - UserID, - sum(PageViews * Sign) AS PageViews, - sum(Duration * Sign) AS Duration -FROM UAct -GROUP BY UserID -HAVING sum(Sign) > 0 -``` - -```text -┌──────────────UserID─┬─PageViews─┬─Duration─┐ -│ 4324182021466249494 │ 6 │ 185 │ -└─────────────────────┴───────────┴──────────┘ -``` - -集約が不要で統合を強制したい場合は、`FROM` 句に対して `FINAL` 修飾子も使用できます。 - -```sql -SELECT * FROM UAct FINAL -``` - -```text -┌──────────────UserID─┬─PageViews─┬─Duration─┬─Sign─┐ -│ 4324182021466249494 │ 6 │ 185 │ 1 │ -└─────────────────────┴───────────┴──────────┴──────┘ -``` - -:::note -この方法でデータを選択することは非効率的であり、大量のスキャンデータ(数百万行)には使用をお勧めしません。 -::: - -### Example of Another Approach {#example-of-another-approach} - -このアプローチの考えは、マージがキーのフィールドのみを考慮するということです。「キャンセル」行では、したがって、`Sign` カラムを使用せずに合計する際、行の前のバージョンを等しくするマイナス値を指定できます。 - -この例では、以下のサンプルデータを使用します: - -```text -┌──────────────UserID─┬─PageViews─┬─Duration─┬─Sign─┐ -│ 4324182021466249494 │ 5 │ 146 │ 1 │ -│ 4324182021466249494 │ -5 │ -146 │ -1 │ -│ 4324182021466249494 │ 6 │ 185 │ 1 │ -└─────────────────────┴───────────┴──────────┴──────┘ -``` - -このアプローチでは、負の値を保存するために `PageViews` および `Duration` のデータ型を変更する必要があります。したがって、`collapsingMergeTree` を使用してテーブル `UAct`を作成する際にこれらの列の値を `UInt8` から `Int16` に変更します: - -```sql -CREATE TABLE UAct -( - UserID UInt64, - PageViews Int16, - Duration Int16, - Sign Int8 -) -ENGINE = CollapsingMergeTree(Sign) -ORDER BY UserID -``` - -テーブルにデータを挿入してアプローチをテストします。 - -例や小規模なテーブルでは、これは受け入れられます: - -```sql -INSERT INTO UAct VALUES(4324182021466249494, 5, 146, 1); -INSERT INTO UAct VALUES(4324182021466249494, -5, -146, -1); -INSERT INTO UAct VALUES(4324182021466249494, 6, 185, 1); - -SELECT * FROM UAct FINAL; -``` - -```text -┌──────────────UserID─┬─PageViews─┬─Duration─┬─Sign─┐ -│ 4324182021466249494 │ 6 │ 185 │ 1 │ -└─────────────────────┴───────────┴──────────┴──────┘ -``` - -```sql -SELECT - UserID, - sum(PageViews) AS PageViews, - sum(Duration) AS Duration -FROM UAct -GROUP BY UserID -``` - -```text -┌──────────────UserID─┬─PageViews─┬─Duration─┐ -│ 4324182021466249494 │ 6 │ 185 │ -└─────────────────────┴───────────┴──────────┘ -``` - -```sql -SELECT COUNT() FROM UAct -``` - -```text -┌─count()─┐ -│ 3 │ -└─────────┘ -``` - -```sql -OPTIMIZE TABLE UAct FINAL; - -SELECT * FROM UAct -``` - -```text -┌──────────────UserID─┬─PageViews─┬─Duration─┬─Sign─┐ -│ 4324182021466249494 │ 6 │ 185 │ 1 │ -└─────────────────────┴───────────┴──────────┴──────┘ -``` diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/mergetree-family/collapsingmergetree.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/mergetree-family/collapsingmergetree.md.hash deleted file mode 100644 index 636a6030eae..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/mergetree-family/collapsingmergetree.md.hash +++ /dev/null @@ -1 +0,0 @@ -dc19e21d60bf17b0 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/mergetree-family/custom-partitioning-key.md b/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/mergetree-family/custom-partitioning-key.md deleted file mode 100644 index 9b8e17097bf..00000000000 --- 
a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/mergetree-family/custom-partitioning-key.md +++ /dev/null @@ -1,185 +0,0 @@ ---- -description: 'MergeTree テーブルにカスタムパーティショニングキーを追加する方法について学びます。' -sidebar_label: 'カスタムパーティショニングキー' -sidebar_position: 30 -slug: '/engines/table-engines/mergetree-family/custom-partitioning-key' -title: 'カスタムパーティショニングキー' ---- - - - - -# カスタムパーティショニングキー - -:::note -ほとんどの場合、パーティションキーは不要であり、他のほとんどのケースでも、月単位以上の粒度のパーティションキーは必要ありません。 - -あまりにも粒度が細かいパーティショニングを使用しないでください。クライアントの識別子や名前でデータをパーティションしないでください。その代わりに、ORDER BY式の最初のカラムとしてクライアント識別子または名前を指定してください。 -::: - -パーティショニングは、[MergeTreeファミリーのテーブル](../../../engines/table-engines/mergetree-family/mergetree.md)で利用可能であり、[レプリケートテーブル](../../../engines/table-engines/mergetree-family/replication.md)や[マテリアライズドビュー](/sql-reference/statements/create/view#materialized-view)も含まれます。 - -パーティションは、指定された基準によってテーブル内のレコードの論理的な組み合わせです。パーティションは、月、日、またはイベントタイプなどの任意の基準で設定できます。各パーティションはデータの操作を簡素化するために別々に保存されます。データにアクセスする際、ClickHouseは可能な限り最小のサブセットのパーティションを使用します。パーティションは、パーティショニングキーを含むクエリのパフォーマンスを向上させます。なぜなら、ClickHouseはパーツやグラニュールを選択する前に、そのパーティションのフィルタリングを行うからです。 - -パーティションは、[テーブルを作成する際](../../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-creating-a-table)に `PARTITION BY expr` 節で指定されます。パーティションキーはテーブルのカラムからの任意の式にすることができます。例えば、月ごとにパーティショニングを指定するには、`toYYYYMM(date_column)` という式を使用します: - -```sql -CREATE TABLE visits -( - VisitDate Date, - Hour UInt8, - ClientID UUID -) -ENGINE = MergeTree() -PARTITION BY toYYYYMM(VisitDate) -ORDER BY Hour; -``` - -パーティションキーは、式のタプルでもできます([主キー](../../../engines/table-engines/mergetree-family/mergetree.md#primary-keys-and-indexes-in-queries)に類似)。例えば: - -```sql -ENGINE = ReplicatedCollapsingMergeTree('/clickhouse/tables/name', 'replica1', Sign) -PARTITION BY (toMonday(StartDate), EventType) -ORDER BY (CounterID, StartDate, intHash32(UserID)); -``` - -この例では、現在の週に発生したイベントタイプによってパーティショニングを設定しています。 - -デフォルトでは、浮動小数点数のパーティションキーはサポートされていません。使用するには、設定 [allow_floating_point_partition_key](../../../operations/settings/merge-tree-settings.md#allow_floating_point_partition_key) を有効にします。 - -テーブルに新しいデータを挿入する際、このデータは主キーによってソートされた別のパート(チャンク)として保存されます。挿入から10~15分後に、同じパーティションのパーツが全体のパートにマージされます。 - -:::info -マージは、パーティショニング式の同じ値を持つデータパーツに対してのみ機能します。これは、**あまりにも粒度の細かいパーティションを作成しないべき**であることを意味します(大体1000パーティション以上の粒度)。そうでなければ、`SELECT` クエリのパフォーマンスが悪化します。ファイルシステム内のファイル数やオープンファイルディスクリプタの異常に大きい数が原因です。 -::: - -[system.parts](../../../operations/system-tables/parts.md) テーブルを使用して、テーブルのパーツとパーティションを表示します。例えば、`visits` テーブルが月ごとにパーティショニングされていると仮定しましょう。`system.parts` テーブルの `SELECT` クエリを実行します: - -```sql -SELECT - partition, - name, - active -FROM system.parts -WHERE table = 'visits' -``` - -```text -┌─partition─┬─name──────────────┬─active─┐ -│ 201901 │ 201901_1_3_1 │ 0 │ -│ 201901 │ 201901_1_9_2_11 │ 1 │ -│ 201901 │ 201901_8_8_0 │ 0 │ -│ 201901 │ 201901_9_9_0 │ 0 │ -│ 201902 │ 201902_4_6_1_11 │ 1 │ -│ 201902 │ 201902_10_10_0_11 │ 1 │ -│ 201902 │ 201902_11_11_0_11 │ 1 │ -└───────────┴───────────────────┴────────┘ -``` - -`partition` カラムにはパーティションの名前が含まれています。この例では、`201901` と `201902` の2つのパーティションがあります。この列の値を使用して、[ALTER ... 
PARTITION](../../../sql-reference/statements/alter/partition.md) クエリでパーティション名を指定できます。 - -`name` カラムにはパーティションデータパーツの名前が含まれています。この列を使用して、[ALTER ATTACH PART](/sql-reference/statements/alter/partition#attach-partitionpart) クエリでパートの名前を指定できます。 - -パートの名前 `201901_1_9_2_11` を分解してみましょう: - -- `201901` はパーティション名です。 -- `1` はデータブロックの最小番号です。 -- `9` はデータブロックの最大番号です。 -- `2` はチャンクレベル(形成されたマージツリーの深さ)です。 -- `11` は変異バージョン(パートが変異した場合) - -:::info -古いタイプのテーブルのパーツは、名前が `20190117_20190123_2_2_0` です(最小日付 - 最大日付 - 最小ブロック番号 - 最大ブロック番号 - レベル)。 -::: - -`active` カラムはパートの状態を示します。`1` はアクティブ、`0` は非アクティブです。非アクティブなパーツは、例えば、大きなパートにマージされた後に残るソースパーツです。破損したデータパーツも非アクティブとして表示されます。 - -例のように、同一のパーティションのいくつかの分離されたパーツ(例えば、`201901_1_3_1` と `201901_1_9_2`)があります。これは、これらのパーツがまだマージされていないことを意味します。ClickHouseは、データの挿入から約15分後に挿入されたパーツを定期的にマージします。その上、[OPTIMIZE](../../../sql-reference/statements/optimize.md) クエリを使用することで、スケジュール外のマージを実行できます。例: - -```sql -OPTIMIZE TABLE visits PARTITION 201902; -``` - -```text -┌─partition─┬─name─────────────┬─active─┐ -│ 201901 │ 201901_1_3_1 │ 0 │ -│ 201901 │ 201901_1_9_2_11 │ 1 │ -│ 201901 │ 201901_8_8_0 │ 0 │ -│ 201901 │ 201901_9_9_0 │ 0 │ -│ 201902 │ 201902_4_6_1 │ 0 │ -│ 201902 │ 201902_4_11_2_11 │ 1 │ -│ 201902 │ 201902_10_10_0 │ 0 │ -│ 201902 │ 201902_11_11_0 │ 0 │ -└───────────┴──────────────────┴────────┘ -``` - -非アクティブなパーツは、マージ後約10分で削除されます。 - -パーツとパーティションのセットを表示するもう1つの方法は、テーブルのディレクトリにアクセスすることです:`/var/lib/clickhouse/data///`。例えば: - -```bash -/var/lib/clickhouse/data/default/visits$ ls -l -total 40 -drwxr-xr-x 2 clickhouse clickhouse 4096 Feb 1 16:48 201901_1_3_1 -drwxr-xr-x 2 clickhouse clickhouse 4096 Feb 5 16:17 201901_1_9_2_11 -drwxr-xr-x 2 clickhouse clickhouse 4096 Feb 5 15:52 201901_8_8_0 -drwxr-xr-x 2 clickhouse clickhouse 4096 Feb 5 15:52 201901_9_9_0 -drwxr-xr-x 2 clickhouse clickhouse 4096 Feb 5 16:17 201902_10_10_0 -drwxr-xr-x 2 clickhouse clickhouse 4096 Feb 5 16:17 201902_11_11_0 -drwxr-xr-x 2 clickhouse clickhouse 4096 Feb 5 16:19 201902_4_11_2_11 -drwxr-xr-x 2 clickhouse clickhouse 4096 Feb 5 12:09 201902_4_6_1 -drwxr-xr-x 2 clickhouse clickhouse 4096 Feb 1 16:48 detached -``` - -`201901_1_1_0` や `201901_1_7_1` などのフォルダは、パーツのディレクトリです。各パートは対応するパーティションに関連しており、特定の月のデータだけを含んでいます(この例のテーブルは月ごとにパーティショニングされています)。 - -`detached` ディレクトリには、[DETACH](/sql-reference/statements/detach) クエリを使用してテーブルから切り離されたパーツが含まれています。破損したパーツも削除されるのではなく、このディレクトリに移動されます。サーバーは `detached` ディレクトリのパーツを使用しません。このディレクトリ内のデータをいつでも追加、削除、または変更できます。サーバーは、[ATTACH](/sql-reference/statements/alter/partition#attach-partitionpart) クエリを実行するまで、これについて知ることはありません。 - -稼働中のサーバーでは、ファイルシステム上のパーツのセットやそのデータを手動で変更することはできません。サーバーはそれについて知ることがないためです。レプリケートされていないテーブルでは、サーバーが停止している時にこれを行うことができますが、お勧めはしません。レプリケートされたテーブルでは、パーツのセットはどのような場合でも変更できません。 - -ClickHouseでは、パーティションに対して操作を行うことができます:削除、別のテーブルからのコピー、またはバックアップを作成することです。操作のリストは、[パーティションとパーツの操作](/sql-reference/statements/alter/partition) セクションで確認してください。 - -## パーティションキーを使用したグループ化最適化 {#group-by-optimisation-using-partition-key} - -テーブルのパーティションキーとクエリのグループ化キーの組み合わせによっては、各パーティションを独立して集約することが可能な場合があります。 -その場合、全ての実行スレッドの集約データを最後にマージする必要はありません。 -なぜなら、各グループ化キーの値が2つの異なるスレッドの作業セットに出現しないことが保証されているからです。 - -典型的な例は次のとおりです: - -```sql -CREATE TABLE session_log -( - UserID UInt64, - SessionID UUID -) -ENGINE = MergeTree -PARTITION BY sipHash64(UserID) % 16 -ORDER BY tuple(); - -SELECT - UserID, - COUNT() -FROM session_log -GROUP BY UserID; -``` - -:::note -そのようなクエリのパフォーマンスは、テーブルのレイアウトに大きく依存します。そのため、最適化はデフォルトでは有効になっていません。 -::: - -良好なパフォーマンスのための重要な要素: - -- クエリに関与するパーティションの数が十分に大きいこと(`max_threads / 2` 
より多い)。そうでないと、クエリは機械を十分に活用できません。 -- パーティションがあまり小さくならず、バッチ処理が行単位の処理に陥らないこと。 -- パーティションのサイズが比較可能であること。そうすれば、全てのスレッドが大体同じ量の作業を行います。 - -:::info -データをパーティション間で均等に分配するために、`partition by` 節のカラムに対して何らかのハッシュ関数を適用することをお勧めします。 -::: - -関連する設定: - -- `allow_aggregate_partitions_independently` - 最適化の使用を制御します。 -- `force_aggregate_partitions_independently` - 正しさの観点から適用できるときにその使用を強制しますが、内部ロジックによってその適用が無効にされる場合があります。 -- `max_number_of_partitions_for_independent_aggregation` - テーブルが持つことができる最大のパーティション数についての厳しい制限。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/mergetree-family/custom-partitioning-key.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/mergetree-family/custom-partitioning-key.md.hash deleted file mode 100644 index 68a3ddca8c7..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/mergetree-family/custom-partitioning-key.md.hash +++ /dev/null @@ -1 +0,0 @@ -980ce16c01fb58e2 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/mergetree-family/graphitemergetree.md b/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/mergetree-family/graphitemergetree.md deleted file mode 100644 index b6de5a9421a..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/mergetree-family/graphitemergetree.md +++ /dev/null @@ -1,277 +0,0 @@ ---- -description: 'Designed for thinning and aggregating/averaging (rollup) Graphite - data.' -sidebar_label: 'GraphiteMergeTree' -sidebar_position: 90 -slug: '/engines/table-engines/mergetree-family/graphitemergetree' -title: 'GraphiteMergeTree' ---- - - - - -# GraphiteMergeTree - -このエンジンは、[Graphite](http://graphite.readthedocs.io/en/latest/index.html)データのスリムと集約/平均(ロールアップ)のために設計されています。ClickHouseをGraphiteのデータストアとして使用したい開発者にとって便利です。 - -ロールアップが不要な場合は、任意のClickHouseテーブルエンジンを使用してGraphiteデータを保存できますが、ロールアップが必要な場合は`GraphiteMergeTree`を使用してください。このエンジンは、ストレージのボリュームを削減し、Graphiteからのクエリの効率を向上させます。 - -このエンジンは、[MergeTree](../../../engines/table-engines/mergetree-family/mergetree.md)からプロパティを継承します。 - -## テーブルの作成 {#creating-table} - -```sql -CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] -( - Path String, - Time DateTime, - Value Float64, - Version - ... -) ENGINE = GraphiteMergeTree(config_section) -[PARTITION BY expr] -[ORDER BY expr] -[SAMPLE BY expr] -[SETTINGS name=value, ...] -``` - -[CREATE TABLE](/sql-reference/statements/create/table) クエリの詳細な説明を参照してください。 - -Graphiteデータ用のテーブルは、以下のデータのために次のカラムを持つ必要があります: - -- メトリック名(Graphiteセンサー)。データタイプ:`String`。 - -- メトリックを測定した時間。データタイプ:`DateTime`。 - -- メトリックの値。データタイプ:`Float64`。 - -- メトリックのバージョン。データタイプ:任意の数値(ClickHouseは、最高バージョンの行またはバージョンが同じ場合は最後に書き込まれた行を保存します。他の行はデータ部分のマージ中に削除されます)。 - -これらのカラムの名前は、ロールアップ設定で設定する必要があります。 - -**GraphiteMergeTreeのパラメータ** - -- `config_section` — ロールアップのルールが設定されている設定ファイルのセクション名。 - -**クエリの句** - -`GraphiteMergeTree`テーブルを作成する際には、`MergeTree`テーブルを作成する際と同じ[句](../../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-creating-a-table)が必要です。 - -
- -テーブル作成のための非推奨メソッド - -:::note -新しいプロジェクトではこのメソッドを使用せず、可能であれば古いプロジェクトを上記のメソッドに切り替えてください。 -::: - -```sql -CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] -( - EventDate Date, - Path String, - Time DateTime, - Value Float64, - Version - ... -) ENGINE [=] GraphiteMergeTree(date-column [, sampling_expression], (primary, key), index_granularity, config_section) -``` - -`config_section`を除くすべてのパラメータは、`MergeTree`と同じ意味を持ちます。 - -- `config_section` — ロールアップのルールが設定されている設定ファイルのセクション名。 - -
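参考までに、上記の必須カラム構成と `config_section` パラメータの使い方を示す最小の `CREATE TABLE` のスケッチを以下に示します。データベース名・テーブル名、およびサーバー設定側に定義済みと仮定するロールアップセクション名 `graphite_rollup` は、いずれも説明用の仮の名前です:

```sql
-- 仮の例: バージョンカラムにはデフォルト名 Timestamp を使用
CREATE TABLE graphite.data
(
    Path String,
    Time DateTime,
    Value Float64,
    Timestamp UInt32
)
ENGINE = GraphiteMergeTree('graphite_rollup')
PARTITION BY toYYYYMM(Time)
ORDER BY (Path, Time);
```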
- -## ロールアップ設定 {#rollup-configuration} - -ロールアップの設定は、サーバー設定の[graphite_rollup](../../../operations/server-configuration-parameters/settings.md#graphite)パラメータによって定義されます。このパラメータの名前は何でも構いません。複数の設定を作成し、異なるテーブルで使用することができます。 - -ロールアップ設定の構造: - - required-columns - patterns - -### 必須カラム {#required-columns} - -#### path_column_name {#path_column_name} - -`path_column_name` — メトリック名(Graphiteセンサー)を保存するカラムの名前。デフォルト値:`Path`。 - -#### time_column_name {#time_column_name} - -`time_column_name` — メトリックの測定時刻を保存するカラムの名前。デフォルト値:`Time`。 - -#### value_column_name {#value_column_name} - -`value_column_name` — `time_column_name`で設定した時刻におけるメトリックの値を保存するカラムの名前。デフォルト値:`Value`。 - -#### version_column_name {#version_column_name} - -`version_column_name` — メトリックのバージョンを保存するカラムの名前。デフォルト値:`Timestamp`。 - -### パターン {#patterns} - -`patterns`セクションの構造: - -```text -pattern - rule_type - regexp - function -pattern - rule_type - regexp - age + precision - ... -pattern - rule_type - regexp - function - age + precision - ... -pattern - ... -default - function - age + precision - ... -``` - -:::important -パターンは厳密に順序付けられている必要があります: - -1. `function`または`retention`のないパターン。 -1. 両方の`function`と`retention`を持つパターン。 -1. `default`パターン。 -::: - -行を処理する際、ClickHouseは`pattern`セクション内のルールをチェックします。それぞれの`pattern`(`default`を含む)セクションには、集約用の`function`パラメータ、`retention`パラメータのいずれかまたは両方が含まれている可能性があります。メトリック名が`regexp`に一致する場合、`pattern`セクション(またはセクション)のルールが適用されます。そうでない場合は、`default`セクションのルールが使用されます。 - -`pattern`および`default`セクションのフィールド: - -- `rule_type` - ルールのタイプ。特定のメトリックにのみ適用されます。エンジンはこれを使用して、単純なメトリックとタグ付けされたメトリックを区別します。オプションのパラメータ。デフォルト値:`all`。 -パフォーマンスが重要でない場合、または単一のメトリックタイプが使用されている場合(例えば、単純なメトリック)、これは必要ありません。デフォルトでは、ルールセットは1つだけが作成されます。別の特殊なタイプが定義されている場合は、異なる2つのセットが作成されます。1つは単純なメトリック(root.branch.leaf)用、もう1つはタグ付けされたメトリック(root.branch.leaf;tag1=value1)用です。 -デフォルトルールは両方のセットに終了します。 -有効な値: - - `all`(デフォルト) - ルールのタイプが省略されたときに使用される普遍的なルール。 - - `plain` - 単純メトリック用のルール。フィールド`regexp`は正規表現として処理されます。 - - `tagged` - タグ付けされたメトリック用のルール(メトリックは`someName?tag1=value1&tag2=value2&tag3=value3`形式でDBに保存されます)。正規表現はタグ名でソートされる必要があり、最初のタグは`__name__`である必要があります(存在する場合)。フィールド`regexp`は正規表現として処理されます。 - - `tag_list` - タグ付けされたメトリック用のルール、Graphiteフォーマットでメトリック記述を簡素化するためのシンプルなDSL `someName;tag1=value1;tag2=value2`、`someName`、または`tag1=value1;tag2=value2`。フィールド`regexp`は`tagged`ルールに変換されます。タグ名によるソートは不要で、自動的に行われます。タグの値(名前ではなく)は正規表現として設定できます。例:`env=(dev|staging)`。 -- `regexp` – メトリック名のパターン(通常のまたはDSL)。 -- `age` – データの最小年齢(秒単位)。 -- `precision`– データの年齢を定義する精度(秒単位)。86400(1日の秒数)の約数である必要があります。 -- `function` – `[age, age + precision]`範囲内にあるデータに適用する集約関数の名前。受け入れられる関数:min / max / any / avg。平均は不正確に計算され、平均の平均値として算出されます。 - -### ルールタイプなしの設定例 {#configuration-example} - -```xml - - Version - - click_cost - any - - 0 - 5 - - - 86400 - 60 - - - - max - - 0 - 60 - - - 3600 - 300 - - - 86400 - 3600 - - - -``` - -### ルールタイプありの設定例 {#configuration-typed-example} - -```xml - - Version - - plain - click_cost - any - - 0 - 5 - - - 86400 - 60 - - - - tagged - ^((.*)|.)min\? 
- min - - 0 - 5 - - - 86400 - 60 - - - - tagged - - min - - 0 - 5 - - - 86400 - 60 - - - - tag_list - someName;tag2=value2 - - 0 - 5 - - - 86400 - 60 - - - - max - - 0 - 60 - - - 3600 - 300 - - - 86400 - 3600 - - - -``` - -:::note -データのロールアップはマージ中に実行されます。通常、古いパーティションについてはマージは開始されないため、ロールアップには[optimize](../../../sql-reference/statements/optimize.md)を使用して予定外のマージをトリガーする必要があります。または、[graphite-ch-optimizer](https://github.com/innogames/graphite-ch-optimizer)などの追加ツールを使用します。 -::: diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/mergetree-family/graphitemergetree.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/mergetree-family/graphitemergetree.md.hash deleted file mode 100644 index 58af4a19569..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/mergetree-family/graphitemergetree.md.hash +++ /dev/null @@ -1 +0,0 @@ -d2d8680e92e4e1d4 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/mergetree-family/index.md b/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/mergetree-family/index.md deleted file mode 100644 index 15357f5d0cb..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/mergetree-family/index.md +++ /dev/null @@ -1,42 +0,0 @@ ---- -description: 'Documentation for MergeTree Engine Family' -sidebar_label: 'MergeTree Family' -sidebar_position: 10 -slug: '/engines/table-engines/mergetree-family/' -title: 'MergeTree Engine Family' ---- - - - - -# MergeTree エンジンファミリー - -MergeTree ファミリーのテーブルエンジンは、ClickHouse のデータストレージ機能の中核を成しています。これらは、列指向ストレージ、カスタムパーティショニング、スパース主キーインデックス、二次データスキッピングインデックスなど、高耐障害性と高性能データ取得のためのほとんどの機能を提供します。 - -基本の [MergeTree](../../../engines/table-engines/mergetree-family/mergetree.md) テーブルエンジンは、汎用性が高く多くのユースケースに実用的であるため、単一ノードの ClickHouse インスタンスのデフォルトのテーブルエンジンと見なすことができます。 - -本番環境での使用には [ReplicatedMergeTree](../../../engines/table-engines/mergetree-family/replication.md) が最適です。なぜなら、これは通常の MergeTree エンジンのすべての機能に高可用性を追加するからです。データ取り込み時に自動データ重複排除が行われることもボーナスです。これにより、挿入中にネットワークの問題があった場合でも、安全にソフトウェアが再試行できます。 - -MergeTree ファミリーの他のすべてのエンジンは、特定のユースケースのために追加の機能を提供します。通常、それはバックグラウンドでのデータ操作として実装されています。 - -MergeTree エンジンの主な欠点は、それらがかなり重たいということです。したがって、一般的なパターンは、多くの MergeTree エンジンを持たないことです。例えば、一時データのために多くの小さなテーブルが必要な場合は、[Log エンジンファミリー](../../../engines/table-engines/log-family/index.md)を検討してください。 - - -| ページ | 説明 | -|-----|-----| -| [VersionedCollapsingMergeTree](/engines/table-engines/mergetree-family/versionedcollapsingmergetree) | 継続的に変更されるオブジェクトの状態を迅速に書き込み、古いオブジェクトの状態をバックグラウンドで削除します。 | -| [Data Replication](/engines/table-engines/mergetree-family/replication) | ClickHouse におけるデータレプリケーションの概要 | -| [MergeTree](/engines/table-engines/mergetree-family/mergetree) | `MergeTree` ファミリーのテーブルエンジンは、高速なデータ取り込み率と膨大なデータ量を処理するように設計されています。 | -| [Exact and Approximate Nearest Neighbor Search](/engines/table-engines/mergetree-family/annindexes) | 正確および近似最近傍検索のドキュメント | -| [CollapsingMergeTree](/engines/table-engines/mergetree-family/collapsingmergetree) | MergeTree から継承され、マージプロセス中に行を折り畳むロジックを追加します。 | -| [Custom Partitioning Key](/engines/table-engines/mergetree-family/custom-partitioning-key) | MergeTree テーブルにカスタムパーティショニングキーを追加する方法を学びます。 | -| [Full-text Search using Full-text Indexes](/engines/table-engines/mergetree-family/invertedindexes) | テキスト内の検索用語を迅速に見つけます。 | -| [SummingMergeTree](/engines/table-engines/mergetree-family/summingmergetree) | SummingMergeTree は MergeTree エンジンから継承されます。その主な機能は、パーツのマージ中に数値データを自動的に合計する能力です。 | -| 
[AggregatingMergeTree](/engines/table-engines/mergetree-family/aggregatingmergetree) | 同じ主キー(またはより正確には、同じ [ソートキー](../../../engines/table-engines/mergetree-family/mergetree.md))を持つすべての行を、集計関数の状態の組み合わせを格納する単一行(単一データパーツ内)に置き換えます。 | -| [GraphiteMergeTree](/engines/table-engines/mergetree-family/graphitemergetree) | Graphite データをスリム化および集約/平均(ロールアップ)するために設計されています。 | -| [ReplacingMergeTree](/engines/table-engines/mergetree-family/replacingmergetree) | MergeTree と異なり、同じソートキー値を持つ重複エントリを削除します(`ORDER BY` テーブルセクション、`PRIMARY KEY` ではなく)。 | diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/mergetree-family/index.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/mergetree-family/index.md.hash deleted file mode 100644 index 8d706016199..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/mergetree-family/index.md.hash +++ /dev/null @@ -1 +0,0 @@ -0273222a00ee21fe diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/mergetree-family/invertedindexes.md b/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/mergetree-family/invertedindexes.md deleted file mode 100644 index cb11a9f2340..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/mergetree-family/invertedindexes.md +++ /dev/null @@ -1,217 +0,0 @@ ---- -description: 'テキスト内の検索用語を迅速に見つけます。' -keywords: -- 'full-text search' -- 'text search' -- 'index' -- 'indices' -sidebar_label: 'フルテキストインデックス' -slug: '/engines/table-engines/mergetree-family/invertedindexes' -title: 'フルテキスト検索を使用したフルテキストインデックス' ---- - -import ExperimentalBadge from '@theme/badges/ExperimentalBadge'; -import CloudNotSupportedBadge from '@theme/badges/CloudNotSupportedBadge'; - - -# フルテキスト検索とフルテキストインデックスの使用 - - - - -フルテキストインデックスは、[セカンダリインデックス](/engines/table-engines/mergetree-family/mergetree.md/#available-types-of-indices)の実験的なタイプで、[String](/sql-reference/data-types/string.md)または[FixedString](/sql-reference/data-types/fixedstring.md)カラムのための高速テキスト検索機能を提供します。フルテキストインデックスの主なアイデアは、「用語」とそれらを含む行とのマッピングを保存することです。「用語」は文字列カラムのトークン化されたセルです。たとえば、文字列セル「I will be a little late」はデフォルトで六つの用語「I」、「will」、「be」、「a」、「little」、「late」にトークン化されます。別のトークナイザの種類はn-グラムです。例えば、3-グラムトークン化の結果は21の用語「I w」、「 wi」、「wil」、「ill」、「ll 」、「l b」、「 be」などとなります。入力文字列が細かくトークン化されるほど、結果として得られるフルテキストインデックスは大きく、かつより有用になります。 - -
- -
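上記のトークン分割の挙動は、SQL 関数 `tokens` と `ngrams` を使っておおよそ確認できます。以下はその様子を示すスケッチであり、インデックス内部のトークナイザと実装が完全に一致することを保証するものではありません:

```sql
-- default トークナイザに相当: 非英数字文字で分割
SELECT tokens('I will be a little late');
-- 期待される結果: ['I','will','be','a','little','late']

-- ngram トークナイザに相当: 固定長 3 の部分文字列に分割
SELECT ngrams('I will be', 3);
-- 期待される結果: ['I w',' wi','wil','ill','ll ','l b',' be']
```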
- -:::note -フルテキストインデックスは実験的であり、まだ本番環境での使用には適していません。将来的にはDDL/DQL構文やパフォーマンス/圧縮特性に関して後方互換性のない方法で変更される可能性があります。 -::: - -## 使用法 {#usage} - -フルテキストインデックスを使用するには、まず設定でそれを有効にします: - -```sql -SET allow_experimental_full_text_index = true; -``` - -フルテキストインデックスは、次の構文を使用して文字列カラムに定義できます。 - -```sql -CREATE TABLE tab -( - `key` UInt64, - `str` String, - INDEX inv_idx(str) TYPE gin(tokenizer = 'default|ngram|noop' [, ngram_size = N] [, max_rows_per_postings_list = M]) GRANULARITY 1 -) -ENGINE = MergeTree -ORDER BY key -``` - -ここで、`tokenizer`はトークナイザを指定します: - -- `default`はトークナイザを「tokens('default')」に設定します。すなわち、非英数字文字に沿って文字列を分割します。 -- `ngram`はトークナイザを「tokens('ngram')」に設定します。すなわち、文字列を等しいサイズの用語に分割します。 -- `noop`はトークナイザを「tokens('noop')」に設定します。すなわち、各値自体が用語となります。 - -ngramサイズは、`ngram_size`パラメータを介して指定できます。これはオプションのパラメータです。以下のバリエーションが存在します: - -- `ngram_size = N`:`N`が2から8の範囲内で、トークナイザを「tokens('ngram', N)」に設定します。 -- 指定しない場合:デフォルトのngramサイズは3を使用します。 - -最大行数は、オプションの`max_rows_per_postings_list`を介して指定できます。このパラメータは、巨大なポスティングリストファイルを生成しないようにポスティングリストサイズを制御するために使用できます。以下のバリエーションが存在します: - -- `max_rows_per_postings_list = 0`:ポスティングリストあたりの最大行数に制限はありません。 -- `max_rows_per_postings_list = M`:`M`は少なくとも8192である必要があります。 -- 指定しない場合:デフォルトの最大行数は64Kを使用します。 - -フルテキストインデックスは、テーブル作成後にカラムにドロップまたは追加できます。 - -```sql -ALTER TABLE tab DROP INDEX inv_idx; -ALTER TABLE tab ADD INDEX inv_idx(s) TYPE gin(tokenizer = 'default'); -``` - -インデックスを使用するには、特別な関数や構文は必要ありません。典型的な文字列検索述語は自動的にインデックスを利用します。例えば: - -```sql -INSERT INTO tab(key, str) values (1, 'Hello World'); -SELECT * from tab WHERE str == 'Hello World'; -SELECT * from tab WHERE str IN ('Hello', 'World'); -SELECT * from tab WHERE str LIKE '%Hello%'; -SELECT * from tab WHERE multiSearchAny(str, ['Hello', 'World']); -SELECT * from tab WHERE hasToken(str, 'Hello'); -``` - -フルテキストインデックスは、`Array(String)`、`Array(FixedString)`、`Map(String)`、および`Map(String)`タイプのカラムでも機能します。 - -他のセカンダリインデックスと同様に、各カラムパートには独自のフルテキストインデックスがあります。さらに、各フルテキストインデックスは内部的に「セグメント」に分割されます。セグメントの存在とサイズは一般的にユーザーには透明ですが、セグメントサイズはインデックス構築中のメモリ消費を決定します(例えば、2つのパーツがマージされるとき)。設定パラメータ「max_digestion_size_per_segment」(デフォルト:256 MB)は、新しいセグメントが作成される前に基盤となるカラムから読み込まれるデータ量を制御します。このパラメータを増やすことにより、インデックス構築中の中間メモリ消費が増加しますが、クエリを評価するためにチェックする必要のあるセグメントが少なくなるため、ルックアップパフォーマンスも向上します。 - -## Hacker Newsデータセットのフルテキスト検索 {#full-text-search-of-the-hacker-news-dataset} - -テキストがたくさんある大規模データセットに対するフルテキストインデックスのパフォーマンス向上を見てみましょう。人気のあるHacker Newsウェブサイトの2870万行のコメントを使用します。以下はフルテキストインデックスのないテーブルです: - -```sql -CREATE TABLE hackernews ( - id UInt64, - deleted UInt8, - type String, - author String, - timestamp DateTime, - comment String, - dead UInt8, - parent UInt64, - poll UInt64, - children Array(UInt32), - url String, - score UInt32, - title String, - parts Array(UInt32), - descendants UInt32 -) -ENGINE = MergeTree -ORDER BY (type, author); -``` - -2870万行はS3のParquetファイルにあり、これを`hackernews`テーブルに挿入します: - -```sql -INSERT INTO hackernews - SELECT * FROM s3Cluster( - 'default', - 'https://datasets-documentation.s3.eu-west-3.amazonaws.com/hackernews/hacknernews.parquet', - 'Parquet', - ' - id UInt64, - deleted UInt8, - type String, - by String, - time DateTime, - text String, - dead UInt8, - parent UInt64, - poll UInt64, - kids Array(UInt32), - url String, - score UInt32, - title String, - parts Array(UInt32), - descendants UInt32'); -``` - -`comment`カラムで `ClickHouse`(さまざまな大文字と小文字のバリエーション)を探す以下の単純な検索を考えてみましょう: - -```sql -SELECT count() -FROM hackernews -WHERE hasToken(lower(comment), 'clickhouse'); -``` - -クエリの実行に3秒かかることに注意してください: - -```response -┌─count()─┐ -│ 1145 │ -└─────────┘ 
- -1 row in set. Elapsed: 3.001 sec. Processed 28.74 million rows, 9.75 GB (9.58 million rows/s., 3.25 GB/s.) -``` - -次に、`ALTER TABLE`を使用して、`comment`カラムの小文字に対してフルテキストインデックスを追加し、それをマテリアライズします(これはしばらく時間がかかる場合があります。マテリアライズされるまで待ってください): - -```sql -ALTER TABLE hackernews - ADD INDEX comment_lowercase(lower(comment)) TYPE gin; - -ALTER TABLE hackernews MATERIALIZE INDEX comment_lowercase; -``` - -同じクエリを実行します... - -```sql -SELECT count() -FROM hackernews -WHERE hasToken(lower(comment), 'clickhouse') -``` - -...そしてクエリが4倍速く実行されることに気付きます: - -```response -┌─count()─┐ -│ 1145 │ -└─────────┘ - -1 row in set. Elapsed: 0.747 sec. Processed 4.49 million rows, 1.77 GB (6.01 million rows/s., 2.37 GB/s.) -``` - -また、複数の用語、すなわち、選言または共言で検索することもできます: - -```sql --- 複数のOR条件のある用語 -SELECT count(*) -FROM hackernews -WHERE multiSearchAny(lower(comment), ['oltp', 'olap']); - --- 複数のAND条件のある用語 -SELECT count(*) -FROM hackernews -WHERE hasToken(lower(comment), 'avx') AND hasToken(lower(comment), 'sve'); -``` - -:::note -他のセカンダリインデックスとは異なり、フルテキストインデックスは(現時点では)行番号(行ID)にマッピングされます。この設計の理由はパフォーマンスです。実際には、ユーザーはしばしば複数の用語を一度に検索します。たとえば、フィルタ述語 `WHERE s LIKE '%little%' OR s LIKE '%big%'` は、「little」と「big」の用語の行IDリストの和を形成することにより、フルテキストインデックスを使用して直接評価できます。これにより、インデックス作成時に提供されるパラメータ `GRANULARITY` は意味を持たなくなります(将来的には構文から削除される可能性があります)。 -::: - -## 関連コンテンツ {#related-content} - -- ブログ: [ClickHouseにおける逆インデックスの導入](https://clickhouse.com/blog/clickhouse-search-with-inverted-indices) diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/mergetree-family/invertedindexes.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/mergetree-family/invertedindexes.md.hash deleted file mode 100644 index 17639896be0..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/mergetree-family/invertedindexes.md.hash +++ /dev/null @@ -1 +0,0 @@ -6dd69a900ca233bd diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/mergetree-family/mergetree.md b/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/mergetree-family/mergetree.md deleted file mode 100644 index 6a1745ba12c..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/mergetree-family/mergetree.md +++ /dev/null @@ -1,1026 +0,0 @@ ---- -description: '`MergeTree`-family table engines are designed for high data ingest - rates and huge data volumes.' 
-sidebar_label: 'MergeTree' -sidebar_position: 11 -slug: '/engines/table-engines/mergetree-family/mergetree' -title: 'MergeTree' ---- - -import ExperimentalBadge from '@theme/badges/ExperimentalBadge'; -import CloudNotSupportedBadge from '@theme/badges/CloudNotSupportedBadge'; - - -# MergeTree - -`MergeTree` エンジンおよび `MergeTree` ファミリーの他のエンジン(例: `ReplacingMergeTree`, `AggregatingMergeTree`)は、ClickHouse で最も一般的に使用され、最も堅牢なテーブルエンジンです。 - -`MergeTree` ファミリーのテーブルエンジンは、高いデータ取り込み率と巨大なデータボリュームを想定して設計されています。 -挿入操作は、バックグラウンドプロセスによって他のテーブルパーツとマージされるテーブルパーツを作成します。 - -`MergeTree` ファミリーのテーブルエンジンの主な特徴。 - -- テーブルの主キーは、各テーブルパーツ内のソート順を決定します(クラスタインデックス)。主キーは、個々の行ではなく、8192 行のブロックであるグラニュールを参照します。これにより、大規模データセットの主キーはメインメモリに保持されるのに十分小さく、ディスク上のデータに迅速にアクセスできます。 - -- テーブルは任意のパーティション式を使用してパーティショニングできます。クエリが許可される場合、パーティションプルーニングは読取時にパーティションを省略します。 - -- データは、高可用性、フェイルオーバー、ゼロダウンタイムアップグレードのために、複数のクラスター ノード間でレプリケートできます。詳細は [データレプリケーション](/engines/table-engines/mergetree-family/replication.md) を参照してください。 - -- `MergeTree` テーブルエンジンは、クエリの最適化を助けるために、さまざまな統計の種類とサンプリング方法をサポートしています。 - -:::note -同名ですが、 [Merge](/engines/table-engines/special/merge) エンジンは `*MergeTree` エンジンとは異なります。 -::: -## テーブルの作成 {#table_engine-mergetree-creating-a-table} - -```sql -CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] -( - name1 [type1] [[NOT] NULL] [DEFAULT|MATERIALIZED|ALIAS|EPHEMERAL expr1] [COMMENT ...] [CODEC(codec1)] [STATISTICS(stat1)] [TTL expr1] [PRIMARY KEY] [SETTINGS (name = value, ...)], - name2 [type2] [[NOT] NULL] [DEFAULT|MATERIALIZED|ALIAS|EPHEMERAL expr2] [COMMENT ...] [CODEC(codec2)] [STATISTICS(stat2)] [TTL expr2] [PRIMARY KEY] [SETTINGS (name = value, ...)], - ... - INDEX index_name1 expr1 TYPE type1(...) [GRANULARITY value1], - INDEX index_name2 expr2 TYPE type2(...) [GRANULARITY value2], - ... - PROJECTION projection_name_1 (SELECT [GROUP BY] [ORDER BY]), - PROJECTION projection_name_2 (SELECT [GROUP BY] [ORDER BY]) -) ENGINE = MergeTree() -ORDER BY expr -[PARTITION BY expr] -[PRIMARY KEY expr] -[SAMPLE BY expr] -[TTL expr - [DELETE|TO DISK 'xxx'|TO VOLUME 'xxx' [, ...] ] - [WHERE conditions] - [GROUP BY key_expr [SET v1 = aggr_func(v1) [, v2 = aggr_func(v2) ...]] ] ] -[SETTINGS name = value, ...] 
-``` - -パラメータの詳細な説明については、[CREATE TABLE](/sql-reference/statements/create/table.md) ステートメントを参照してください。 -### クエリ句 {#mergetree-query-clauses} -#### ENGINE {#engine} - -`ENGINE` — エンジンの名前とパラメータ。 `ENGINE = MergeTree()`。`MergeTree` エンジンにはパラメータはありません。 -#### ORDER BY {#order_by} - -`ORDER BY` — ソートキー。 - -カラム名または任意の式のタプル。例: `ORDER BY (CounterID + 1, EventDate)`。 - -主キーが定義されていない場合(つまり、`PRIMARY KEY` が指定されていない場合)、ClickHouse はソートキーを主キーとして使用します。 - -ソートが不要な場合は、構文 `ORDER BY tuple()` を使用できます。 -設定 `create_table_empty_primary_key_by_default` が有効になっている場合は、`ORDER BY tuple()` が `CREATE TABLE` ステートメントに暗黙的に追加されます。詳細は [主キーの選択](#selecting-a-primary-key) を参照してください。 -#### PARTITION BY {#partition-by} - -`PARTITION BY` — [パーティショニングキー](/engines/table-engines/mergetree-family/custom-partitioning-key.md)。オプション。ほとんどの場合、パーティションキーは必要ありません。必要な場合でも、通常、月単位でパーティションするよりも詳細なパーティションキーは必要ありません。パーティショニングはクエリの速度を上げません(`ORDER BY` 式とは対照的に)。過度に詳細なパーティショニングを使用すべきではありません。クライアント識別子や名前でデータをパーティションしないでください(その代わり、`ORDER BY` 式の最初のカラムとしてクライアント識別子または名前を指定してください)。 - -月ごとのパーティショニングには、`toYYYYMM(date_column)` 表現を使用します。ここで `date_column` は、[Date](/sql-reference/data-types/date.md) 型の日付を持つカラムです。ここでのパーティション名は `"YYYYMM"` 形式を持ちます。 -#### PRIMARY KEY {#primary-key} - -`PRIMARY KEY` — ソートキーと異なる場合の主キーです。オプション。 - -ソートキーを指定することは(`ORDER BY` 句を使用)、暗黙的に主キーを指定することになります。 -通常、ソートキーに加えて主キーを指定する必要はありません。 -#### SAMPLE BY {#sample-by} - -`SAMPLE BY` — サンプリング式。オプション。 - -指定した場合は、主キーに含まれている必要があります。 -サンプリング式は符号なし整数を生成する必要があります。 - -例: `SAMPLE BY intHash32(UserID) ORDER BY (CounterID, EventDate, intHash32(UserID))`。 -#### TTL {#ttl} - -`TTL` — 行の保存期間と、自動的なパーツの移動のロジックを指定する規則のリスト [ディスク間とボリューム間](#table_engine-mergetree-multiple-volumes)での。オプション。 - -式は `Date` または `DateTime` を生成する必要があり、例: `TTL date + INTERVAL 1 DAY` です。 - -規則のタイプ `DELETE|TO DISK 'xxx'|TO VOLUME 'xxx'|GROUP BY` は、式が満たされたときにパーツで行われる動作を指定します(現在の時間に達したとき):期限切れ行の削除、指定されたディスク(`TO DISK 'xxx'`)またはボリューム(`TO VOLUME 'xxx'`)へのパーツの移動、または期限切れ行の値の集約。規則のデフォルトのタイプは削除(`DELETE`)です。複数の規則を指定できますが、`DELETE` 規則は 1 つだけにしてください。 - -詳細については、[列およびテーブルの TTL](#table_engine-mergetree-ttl) を参照してください。 -#### SETTINGS {#settings} - -[MergeTree 設定を参照](../../../operations/settings/merge-tree-settings.md)。 - -**セクション設定の例** - -```sql -ENGINE MergeTree() PARTITION BY toYYYYMM(EventDate) ORDER BY (CounterID, EventDate, intHash32(UserID)) SAMPLE BY intHash32(UserID) SETTINGS index_granularity=8192 -``` - -この例では、月ごとのパーティショニングを設定しました。 - -また、ユーザー ID によるハッシュとしてサンプリングの式も設定しました。これにより、各 `CounterID` と `EventDate` に対してテーブルのデータを擬似的にランダム化できます。データを選択する際に[SAMPLE](/sql-reference/statements/select/sample)句を定義すると、ClickHouse はユーザーのサブセットに対して均等に擬似的なランダムデータサンプルを返します。 - -`index_granularity` 設定は 8192 がデフォルト値のため、省略することができます。 - -
- -テーブルを作成するための非推奨メソッド - -:::note -新しいプロジェクトではこのメソッドを使用しないでください。可能であれば、古いプロジェクトを上記のメソッドに切り替えてください。 -::: - -```sql -CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] -( - name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1], - name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2], - ... -) ENGINE [=] MergeTree(date-column [, sampling_expression], (primary, key), index_granularity) -``` - -**MergeTree() パラメータ** - -- `date-column` — [Date](/sql-reference/data-types/date.md) 型のカラムの名前。ClickHouse はこのカラムに基づいて月ごとのパーティションを自動的に作成します。パーティション名は `"YYYYMM"` 形式です。 -- `sampling_expression` — サンプリングのための式。 -- `(primary, key)` — 主キー。タイプ: [Tuple()](/sql-reference/data-types/tuple.md) -- `index_granularity` — インデックスの粒度。インデックスの「マーク」間のデータ行の数。値 8192 はほとんどのタスクに適しています。 - -**例** - -```sql -MergeTree(EventDate, intHash32(UserID), (CounterID, EventDate, intHash32(UserID)), 8192) -``` - -`MergeTree` エンジンは、上記の例のようにメインエンジンの設定メソッドと同じ方法で構成されます。 -
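参考までに、ここまでに説明した句を現行構文で組み合わせた作成例のスケッチを示します。テーブル名・カラム名（`example_events` など）は説明用の仮定です。

```sql
CREATE TABLE example_events
(
    EventDate Date,
    CounterID UInt32,
    UserID    UInt32,
    Value     Float64
)
ENGINE = MergeTree()
PARTITION BY toYYYYMM(EventDate)                   -- 月ごとのパーティショニング
ORDER BY (CounterID, EventDate, intHash32(UserID)) -- ソートキー（主キーを別途指定しない場合は主キーも兼ねる）
SAMPLE BY intHash32(UserID)                        -- サンプリング式は主キーに含まれている必要がある
TTL EventDate + INTERVAL 1 YEAR DELETE             -- 1 年経過した行を削除
SETTINGS index_granularity = 8192;                 -- デフォルト値のため省略可能
```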
-## データストレージ {#mergetree-data-storage} - -テーブルは、主キーによってソートされたデータパーツで構成されています。 - -テーブルにデータが挿入されると、独立したデータパーツが作成され、各パーツは主キーによって辞書式にソートされます。たとえば、主キーが `(CounterID, Date)` の場合、パーツ内のデータは `CounterID` でソートされ、各 `CounterID` の中で `Date` で順序付けられます。 - -異なるパーティションに属するデータは、異なるパーツに分けられます。ClickHouse はバックグラウンドで、データパーツをより効率的にストレージにマージします。異なるパーティションに属するパーツはマージされません。マージメカニズムは、同じ主キーを持つすべての行が同じデータパーツに存在することを保証するものではありません。 - -データパーツは `Wide` または `Compact` フォーマットで保存できます。`Wide` フォーマットでは、各カラムがファイルシステム内の別のファイルに保存され、`Compact` フォーマットでは、すべてのカラムが 1 つのファイルに保存されます。`Compact` フォーマットは、小さく頻繁な挿入のパフォーマンスを向上させるために使用できます。 - -データ保存フォーマットは、テーブルエンジンの `min_bytes_for_wide_part` および `min_rows_for_wide_part` 設定によって制御されます。データパーツ内のバイト数または行数が、対応する設定の値よりも少ない場合、パーツは `Compact` フォーマットで保存されます。それ以外の場合は、`Wide` フォーマットで保存されます。これらの設定がいずれも設定されていない場合、データパーツは `Wide` フォーマットで保存されます。 - -各データパーツは、論理的にグラニュールに分けられます。グラニュールは、ClickHouse がデータを選択する際に読み取る最小の分割可能なデータセットです。ClickHouse は行や値を分割しないため、各グラニュールは常に整数数の行を含みます。グラニュールの最初の行は、その行の主キーの値でマークされます。ClickHouse は各データパーツについて、マークを保存するインデックスファイルを作成します。プライマリーキーに含まれるかどうかに関係なく、各カラムについて、ClickHouse は同じマークも保存します。これらのマークにより、カラムファイル内のデータを直接見つけることができます。 - -グラニュールのサイズは、テーブルエンジンの `index_granularity` および `index_granularity_bytes` 設定によって制限されます。グラニュール内の行の数は、行のサイズに応じて `[1, index_granularity]` の範囲に配置されます。1 行のサイズが設定の値を超えている場合、グラニュールのサイズは `index_granularity_bytes` を超えることがあります。この場合、グラニュールのサイズは行のサイズに等しくなります。 -## 主キーとインデックスのクエリでの使用 {#primary-keys-and-indexes-in-queries} - -例として `(CounterID, Date)` 主キーを考えてみましょう。この場合、ソートとインデックスは次のように示すことができます: - -```text -Whole data: [---------------------------------------------] -CounterID: [aaaaaaaaaaaaaaaaaabbbbcdeeeeeeeeeeeeefgggggggghhhhhhhhhiiiiiiiiikllllllll] -Date: [1111111222222233331233211111222222333211111112122222223111112223311122333] -Marks: | | | | | | | | | | | - a,1 a,2 a,3 b,3 e,2 e,3 g,1 h,2 i,1 i,3 l,3 -Marks numbers: 0 1 2 3 4 5 6 7 8 9 10 -``` - -データクエリが次のように指定されている場合: - -- `CounterID in ('a', 'h')` の場合、サーバーはマークの範囲 `[0, 3)` と `[6, 8)` のデータを読み取ります。 -- `CounterID IN ('a', 'h') AND Date = 3` の場合、サーバーはマークの範囲 `[1, 3)` と `[7, 8)` のデータを読み取ります。 -- `Date = 3` の場合、サーバーはマークの範囲 `[1, 10]` のデータを読み取ります。 - -上記の例は、インデックスを使用する方が常にフルスキャンよりも効果的であることを示しています。 - -スパースインデックスは、追加のデータを読み取ることができます。主キーの単一範囲を読み取る場合、各データブロック内で最大 `index_granularity * 2` の追加行を読み取ることができます。 - -スパースインデックスは、非常に多くのテーブル行と一緒に作業するのを可能にします。なぜなら、ほとんどの場合、これらのインデックスはコンピュータの RAM に収まるからです。 - -ClickHouse では、ユニークな主キーは必要ありません。同じ主キーを持つ複数の行を挿入できます。 - -`PRIMARY KEY` および `ORDER BY` 句で `Nullable` 型の式を使用できますが、強く推奨されません。この機能を許可するには、[allow_nullable_key](/operations/settings/merge-tree-settings/#allow_nullable_key) 設定をオンにします。[NULLS_LAST](/sql-reference/statements/select/order-by.md/#sorting-of-special-values) の原則は、`ORDER BY` 句の `NULL` 値に適用されます。 -### 主キーの選択 {#selecting-a-primary-key} - -主キー内のカラムの数に明示的な制限はありません。データ構造に応じて、主キーにより多くのカラムを含めることができます。これは次のことをもたらします: - -- インデックスのパフォーマンスを向上させる。 - - 主キーが `(a, b)` の場合、別のカラム `c` を追加すると次の条件が満たされる場合にパフォーマンスが向上します: - - - カラム `c` に条件があるクエリがある。 - - `(a, b)` の値が同じ長いデータ範囲(`index_granularity` の数倍長い)が一般的です。別のカラムを追加することで、かなり長いデータ範囲をスキップできる場合です。 - -- データ圧縮を改善する。 - - ClickHouse はデータを主キーでソートするため、一貫性が高いほど圧縮がよくなります。 - -- [CollapsingMergeTree](/engines/table-engines/mergetree-family/collapsingmergetree) や [SummingMergeTree](/engines/table-engines/mergetree-family/summingmergetree.md) エンジンでデータ部分をマージする際の追加ロジックを提供します。 - - この場合、主キーとは異なる *ソーティングキー* を指定することが理にかなっています。 - -長い主キーは、挿入パフォーマンスやメモリ消費に悪影響を与えますが、主キー内の追加カラムは `SELECT` クエリ中の ClickHouse のパフォーマンスに影響を与えません。 - -`ORDER BY tuple()` 構文を使用して主キーなしでテーブルを作成できます。この場合、ClickHouse は挿入順序でデータを保存します。`INSERT ... 
SELECT` クエリでデータを挿入する際にデータ順序を保存したい場合は、[max_insert_threads = 1](/operations/settings/settings#max_insert_threads) を設定します。 - -初期の順序でデータを選択するには、[シングルスレッド](/operations/settings/settings.md/#max_threads) `SELECT` クエリを使用します。 -### ソーティングキーとは異なる主キーの選択 {#choosing-a-primary-key-that-differs-from-the-sorting-key} - -主キー(インデックスファイルに各マークの値を書き込む式)をソートキー(データ部分の行をソートする式)とは異なるように指定することができます。この場合、主キー式のタプルはソートキー式のタプルの接頭辞でなければなりません。 - -この機能は、[SummingMergeTree](/engines/table-engines/mergetree-family/summingmergetree.md) および [AggregatingMergeTree](/engines/table-engines/mergetree-family/aggregatingmergetree.md) テーブルエンジンを使用する際に役立ちます。これらのエンジンを使用する一般的なケースでは、テーブルには *次元* と *測定* の 2 種類のカラムがあります。典型的なクエリは、次元でフィルタリングしながら、任意の `GROUP BY` で測定カラムの値を集約します。SummingMergeTree と AggregatingMergeTree は、同じソートキーの値を持つ行を集約するため、すべての次元を追加することが自然です。その結果、キー式は長いカラムのリストで構成され、このリストは新しく追加された次元で頻繁に更新する必要があります。 - -この場合、効率的な範囲スキャンを提供する主キーには少数のカラムを残し、残りの次元のカラムをソートキーのタプルに追加することが理にかなっています。 - -ソートキーの [ALTER](/sql-reference/statements/alter/index.md) は軽量な操作であり、新しいカラムがテーブルとソートキーに同時に追加されるとき、既存のデータパーツは変更する必要がありません。古いソートキーが新しいソートキーの接頭辞であり、新しく追加されたカラムにデータがないため、テーブルの修正時にはデータは古いソートキーと新しいソートキーの両方でソートされます。 -### クエリでのインデックスとパーティションの使用 {#use-of-indexes-and-partitions-in-queries} - -`SELECT` クエリでは、ClickHouse はインデックスの使用が可能かどうかを分析します。インデックスは、`WHERE/PREWHERE` 句が等号または不等号の比較操作を表す式(結合要素の 1 つまたはすべて)を持つ場合、または主キーまたはパーティショニングキーのカラムまたは式に対して特定の接頭辞を持つ `IN` または `LIKE` を持つ場合、またはこれらのカラムの特定の部分的に繰り返しを持つ関数や論理関係の式を持つ場合に使用できます。 - -したがって、主キーの 1 つまたは複数の範囲でクエリを迅速に実行することができます。この例では、特定のトラッキングタグ、特定のタグと日付範囲、特定のタグと日付、複数のタグと日付範囲などについてクエリを実行するときに、クエリは迅速に実行されます。 - -次のように構成されたエンジンを見てみましょう: -```sql -ENGINE MergeTree() -PARTITION BY toYYYYMM(EventDate) -ORDER BY (CounterID, EventDate) -SETTINGS index_granularity=8192 -``` - -この場合、クエリでは: - -```sql -SELECT count() FROM table -WHERE EventDate = toDate(now()) -AND CounterID = 34 - -SELECT count() FROM table -WHERE EventDate = toDate(now()) -AND (CounterID = 34 OR CounterID = 42) - -SELECT count() FROM table -WHERE ((EventDate >= toDate('2014-01-01') -AND EventDate <= toDate('2014-01-31')) OR EventDate = toDate('2014-05-01')) -AND CounterID IN (101500, 731962, 160656) -AND (CounterID = 101500 OR EventDate != toDate('2014-05-01')) -``` - -ClickHouse は、主キーインデックスを使用して不適切なデータを削減し、月ごとのパーティショニングキーを使用して不適切な日付範囲にあるパーティションを削減します。 - -上記のクエリは、インデックスが複雑な式でも使用されることを示しています。テーブルからの読み込みは、インデックスを使用するのがフルスキャンよりも遅くなることはありません。 - -以下の例で、インデックスは使用できません。 - -```sql -SELECT count() FROM table WHERE CounterID = 34 OR URL LIKE '%upyachka%' -``` - -クエリ実行時に ClickHouse がインデックスを使用できるかどうかを確認するには、[force_index_by_date](/operations/settings/settings.md/#force_index_by_date) および [force_primary_key](/operations/settings/settings#force_primary_key) の設定を使用します。 - -月ごとのパーティショニングキーは、適切な範囲を持つ日付を含むデータブロックのみを読み取ります。この場合、データブロックには多くの日付(最大で 1 か月分)のデータが含まれている可能性があります。ブロック内でデータは主キーでソートされていますが、主キーの最初のカラムとして日付を含まない可能性があります。このため、主キー接頭辞を指定しない単一日付条件のクエリを使用すると、単一の日付の場合よりも多くのデータが読み取られることになります。 -### 部分的に単調増加する主キーに対するインデックスの利用 {#use-of-index-for-partially-monotonic-primary-keys} - -例えば、月の日を考えます。これは 1 か月の間、[単調増加シーケンス](https://en.wikipedia.org/wiki/Monotonic_function) を形成しますが、より長い期間に対しては単調ではありません。これは部分的に単調増加するシーケンスです。ユーザーが部分的に単調増加する主キーでテーブルを作成した場合、ClickHouse は通常のようにスパースインデックスを作成します。ユーザーがこの種のテーブルからデータを選択すると、ClickHouse はクエリ条件を分析します。インデックスの 2 つのマークの間にデータを取得したい場合、これらの 2 つのマークが 1 か月の間に収まる場合、ClickHouse はこの特定のケースでインデックスを使用できる可能性があります。なぜなら、クエリのパラメータとインデックスのマーク間の距離を計算できるからです。 - -クエリパラメーターの範囲の主キーの値が単調増加のシーケンスを表さない場合、ClickHouse はインデックスを使用できません。この場合、ClickHouse はフルスキャン方式を使用します。 - -ClickHouse 
はこのロジックを、月の日のシーケンスだけでなく、部分的に単調増加する任意のシーケンスの主キーに対して使用します。 -### データスキッピングインデックス {#table_engine-mergetree-data_skipping-indexes} - -インデックス宣言は、`CREATE` クエリのカラムセクションにあります。 - -```sql -INDEX index_name expr TYPE type(...) [GRANULARITY granularity_value] -``` - -`*MergeTree` ファミリーのテーブルの場合、データスキッピングインデックスを指定できます。 - -これらのインデックスは、指定された式に関する情報を `granularity_value` グラニュール(グラニュールのサイズはテーブルエンジンの `index_granularity` 設定を使用して指定されます)で構成されるブロックで集約します。次に、これらの集約が `SELECT` クエリで使用され、クエリが満たされない大きなデータブロックをスキップするために必要なデータ量を削減します。 - -`GRANULARITY` 句は省略可能で、デフォルトの `granularity_value` の値は 1 です。 - -**例** - -```sql -CREATE TABLE table_name -( - u64 UInt64, - i32 Int32, - s String, - ... - INDEX idx1 u64 TYPE bloom_filter GRANULARITY 3, - INDEX idx2 u64 * i32 TYPE minmax GRANULARITY 3, - INDEX idx3 u64 * length(s) TYPE set(1000) GRANULARITY 4 -) ENGINE = MergeTree() -... -``` - -この例のインデックスは、ClickHouse が次のクエリでディスクから読み取るデータの量を削減するために使用できます: - -```sql -SELECT count() FROM table WHERE u64 == 10; -SELECT count() FROM table WHERE u64 * i32 >= 1234 -SELECT count() FROM table WHERE u64 * length(s) == 1234 -``` - -データスキッピングインデックスは、合成カラムに対しても作成できます: - -```sql --- Map 型のカラムに対して: -INDEX map_key_index mapKeys(map_column) TYPE bloom_filter -INDEX map_value_index mapValues(map_column) TYPE bloom_filter - --- Tuple 型のカラムに対して: -INDEX tuple_1_index tuple_column.1 TYPE bloom_filter -INDEX tuple_2_index tuple_column.2 TYPE bloom_filter - --- Nested 型のカラムに対して: -INDEX nested_1_index col.nested_col1 TYPE bloom_filter -INDEX nested_2_index col.nested_col2 TYPE bloom_filter -``` -### 利用可能なインデックスの種類 {#available-types-of-indices} -#### MinMax {#minmax} - -指定された式の極端値を保存します(式が `tuple` の場合、それぞれの `tuple` の要素の極端値を保存します)。主キーのように、データブロックをスキップするために保存された情報を使用します。 - -構文: `minmax` -#### Set {#set} - -指定された式のユニークな値を保存します(`max_rows` 行を超えない、 `max_rows=0` は「制限なし」を意味します)。これらの値を使用して、データブロックで `WHERE` 式が満たされないかどうかを確認します。 - -構文: `set(max_rows)` -#### Bloom フィルター {#bloom-filter} - -指定されたカラムに対する [Bloom フィルター](https://en.wikipedia.org/wiki/Bloom_filter) を保存します。オプションの `false_positive` パラメータは 0 から 1 の間の値で、フィルターから偽陽性の応答を受け取る確率を指定します。デフォルト値: 0.025。サポートされているデータ型: `Int*`, `UInt*`, `Float*`, `Enum`, `Date`, `DateTime`, `String`, `FixedString`, `Array`, `LowCardinality`, `Nullable`, `UUID` および `Map`。`Map` データ型の場合、クライアントは[mapKeys](/sql-reference/functions/tuple-map-functions.md/#mapkeys)または[mapValues](/sql-reference/functions/tuple-map-functions.md/#mapvalues)関数を使用して、インデックスがキーまたは値に対して作成されるべきかを指定できます。 - -構文: `bloom_filter([false_positive])` -#### N-gram Bloom フィルター {#n-gram-bloom-filter} - -すべての n-gram をデータブロックから含む [Bloom フィルター](https://en.wikipedia.org/wiki/Bloom_filter) を保存します。データ型: [String](/sql-reference/data-types/string.md), [FixedString](/sql-reference/data-types/fixedstring.md) および [Map](/sql-reference/data-types/map.md) のみで使用できます。`EQUALS`、 `LIKE` および `IN` 式の最適化に使用できます。 - -構文: `ngrambf_v1(n, size_of_bloom_filter_in_bytes, number_of_hash_functions, random_seed)` - -- `n` — ngramサイズ、 -- `size_of_bloom_filter_in_bytes` — バイト単位の Bloom フィルターサイズ(例えば、256 や 512 などの大きな値を指定できます。圧縮がうまくできるため)。 -- `number_of_hash_functions` — Bloom フィルターで使用されるハッシュ関数の数。 -- `random_seed` — Bloom フィルターのハッシュ関数のシード。 - -ユーザーは [UDF](/sql-reference/statements/create/function.md) を作成して、`ngrambf_v1` のパラメータセットを推定できます。クエリステートメントは次のとおりです: - -```sql -CREATE FUNCTION bfEstimateFunctions [ON CLUSTER cluster] -AS -(total_number_of_all_grams, size_of_bloom_filter_in_bits) -> round((size_of_bloom_filter_in_bits / total_number_of_all_grams) * log(2)); - -CREATE FUNCTION bfEstimateBmSize [ON CLUSTER cluster] -AS 
-(total_number_of_all_grams, probability_of_false_positives) -> ceil((total_number_of_all_grams * log(probability_of_false_positives)) / log(1 / pow(2, log(2)))); - -CREATE FUNCTION bfEstimateFalsePositive [ON CLUSTER cluster] -AS -(total_number_of_all_grams, number_of_hash_functions, size_of_bloom_filter_in_bytes) -> pow(1 - exp(-number_of_hash_functions/ (size_of_bloom_filter_in_bytes / total_number_of_all_grams)), number_of_hash_functions); - -CREATE FUNCTION bfEstimateGramNumber [ON CLUSTER cluster] -AS -(number_of_hash_functions, probability_of_false_positives, size_of_bloom_filter_in_bytes) -> ceil(size_of_bloom_filter_in_bytes / (-number_of_hash_functions / log(1 - exp(log(probability_of_false_positives) / number_of_hash_functions)))) - -``` -これらの関数を使用するには、少なくとも 2 つのパラメータを指定する必要があります。 -たとえば、グラニュール内に 4300 の ngram があり、偽陽性が 0.0001 未満であると予想される場合、次のクエリを実行して他のパラメータを推定できます: - -```sql ---- フィルター内のビット数を推定 -SELECT bfEstimateBmSize(4300, 0.0001) / 8 as size_of_bloom_filter_in_bytes; - -┌─size_of_bloom_filter_in_bytes─┐ -│ 10304 │ -└───────────────────────────────┘ - ---- ハッシュ関数の数を推定 -SELECT bfEstimateFunctions(4300, bfEstimateBmSize(4300, 0.0001)) as number_of_hash_functions - -┌─number_of_hash_functions─┐ -│ 13 │ -└──────────────────────────┘ -``` -もちろん、他の条件でパラメータを推定するためにこれらの関数を使用することもできます。 -関数は[こちら](https://hur.st/bloomfilter)のコンテンツを参照します。 -#### トークン Bloom フィルター {#token-bloom-filter} - -`ngrambf_v1` と同様ですが、n-gram の代わりにトークンを保存します。トークンは、非英数字で区切られたシーケンスです。 - -構文: `tokenbf_v1(size_of_bloom_filter_in_bytes, number_of_hash_functions, random_seed)` -#### 特殊目的 {#special-purpose} - -- 近似最近傍探索をサポートするための実験的インデックス。詳細は [こちら](annindexes.md) を参照してください。 -- フルテキスト検索をサポートするための実験的なフルテキストインデックス。詳細は [こちら](invertedindexes.md) を参照してください。 -### Functions Support {#functions-support} - -`WHERE`句の条件は、カラムを操作する関数の呼び出しを含みます。カラムがインデックスの一部である場合、ClickHouseは関数を実行する際にこのインデックスを使用しようとします。ClickHouseは、インデックスを使用するためのさまざまな関数のサブセットをサポートしています。 - -`set`タイプのインデックスはすべての関数で使用できます。他のインデックスタイプは以下のようにサポートされています: - -| 関数(演算子) / インデックス | 主キー | minmax | ngrambf_v1 | tokenbf_v1 | bloom_filter | full_text | -|------------------------------------------------------------------------------------------------------------|-------------|--------|------------|------------|--------------|-----------| -| [equals (=, ==)](/sql-reference/functions/comparison-functions.md/#equals) | ✔ | ✔ | ✔ | ✔ | ✔ | ✔ | -| [notEquals(!=, <>)](/sql-reference/functions/comparison-functions.md/#notequals) | ✔ | ✔ | ✔ | ✔ | ✔ | ✔ | -| [like](/sql-reference/functions/string-search-functions.md/#like) | ✔ | ✔ | ✔ | ✔ | ✗ | ✔ | -| [notLike](/sql-reference/functions/string-search-functions.md/#notlike) | ✔ | ✔ | ✔ | ✔ | ✗ | ✔ | -| [match](/sql-reference/functions/string-search-functions.md/#match) | ✗ | ✗ | ✔ | ✔ | ✗ | ✔ | -| [startsWith](/sql-reference/functions/string-functions.md/#startswith) | ✔ | ✔ | ✔ | ✔ | ✗ | ✔ | -| [endsWith](/sql-reference/functions/string-functions.md/#endswith) | ✗ | ✗ | ✔ | ✔ | ✗ | ✔ | -| [multiSearchAny](/sql-reference/functions/string-search-functions.md/#multisearchany) | ✗ | ✗ | ✔ | ✗ | ✗ | ✔ | -| [in](/sql-reference/functions/in-functions) | ✔ | ✔ | ✔ | ✔ | ✔ | ✔ | -| [notIn](/sql-reference/functions/in-functions) | ✔ | ✔ | ✔ | ✔ | ✔ | ✔ | -| [less (`<`)](/sql-reference/functions/comparison-functions.md/#less) | ✔ | ✔ | ✗ | ✗ | ✗ | ✗ | -| [greater (`>`)](/sql-reference/functions/comparison-functions.md/#greater) | ✔ | ✔ | ✗ | ✗ | ✗ | ✗ | -| [lessOrEquals (`<=`)](/sql-reference/functions/comparison-functions.md/#lessorequals) | ✔ | ✔ | ✗ | ✗ | ✗ 
| ✗ | -| [greaterOrEquals (`>=`)](/sql-reference/functions/comparison-functions.md/#greaterorequals) | ✔ | ✔ | ✗ | ✗ | ✗ | ✗ | -| [empty](/sql-reference/functions/array-functions/#empty) | ✔ | ✔ | ✗ | ✗ | ✗ | ✗ | -| [notEmpty](/sql-reference/functions/array-functions/#notempty) | ✔ | ✔ | ✗ | ✗ | ✗ | ✗ | -| [has](/sql-reference/functions/array-functions#hasarr-elem) | ✗ | ✗ | ✔ | ✔ | ✔ | ✔ | -| [hasAny](/sql-reference/functions/array-functions#hasany) | ✗ | ✗ | ✔ | ✔ | ✔ | ✗ | -| [hasAll](/sql-reference/functions/array-functions#hasall) | ✗ | ✗ | ✔ | ✔ | ✔ | ✗ | -| hasToken | ✗ | ✗ | ✗ | ✔ | ✗ | ✔ | -| hasTokenOrNull | ✗ | ✗ | ✗ | ✔ | ✗ | ✔ | -| hasTokenCaseInsensitive (*) | ✗ | ✗ | ✗ | ✔ | ✗ | ✗ | -| hasTokenCaseInsensitiveOrNull (*) | ✗ | ✗ | ✗ | ✔ | ✗ | ✗ | - -定数引数がngramサイズ未満の場合、`ngrambf_v1`によるクエリ最適化には使用できません。 - -(*) `hasTokenCaseInsensitive`と`hasTokenCaseInsensitiveOrNull`を有効にするには、`tokenbf_v1`インデックスを小文字化されたデータで作成する必要があります。例えば、`INDEX idx (lower(str_col)) TYPE tokenbf_v1(512, 3, 0)`のようにします。 - -:::note -ブルームフィルターは誤陽性の一致を持つ可能性があるため、`ngrambf_v1`、`tokenbf_v1`、および`bloom_filter`インデックスは、関数の結果がfalseであることが期待されるクエリの最適化には使用できません。 - -例えば: - -- 最適化可能なもの: - - `s LIKE '%test%'` - - `NOT s NOT LIKE '%test%'` - - `s = 1` - - `NOT s != 1` - - `startsWith(s, 'test')` -- 最適化不可能なもの: - - `NOT s LIKE '%test%'` - - `s NOT LIKE '%test%'` - - `NOT s = 1` - - `s != 1` - - `NOT startsWith(s, 'test')` -::: -## Projections {#projections} -プロジェクションは[物化ビュー](/sql-reference/statements/create/view)のようですが、パートレベルで定義されています。それは、一貫性の保証を提供し、クエリに自動的に使用されます。 - -:::note -プロジェクションを実装する際には、[force_optimize_projection](/operations/settings/settings#force_optimize_projection)設定も考慮するべきです。 -::: - -プロジェクションは、[FINAL](/sql-reference/statements/select/from#final-modifier)修飾子を持つ`SELECT`文ではサポートされていません。 -### Projection Query {#projection-query} -プロジェクションクエリは、プロジェクションを定義するものです。それは暗黙的に親テーブルからデータを選択します。 -**構文** - -```sql -SELECT [GROUP BY] [ORDER BY] -``` - -プロジェクションは、[ALTER](/sql-reference/statements/alter/projection.md)文を使用して変更または削除できます。 -### Projection Storage {#projection-storage} -プロジェクションはパートディレクトリ内に格納されます。これはインデックスに似ていますが、匿名の`MergeTree`テーブルのパートを格納するサブディレクトリを含みます。このテーブルはプロジェクションの定義クエリによって誘導されます。`GROUP BY`句がある場合、基盤のストレージエンジンは[AggregatingMergeTree](aggregatingmergetree.md)となり、すべての集約関数は`AggregateFunction`に変換されます。`ORDER BY`句がある場合、`MergeTree`テーブルはそれを主キー式として使用します。マージプロセス中、プロジェクションパートはそのストレージのマージルーチンを介してマージされます。親テーブルのパートのチェックサムは、プロジェクションのパートと組み合わされます。他のメンテナンス作業はデータスキッピングインデックスに似ています。 -### Query Analysis {#projection-query-analysis} -1. プロジェクションが与えられたクエリに応じて使用されるかを確認します。つまり、基礎テーブルをクエリした場合と同じ答えが生成されるかを確認します。 -2. 読み取りに最も少ない粒を含む、最良の利用可能な一致を選択します。 -3. 
プロジェクションを使用するクエリパイプラインは、元のパーツを使用するものと異なります。プロジェクションがいくつかのパーツに欠けている場合、そのパイプラインを追加して動的に「プロジェクト」することができます。 -## Concurrent Data Access {#concurrent-data-access} - -同時テーブルアクセスのために、マルチバージョンを使用します。別の言い方をすれば、テーブルが同時に読み取られ、更新されるとき、データはクエリの時点での現在の一連のパーツから読み取られます。長時間のロックはありません。挿入は読み取り操作の妨げになりません。 - -テーブルからの読み取りは自動的に並列化されます。 -## TTL for Columns and Tables {#table_engine-mergetree-ttl} - -値の寿命を決定します。 - -`TTL`句はテーブル全体、および各個別のカラムに設定できます。テーブルレベルの`TTL`は、自動的にデータをディスクやボリューム間で移動するロジックや、すべてのデータが期限切れになったパーツを再圧縮することを指定できます。 - -式は[Date](/sql-reference/data-types/date.md)または[DateTime](/sql-reference/data-types/datetime.md)データ型に評価される必要があります。 - -**構文** - -カラムのTTLを設定する: - -```sql -TTL time_column -TTL time_column + interval -``` - -`interval`を定義するには、[時間間隔](/sql-reference/operators#operators-for-working-with-dates-and-times)演算子を使用します。例えば: - -```sql -TTL date_time + INTERVAL 1 MONTH -TTL date_time + INTERVAL 15 HOUR -``` -### Column TTL {#mergetree-column-ttl} - -カラム内の値が期限切れになると、ClickHouseはそれらをカラムデータ型のデフォルト値に置き換えます。データ部分内のすべてのカラム値が期限切れになると、ClickHouseはこのカラムをファイルシステムのデータ部分から削除します。 - -`TTL`句はキーカラムには使用できません。 - -**例** -#### `TTL`でテーブルを作成する: {#creating-a-table-with-ttl} - -```sql -CREATE TABLE tab -( - d DateTime, - a Int TTL d + INTERVAL 1 MONTH, - b Int TTL d + INTERVAL 1 MONTH, - c String -) -ENGINE = MergeTree -PARTITION BY toYYYYMM(d) -ORDER BY d; -``` -#### 既存のテーブルのカラムにTTLを追加する {#adding-ttl-to-a-column-of-an-existing-table} - -```sql -ALTER TABLE tab - MODIFY COLUMN - c String TTL d + INTERVAL 1 DAY; -``` -#### カラムのTTLを変更する {#altering-ttl-of-the-column} - -```sql -ALTER TABLE tab - MODIFY COLUMN - c String TTL d + INTERVAL 1 MONTH; -``` -### Table TTL {#mergetree-table-ttl} - -テーブルには期限切れ行を削除するための式が含まれ、複数の式が[ディスクまたはボリューム](#table_engine-mergetree-multiple-volumes)間の部品の自動移動を可能にします。テーブル内の行が期限切れになると、ClickHouseはすべての対応する行を削除します。部分の移動や再圧縮の場合、パートのすべての行が`TTL`式の条件を満たす必要があります。 - -```sql -TTL expr - [DELETE|RECOMPRESS codec_name1|TO DISK 'xxx'|TO VOLUME 'xxx'][, DELETE|RECOMPRESS codec_name2|TO DISK 'aaa'|TO VOLUME 'bbb'] ... 
- [WHERE conditions] - [GROUP BY key_expr [SET v1 = aggr_func(v1) [, v2 = aggr_func(v2) ...]] ] -``` - -TTLルールのタイプは各TTL式に続く場合があります。それは式が満たされたときに実行されるアクションに影響します(現在の時間に達すると): - -- `DELETE` - 期限切れ行を削除(デフォルトのアクション); -- `RECOMPRESS codec_name` - `codec_name`でデータパートを再圧縮します; -- `TO DISK 'aaa'` - 部品をディスク`aaa`に移動します; -- `TO VOLUME 'bbb'` - 部品をディスク`bbb`に移動します; -- `GROUP BY` - 期限切れ行を集約します。 - -`DELETE`アクションは、フィルタ条件に基づいて期限切れ行の一部のみを削除するために`WHERE`句と一緒に使用できます: -```sql -TTL time_column + INTERVAL 1 MONTH DELETE WHERE column = 'value' -``` - -`GROUP BY`式はテーブルの主キーのプレフィックスでなければなりません。 - -カラムが`GROUP BY`式の一部でなく、`SET`句で明示的に設定されていない場合、結果行にはグループ化された行からの偶発的な値が含まれます(集約関数`any`がそれに適用されているかのように)。 - -**例** -#### `TTL`でテーブルを作成する: {#creating-a-table-with-ttl-1} - -```sql -CREATE TABLE tab -( - d DateTime, - a Int -) -ENGINE = MergeTree -PARTITION BY toYYYYMM(d) -ORDER BY d -TTL d + INTERVAL 1 MONTH DELETE, - d + INTERVAL 1 WEEK TO VOLUME 'aaa', - d + INTERVAL 2 WEEK TO DISK 'bbb'; -``` -#### テーブルの`TTL`を変更する: {#altering-ttl-of-the-table} - -```sql -ALTER TABLE tab - MODIFY TTL d + INTERVAL 1 DAY; -``` - -1か月後に期限切れになる行を持つテーブルを作成します。期限切れの行は日付が月曜日のときに削除されます: - -```sql -CREATE TABLE table_with_where -( - d DateTime, - a Int -) -ENGINE = MergeTree -PARTITION BY toYYYYMM(d) -ORDER BY d -TTL d + INTERVAL 1 MONTH DELETE WHERE toDayOfWeek(d) = 1; -``` -#### 期限切れ行が再圧縮されるテーブルを作成する: {#creating-a-table-where-expired-rows-are-recompressed} - -```sql -CREATE TABLE table_for_recompression -( - d DateTime, - key UInt64, - value String -) ENGINE MergeTree() -ORDER BY tuple() -PARTITION BY key -TTL d + INTERVAL 1 MONTH RECOMPRESS CODEC(ZSTD(17)), d + INTERVAL 1 YEAR RECOMPRESS CODEC(LZ4HC(10)) -SETTINGS min_rows_for_wide_part = 0, min_bytes_for_wide_part = 0; -``` - -期限切れ行が集約されるテーブルを作成します。結果行の`x`はグループ化された行の中で最大値を持ち、`y`は最小値、`d`はグループ化された行からの偶発的な値を持ちます。 - -```sql -CREATE TABLE table_for_aggregation -( - d DateTime, - k1 Int, - k2 Int, - x Int, - y Int -) -ENGINE = MergeTree -ORDER BY (k1, k2) -TTL d + INTERVAL 1 MONTH GROUP BY k1, k2 SET x = max(x), y = min(y); -``` -### Removing Expired Data {#mergetree-removing-expired-data} - -期限切れの`TTL`を持つデータは、ClickHouseがデータパーツをマージするときに削除されます。 - -ClickHouseがデータが期限切れであることを検出した場合、スケジュール外のマージを実行します。そのようなマージの頻度を制御するには、`merge_with_ttl_timeout`を設定できます。値が低すぎると、多くのスケジュール外のマージが行われる可能性があり、リソースを多く消費することがあります。 - -マージの間に`SELECT`クエリを実行すると、期限切れのデータが取得されることがあります。それを避けるためには、`SELECT`の前に[OPTIMIZE](/sql-reference/statements/optimize.md)クエリを使用してください。 - -**参照** - -- [ttl_only_drop_parts](/operations/settings/merge-tree-settings#ttl_only_drop_parts)設定 -## Disk types {#disk-types} - -ローカルブロックデバイスに加えて、ClickHouseは次のストレージタイプをサポートしています: -- [`s3` for S3 and MinIO](#table_engine-mergetree-s3) -- [`gcs` for GCS](/integrations/data-ingestion/gcs/index.md/#creating-a-disk) -- [`blob_storage_disk` for Azure Blob Storage](/operations/storing-data#azure-blob-storage) -- [`hdfs` for HDFS](/engines/table-engines/integrations/hdfs) -- [`web` for read-only from web](/operations/storing-data#web-storage) -- [`cache` for local caching](/operations/storing-data#using-local-cache) -- [`s3_plain` for backups to S3](/operations/backup#backuprestore-using-an-s3-disk) -- [`s3_plain_rewritable` for immutable, non-replicated tables in S3](/operations/storing-data.md#s3-plain-rewritable-storage) -## Using Multiple Block Devices for Data Storage {#table_engine-mergetree-multiple-volumes} -### Introduction {#introduction} - 
-`MergeTree`ファミリーのテーブルエンジンは、複数のブロックデバイスにデータを保存することができます。例えば、特定のテーブルのデータが暗黙的に「ホット」と「コールド」に分割されている場合に役立ちます。最新のデータは定期的にリクエストされますが、ごく少量のスペースしか必要ありません。その反対に、太い尾の履歴データは希にリクエストされます。複数のディスクが利用可能な場合、「ホット」データは高速ディスク(例えば、NVMe SSDまたはメモリ内)に置かれ、「コールド」データは比較的遅いもの(例えば、HDD)に置かれる場合があります。 - -データパートは`MergeTree`エンジンテーブルの最小可動単位です。1つのパーツに属するデータは1つのディスクに保存されます。データパーツは、ユーザー設定に応じてバックグラウンドでディスク間を移動したり、[ALTER](/sql-reference/statements/alter/partition)クエリによって移動したりできます。 -### Terms {#terms} - -- ディスク — ファイルシステムにマウントされたブロックデバイス。 -- デフォルトディスク — [path](/operations/server-configuration-parameters/settings.md/#path)サーバー設定で指定されたパスを格納するディスク。 -- ボリューム — 同等のディスクの順序付けされたセット([JBOD](https://en.wikipedia.org/wiki/Non-RAID_drive_architectures)に似ています)。 -- ストレージポリシー — ボリュームのセットとそれらの間でデータを移動するためのルール。 - -記述されたエンティティに付けられた名前は、システムテーブル`[system.storage_policies](/operations/system-tables/storage_policies)`および`[system.disks](/operations/system-tables/disks)`で見つけることができます。テーブルに構成されたストレージポリシーの1つを適用するには、`MergeTree`エンジンファミリのテーブルの`storage_policy`設定を使用します。 -### Configuration {#table_engine-mergetree-multiple-volumes_configure} - -ディスク、ボリューム、およびストレージポリシーは、`config.d`ディレクトリ内のファイルの中にある``タグ内に宣言する必要があります。 - -:::tip -ディスクは、クエリの`SETTINGS`セクションに宣言することもできます。これは、例えば、URLでホストされているディスクを一時的に接続するための便利です。詳細については[動的ストレージ](/operations/storing-data#dynamic-configuration)を参照してください。 -::: - -構成構造: - -```xml - - - - /mnt/fast_ssd/clickhouse/ - - - /mnt/hdd1/clickhouse/ - 10485760 - - - /mnt/hdd2/clickhouse/ - 10485760 - - - ... - - - ... - -``` - -タグ: - -- `` — ディスク名。すべてのディスクの名前は異なる必要があります。 -- `path` — サーバーがデータ(`data`および`shadow`フォルダー)を保存するパスで、`/`で終わる必要があります。 -- `keep_free_space_bytes` — 保存されるべきフリースペースの量。 - -ディスク定義の順序は重要ではありません。 - -ストレージポリシー構成マークアップ: - -```xml - - ... - - - - - disk_name_from_disks_configuration - 1073741824 - round_robin - - - - - - - 0.2 - - - - - - - - ... - -``` - -タグ: - -- `policy_name_N` — ポリシー名。ポリシー名は一意である必要があります。 -- `volume_name_N` — ボリューム名。ボリューム名は一意である必要があります。 -- `disk` — ボリューム内のディスク。 -- `max_data_part_size_bytes` — ボリュームのどのディスクにも保存できるパーツの最大サイズ。マージされたパーツのサイズが`max_data_part_size_bytes`を超えると、そのパーツは次のボリュームに書き込まれます。この機能により、新しい小さなパーツをホット(SSD)ボリュームに保持し、大きなサイズに達したときにコールド(HDD)ボリュームに移動できます。この設定は、ポリシーに1つのボリュームだけがある場合は使用しないでください。 -- `move_factor` — 利用可能なスペースがこのファクターより少なくなると、データは自動的に次のボリュームに移動し始めます(デフォルトは0.1)。ClickHouseは、現存するパーツをサイズが大きいものから小さいものへと降順に並べ、`move_factor`条件を満たすために十分なサイズのパーツを選択します。すべてのパーツの合計サイズが不十分な場合は、すべてのパーツが移動されます。 -- `perform_ttl_move_on_insert` — データパートINSERT時のTTL移動を無効にします。デフォルト(有効な場合)の状態では、TTL移動ルールによりすでに期限切れのデータパートを挿入すると、そのパーツは直ちに移動ルールに宣言されたボリューム/ディスクに移動されます。これは、宛先ボリューム/ディスクが遅い場合(例:S3)には挿入を大幅に遅くする可能性があります。無効にした場合は、期限切れのデータパートがデフォルトボリュームに書き込まれ、その後すぐにTTLボリュームに移動されます。 -- `load_balancing` - ディスクバランスのポリシー、`round_robin`または`least_used`。 -- `least_used_ttl_ms` - すべてのディスクで更新可能なスペースを更新するためのタイムアウト(ミリ秒)(`0` - 常に更新, `-1` - 更新しない, デフォルトは`60000`)。注意、ディスクがClickHouseのみに使用可能で、オンラインファイルシステムのリサイズ/縮小の影響を受けない場合は、`-1`を使用してもよいですが、そうでない場合は推奨されません。最終的には、不正確なスペース配分につながるためです。 -- `prefer_not_to_merge` — この設定は使用しないでください。ボリューム上のデータパーツのマージを無効にします(これは有害でパフォーマンスの低下につながります)。この設定が有効になっている状態では(使用しないでください)、このボリュームでデータのマージが許可されません(これは悪い結果をもたらします)。これにより(しかし、あなたはそれを必要としません)、ClickHouseが遅いディスクでどのように動作するかを制御できます(しかし、ClickHouseの方がよく知っていますので、この設定を使用しないでください)。 -- `volume_priority` — ボリュームが埋められる順序を定義します。低い値は高い優先度を示します。パラメータ値は自然数であり、1からNの範囲を一緒にカバーするべきです(最低の優先度)。 - * すべてのボリュームにタグが付けられている場合、それらは指定された順序で優先されます。 - * 一部のボリュームにのみタグが付けられている場合、タグなしのボリュームは最低の優先度を持ち、定義された順序で優先されます。 - * タグが付けられていない場合、優先度は構成で宣言された順序に応じて設定されます。 - * 2つのボリュームは同じ優先度の値を持つことができません。 - -構成の例: - -```xml - - ... 
    <policies>
        <hdd_in_order> <!-- ポリシー名 -->
            <volumes>
                <single> <!-- ボリューム名 -->
                    <disk>disk1</disk>
                    <disk>disk2</disk>
                </single>
            </volumes>
        </hdd_in_order>

        <moving_from_ssd_to_hdd>
            <volumes>
                <hot>
                    <disk>fast_ssd</disk>
                    <max_data_part_size_bytes>1073741824</max_data_part_size_bytes>
                </hot>
                <cold>
                    <disk>disk1</disk>
                </cold>
            </volumes>
            <move_factor>0.2</move_factor>
        </moving_from_ssd_to_hdd>

        <small_jbod_with_external> <!-- ポリシー名は説明用の仮定 -->
            <volumes>
                <main>
                    <disk>jbod1</disk>
                </main>
                <external>
                    <disk>external</disk>
                </external>
            </volumes>
        </small_jbod_with_external>
    </policies>
    ...
</storage_configuration>
-``` - -与えられた例では、`hdd_in_order`ポリシーは[ラウンドロビン](https://en.wikipedia.org/wiki/Round-robin_scheduling)アプローチを実装します。このポリシーは1つのボリューム(`single`)のみを定義し、データパーツはそのすべてのディスクに円環式で保存されます。このようなポリシーは、複数の類似のディスクがシステムにマウントされているが、RAIDが構成されていない場合には非常に有用です。各個々のディスクドライブは信頼性がなく、レプリケーション係数を3以上にしたい場合があります。 - -システム内にさまざまな種類のディスクが利用可能な場合、`moving_from_ssd_to_hdd`ポリシーを代わりに使用できます。ボリューム`hot`はSSDディスク(`fast_ssd`)からなり、そこに保存できるパートの最大サイズは1GBです。1GBを超えるサイズのすべてのパーツは、HDDディスク`disk1`を含む`cold`ボリュームに直接保存されます。また、ディスク`fast_ssd`が80%以上充填されると、データはバックグラウンドプロセスによって`disk1`に転送されます。 - -ストレージポリシー内のボリュームの列挙順序は、少なくとも1つのボリュームに明示的な`volume_priority`パラメータがない場合に重要です。 -ボリュームが過剰に満たされると、データは次のボリュームに移動されます。ディスクの列挙順序も重要です。データは順に保存されます。 - -テーブルを作成する際には、設定されたストレージポリシーの1つを適用できます: - -```sql -CREATE TABLE table_with_non_default_policy ( - EventDate Date, - OrderID UInt64, - BannerID UInt64, - SearchPhrase String -) ENGINE = MergeTree -ORDER BY (OrderID, BannerID) -PARTITION BY toYYYYMM(EventDate) -SETTINGS storage_policy = 'moving_from_ssd_to_hdd' -``` - -`default`ストレージポリシーは、``で指定された1つのディスクのみを含むボリュームを使用することを意味します。 -テーブル作成後にストレージポリシーを変更するには、[ALTER TABLE ... MODIFY SETTING]クエリを使用し、新しいポリシーにすべての古いディスクと同名のボリュームを含める必要があります。 - -データパーツのバックグラウンド移動を実行するスレッド数は、[background_move_pool_size](/operations/server-configuration-parameters/settings.md/#background_move_pool_size)設定で変更できます。 -### Details {#details} - -`MergeTree`テーブルの場合、データはさまざまな方法でディスクに到達します: - -- 挿入(`INSERT`クエリ)の結果として。 -- バックグラウンドマージおよび[ミューテーション](/sql-reference/statements/alter#mutations)中。 -- 別のレプリカからのダウンロード時。 -- パーティションのフリーズ結果として、[ALTER TABLE ... FREEZE PARTITION](/sql-reference/statements/alter/partition#freeze-partition)。 - -これらのケースは、ミューテーションやパーティションのフリーズを除き、パートは指定されたストレージポリシーに応じてボリュームおよびディスクに保存されます: - -1. 保存パート用に十分なディスクスペースがある最初のボリューム(定義の順序で)が選択されます(`unreserved_space > current_part_size`)および指定されたサイズのパーツを保存することが許可される(`max_data_part_size_bytes > current_part_size`)。 -2. このボリューム内で、前回のデータのチャンクを格納するために使用されたディスクの次のディスクが選択され、パートサイズよりもフリースペースが多いディスク(`unreserved_space - keep_free_space_bytes > current_part_size`)が選択されます。 - -内部では、ミューテーションやパーティションのフリーズは[ハードリンク](https://ja.wikipedia.org/wiki/ハードリンク)を利用します。異なるディスク間のハードリンクはサポートされていないため、そのような場合、結果のパーツは初期のものと同じディスクに保存されます。 - -バックグラウンドでは、ボリューム間での移動は、フリースペースの量(`move_factor`パラメータ)に基づいて構成ファイルで宣言された順序に従います。 -データは最後のボリュームから最初のボリュームに移動されることはありません。バックグラウンド移動を監視するために、システムテーブル`[system.part_log](/operations/system-tables/part_log)`(フィールド`type = MOVE_PART`)および`[system.parts](/operations/system-tables/parts.md)`(フィールド`path`および`disk`)を使用できます。また、詳細情報はサーバーログで見つけることができます。 - -ユーザーは、[ALTER TABLE ... MOVE PART\|PARTITION ... 
TO VOLUME\|DISK ...](/sql-reference/statements/alter/partition)クエリを使用して、パートやパーティションを1つのボリュームから別のボリュームに強制的に移動させることができます。すべての背景操作に関する制限が考慮されます。このクエリは、自身で移動を開始し、バックグラウンド操作の完了を待機しません。無料スペースが不十分な場合や、必要条件が満たされていない場合、ユーザーはエラーメッセージを受け取ります。 - -データの移動はデータレプリケーションに干渉しません。したがって、同じテーブルに対して異なるストレージポリシーを異なるレプリカに指定できます。 - -バックグラウンドマージとミューテーションが完了した後、古いパーツは一定の期間(`old_parts_lifetime`)の後にのみ削除されます。 -この期間中、他のボリュームやディスクには移動されません。したがって、パーツが最終的に削除されるまで、そのサイズは占有スペースの評価に考慮されます。 - -ユーザーは、[JBOD](https://ja.wikipedia.org/wiki/Non-RAID_drive_architectures)ボリュームの異なるディスクに新しい大きなパーツを均等に割り当てることができます。これは、[min_bytes_to_rebalance_partition_over_jbod](/operations/settings/merge-tree-settings.md/#min_bytes_to_rebalance_partition_over_jbod)設定を使用して実現します。 -## 外部ストレージを使用したデータストレージ {#table_engine-mergetree-s3} - -[MergeTree](/engines/table-engines/mergetree-family/mergetree.md) ファミリーのテーブルエンジンは、`S3`、`AzureBlobStorage`、`HDFS` にデータを保存することができ、タイプ `s3`、`azure_blob_storage`、`hdfs` に応じてディスクを使用します。詳細については、[外部ストレージオプションの構成](/operations/storing-data.md/#configuring-external-storage)を参照してください。 - -外部ストレージとしての [S3](https://aws.amazon.com/s3/) の例で、ディスクタイプは `s3` です。 - -設定マークアップ: -```xml - - ... - - - s3 - true - https://clickhouse-public-datasets.s3.amazonaws.com/my-bucket/root-path/ - your_access_key_id - your_secret_access_key - -
            <header>Authorization: Bearer SOME-TOKEN</header>
            <server_side_encryption_customer_key_base64>your_base64_encoded_customer_key</server_side_encryption_customer_key_base64>
            <server_side_encryption_kms_key_id>your_kms_key_id</server_side_encryption_kms_key_id>
            <server_side_encryption_kms_encryption_context>your_kms_encryption_context</server_side_encryption_kms_encryption_context>
            <server_side_encryption_kms_bucket_key_enabled>true</server_side_encryption_kms_bucket_key_enabled>
            <proxy>
                <uri>http://proxy1</uri>
                <uri>http://proxy2</uri>
            </proxy>
            <connect_timeout_ms>10000</connect_timeout_ms>
            <request_timeout_ms>5000</request_timeout_ms>
            <retry_attempts>10</retry_attempts>
            <single_read_retries>4</single_read_retries>
            <min_bytes_for_seek>1000</min_bytes_for_seek>
            <metadata_path>/var/lib/clickhouse/disks/s3/</metadata_path>
            <skip_access_check>false</skip_access_check>
        </s3>
        <s3_cache>
            <type>cache</type>
            <disk>s3</disk>
            <path>/var/lib/clickhouse/disks/s3_cache/</path>
            <max_size>10Gi</max_size>
        </s3_cache>
    </disks>
    ...
</storage_configuration>
-``` - -また、[外部ストレージオプションの構成](/operations/storing-data.md/#configuring-external-storage)も参照してください。 - -:::note キャッシュ設定 -ClickHouse バージョン 22.3 から 22.7 までは異なるキャッシュ設定が使用されているため、これらのバージョンを使用している場合は、[ローカルキャッシュの使用](/operations/storing-data.md/#using-local-cache)を参照してください。 -::: -## バーチャルカラム {#virtual-columns} - -- `_part` — パートの名前。 -- `_part_index` — クエリ結果におけるパートの順次インデックス。 -- `_part_starting_offset` — クエリ結果におけるパートの累積開始行。 -- `_part_offset` — パート内の行番号。 -- `_partition_id` — パーティションの名前。 -- `_part_uuid` — 一意のパート識別子 (MergeTree 設定 `assign_part_uuids` が有効な場合)。 -- `_part_data_version` — パートのデータバージョン (最小ブロック番号またはミューテーションバージョン)。 -- `_partition_value` — `partition by` 式の値 (タプル)。 -- `_sample_factor` — サンプルファクター (クエリから)。 -- `_block_number` — 行のブロック番号。`allow_experimental_block_number_column` が true に設定されると、マージ時に永続化されます。 -## カラム統計 {#column-statistics} - - - - -統計の宣言は、`*MergeTree*` ファミリーのテーブルの `CREATE` クエリのカラムセクションにあり、`set allow_experimental_statistics = 1` を有効にしています。 - -```sql -CREATE TABLE tab -( - a Int64 STATISTICS(TDigest, Uniq), - b Float64 -) -ENGINE = MergeTree -ORDER BY a -``` - -統計は `ALTER` ステートメントを使用しても操作できます。 - -```sql -ALTER TABLE tab ADD STATISTICS b TYPE TDigest, Uniq; -ALTER TABLE tab DROP STATISTICS a; -``` - -これらの軽量統計は、カラム内の値の分布に関する情報を集約します。統計はすべてのパートに保存され、各挿入のたびに更新されます。 -これらは、`set allow_statistics_optimize = 1` を有効にする場合にのみ、prewhere 最適化に使用できます。 -### 利用可能なカラム統計のタイプ {#available-types-of-column-statistics} - -- `MinMax` - - 数値カラムの範囲フィルタの選択性を推定可能にする列の最小値と最大値。 - - 構文: `minmax` - -- `TDigest` - - 数値カラムの近似パーセンタイル (例: 90 パーセンタイル) を計算するのに役立つ [TDigest](https://github.com/tdunning/t-digest) スケッチ。 - - 構文: `tdigest` - -- `Uniq` - - 列が含む一意の値の数を推定する [HyperLogLog](https://en.wikipedia.org/wiki/HyperLogLog) スケッチ。 - - 構文: `uniq` - -- `CountMin` - - 列内の各値の頻度の近似カウントを提供する [CountMin](https://en.wikipedia.org/wiki/Count%E2%80%93min_sketch) スケッチ。 - - 構文: `countmin` -### サポートされているデータ型 {#supported-data-types} - -| | (U)Int*, Float*, Decimal(*), Date*, Boolean, Enum* | String または FixedString | -|-----------|----------------------------------------------------|-----------------------| -| CountMin | ✔ | ✔ | -| MinMax | ✔ | ✗ | -| TDigest | ✔ | ✗ | -| Uniq | ✔ | ✔ | -### サポートされている操作 {#supported-operations} - -| | 等価フィルタ (==) | 範囲フィルタ (`>, >=, <, <=`) | -|-----------|-----------------------|------------------------------| -| CountMin | ✔ | ✗ | -| MinMax | ✗ | ✔ | -| TDigest | ✗ | ✔ | -| Uniq | ✔ | ✗ | -## カラムレベルの設定 {#column-level-settings} - -特定の MergeTree 設定は、カラムレベルでオーバーライドできます: - -- `max_compress_block_size` — テーブルに書き込む前に圧縮される未圧縮データの最大ブロックサイズ。 -- `min_compress_block_size` — 次のマークを書き込む際に圧縮のために必要な未圧縮データの最小ブロックサイズ。 - -例: - -```sql -CREATE TABLE tab -( - id Int64, - document String SETTINGS (min_compress_block_size = 16777216, max_compress_block_size = 16777216) -) -ENGINE = MergeTree -ORDER BY id -``` - -カラムレベルの設定は、[ALTER MODIFY COLUMN](/sql-reference/statements/alter/column.md) を使用して変更または削除できます。例えば: - -- カラム宣言から `SETTINGS` を削除: - -```sql -ALTER TABLE tab MODIFY COLUMN document REMOVE SETTINGS; -``` - -- 設定を変更: - -```sql -ALTER TABLE tab MODIFY COLUMN document MODIFY SETTING min_compress_block_size = 8192; -``` - -- 1つまたは複数の設定をリセットし、同時にテーブルの CREATE クエリのカラム式から設定宣言を削除します。 - -```sql -ALTER TABLE tab MODIFY COLUMN document RESET SETTING min_compress_block_size; -``` diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/mergetree-family/mergetree.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/mergetree-family/mergetree.md.hash deleted file mode 100644 index 
11624570ea2..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/mergetree-family/mergetree.md.hash +++ /dev/null @@ -1 +0,0 @@ -bdaa6d35f6a29fc5 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/mergetree-family/replacingmergetree.md b/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/mergetree-family/replacingmergetree.md deleted file mode 100644 index f83bdebf0b1..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/mergetree-family/replacingmergetree.md +++ /dev/null @@ -1,226 +0,0 @@ ---- -description: '主キーとは異なり、同じソートキー値(`ORDER BY`テーブルセクションではなく`PRIMARY KEY`)を持つ重複エントリを削除します。' -sidebar_label: 'ReplacingMergeTree' -sidebar_position: 40 -slug: '/engines/table-engines/mergetree-family/replacingmergetree' -title: 'ReplacingMergeTree' ---- - - - - -# ReplacingMergeTree - -このエンジンは、[MergeTree](/engines/table-engines/mergetree-family/versionedcollapsingmergetree)と異なり、同じ[ソートキー](../../../engines/table-engines/mergetree-family/mergetree.md)値(`ORDER BY`テーブルセクション、ではなく`PRIMARY KEY`)を持つ重複エントリを削除します。 - -データの重複削除は、マージ中にのみ発生します。マージは不明な時間にバックグラウンドで行われるため、計画を立てることはできません。一部のデータは未処理のまま残ることがあります。`OPTIMIZE`クエリを使用して非スケジュールのマージを実行することができますが、大量のデータを読み書きするため、これを利用することは期待しないでください。 - -したがって、`ReplacingMergeTree`は、スペースを節約するためにバックグラウンドで重複データをクリアするのに適していますが、重複が存在しないことを保証するものではありません。 - -:::note -ReplacingMergeTreeに関する詳細なガイド、ベストプラクティス、パフォーマンスの最適化方法については、[こちら](/guides/replacing-merge-tree)をご覧ください。 -::: - -## テーブルの作成 {#creating-a-table} - -```sql -CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] -( - name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1], - name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2], - ... -) ENGINE = ReplacingMergeTree([ver [, is_deleted]]) -[PARTITION BY expr] -[ORDER BY expr] -[PRIMARY KEY expr] -[SAMPLE BY expr] -[SETTINGS name=value, ...] 
-``` - -リクエストパラメータの説明については、[ステートメントの説明](../../../sql-reference/statements/create/table.md)を参照してください。 - -:::note -行の一意性は、`PRIMARY KEY`ではなく、`ORDER BY`テーブルセクションによって決まります。 -::: - -## ReplacingMergeTreeのパラメータ {#replacingmergetree-parameters} - -### ver {#ver} - -`ver` — バージョン番号を持つカラム。型は`UInt*`、`Date`、`DateTime`または`DateTime64`。オプションのパラメータです。 - -マージ時に、`ReplacingMergeTree`は同じソートキーを持つすべての行から1つだけを残します: - -- `ver`が設定されていない場合は、選択内の最後の行が残ります。選択とは、マージに参加するパーツのセット内の行の集合です。最も最近作成されたパート(最後の挿入)が選択内の最後の行になります。したがって、重複削除後は、各ユニークなソートキーに対して、最新の挿入から最後の行が残ります。 -- `ver`が指定されている場合は、最大バージョンの行が残ります。複数の行が同じ`ver`を持つ場合、それに対して「`ver`が指定されていない場合」と同じルールが適用されるため、最も最近挿入された行が残ります。 - -例: - -```sql --- verなし - 最後に挿入されたものが"勝つ" -CREATE TABLE myFirstReplacingMT -( - `key` Int64, - `someCol` String, - `eventTime` DateTime -) -ENGINE = ReplacingMergeTree -ORDER BY key; - -INSERT INTO myFirstReplacingMT Values (1, 'first', '2020-01-01 01:01:01'); -INSERT INTO myFirstReplacingMT Values (1, 'second', '2020-01-01 00:00:00'); - -SELECT * FROM myFirstReplacingMT FINAL; - -┌─key─┬─someCol─┬───────────eventTime─┐ -│ 1 │ second │ 2020-01-01 00:00:00 │ -└─────┴─────────┴─────────────────────┘ - - --- verあり - 最大のverを持つ行が"勝つ" -CREATE TABLE mySecondReplacingMT -( - `key` Int64, - `someCol` String, - `eventTime` DateTime -) -ENGINE = ReplacingMergeTree(eventTime) -ORDER BY key; - -INSERT INTO mySecondReplacingMT Values (1, 'first', '2020-01-01 01:01:01'); -INSERT INTO mySecondReplacingMT Values (1, 'second', '2020-01-01 00:00:00'); - -SELECT * FROM mySecondReplacingMT FINAL; - -┌─key─┬─someCol─┬───────────eventTime─┐ -│ 1 │ first │ 2020-01-01 01:01:01 │ -└─────┴─────────┴─────────────────────┘ -``` - -### is_deleted {#is_deleted} - -`is_deleted` — マージ中に、データがこの行における状態か、削除されるべきかを判定するために使用されるカラムの名前;`1`は「削除された」行、`0`は「状態」行です。 - -カラムのデータ型は`UInt8`です。 - -:::note -`is_deleted`は、`ver`が使用されている場合にのみ有効にできます。 - -データに対する操作に関わらず、バージョンは増加させる必要があります。挿入された2つの行が同じバージョン番号を持つ場合、最後に挿入された行が保持されます。 - -デフォルトでは、ClickHouseはキーに対して最後の行を保持します。たとえその行が削除行であってもです。今後の低バージョンの行が安全に挿入できるようにし、削除行が適用され続けるからです。 - -このような削除行を永続的にドロップするには、テーブル設定`allow_experimental_replacing_merge_with_cleanup`を有効にし、次のいずれかを行います: - -1. テーブル設定`enable_replacing_merge_with_cleanup_for_min_age_to_force_merge`、`min_age_to_force_merge_on_partition_only`、および`min_age_to_force_merge_seconds`を設定します。パーティション内のすべてのパーツが`min_age_to_force_merge_seconds`よりも古い場合、ClickHouseはそれらをすべて1つのパートにマージし、削除行を取り除きます。 - -2. 手動で`OPTIMIZE TABLE table [PARTITION partition | PARTITION ID 'partition_id'] FINAL CLEANUP`を実行します。 -::: - -例: -```sql --- verとis_deletedを使用 -CREATE OR REPLACE TABLE myThirdReplacingMT -( - `key` Int64, - `someCol` String, - `eventTime` DateTime, - `is_deleted` UInt8 -) -ENGINE = ReplacingMergeTree(eventTime, is_deleted) -ORDER BY key -SETTINGS allow_experimental_replacing_merge_with_cleanup = 1; - -INSERT INTO myThirdReplacingMT Values (1, 'first', '2020-01-01 01:01:01', 0); -INSERT INTO myThirdReplacingMT Values (1, 'first', '2020-01-01 01:01:01', 1); - -select * from myThirdReplacingMT final; - -0 rows in set. Elapsed: 0.003 sec. 
- --- is_deletedを使用して削除行を削除 -OPTIMIZE TABLE myThirdReplacingMT FINAL CLEANUP; - -INSERT INTO myThirdReplacingMT Values (1, 'first', '2020-01-01 00:00:00', 0); - -select * from myThirdReplacingMT final; - -┌─key─┬─someCol─┬───────────eventTime─┬─is_deleted─┐ -│ 1 │ first │ 2020-01-01 00:00:00 │ 0 │ -└─────┴─────────┴─────────────────────┴────────────┘ -``` - -## クエリ句 {#query-clauses} - -`ReplacingMergeTree`テーブルを作成する際には、`MergeTree`テーブルを作成する際と同じ[句](../../../engines/table-engines/mergetree-family/mergetree.md)が必要です。 - -
- -テーブルを作成するための非推奨の方法 - -:::note -新しいプロジェクトではこの方法を使用せず、可能であれば古いプロジェクトを上記の方法に切り替えてください。 -::: - -```sql -CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] -( - name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1], - name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2], - ... -) ENGINE [=] ReplacingMergeTree(date-column [, sampling_expression], (primary, key), index_granularity, [ver]) -``` - -`ver`を除くすべてのパラメータは`MergeTree`と同じ意味を持ちます。 - -- `ver` - バージョンを持つカラム。オプションのパラメータです。詳細については、上記のテキストを参照してください。 - -
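補足として、行の一意性が `PRIMARY KEY` ではなく `ORDER BY` の式全体で決まることを確認するための小さなスケッチを示します。テーブル名 `rmt_order_by_demo` とカラム名は説明用の仮定です。

```sql
CREATE TABLE rmt_order_by_demo
(
    key  Int64,
    attr String,
    val  String
)
ENGINE = ReplacingMergeTree
ORDER BY (key, attr);  -- 重複判定は (key, attr) の組で行われる

INSERT INTO rmt_order_by_demo VALUES (1, 'a', 'v1');
INSERT INTO rmt_order_by_demo VALUES (1, 'a', 'v2');  -- 同じ (key, attr) → マージ後は最後に挿入された行のみ残る
INSERT INTO rmt_order_by_demo VALUES (1, 'b', 'v3');  -- attr が異なるため別行として残る

SELECT * FROM rmt_order_by_demo FINAL;  -- クエリ時に重複を排除して確認する
```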
- -## クエリ時の重複排除とFINAL {#query-time-de-duplication--final} - -マージ時に、`ReplacingMergeTree`は重複した行を識別し、テーブル作成時に使用された`ORDER BY`カラムの値を一意の識別子として利用し、最高バージョンのみを保持します。しかし、これは最終的に正しい結果を提供するものであり、行が重複しないことを保証するものではなく、これを期待すべきではありません。したがって、クエリは更新および削除行がクエリに考慮されるため、不正確な回答を生成することがあります。 - -正しい回答を得るには、ユーザーはバックグラウンドのマージを補完し、クエリ時の重複排除と削除処理を行う必要があります。これは、`FINAL`演算子を使用することで達成できます。たとえば、次の例を考えます: - -```sql -CREATE TABLE rmt_example -( - `number` UInt16 -) -ENGINE = ReplacingMergeTree -ORDER BY number - -INSERT INTO rmt_example SELECT floor(randUniform(0, 100)) AS number -FROM numbers(1000000000) - -0 rows in set. Elapsed: 19.958 sec. Processed 1.00 billion rows, 8.00 GB (50.11 million rows/s., 400.84 MB/s.) -``` -`FINAL`を使わずにクエリを実行すると、不正確なカウントが生成されます(正確な結果はマージに応じて変わることがあります): - -```sql -SELECT count() -FROM rmt_example - -┌─count()─┐ -│ 200 │ -└─────────┘ - -1 row in set. Elapsed: 0.002 sec. -``` - -`FINAL`を追加すると正しい結果が得られます: - -```sql -SELECT count() -FROM rmt_example -FINAL - -┌─count()─┐ -│ 100 │ -└─────────┘ - -1 row in set. Elapsed: 0.002 sec. -``` - -`FINAL`の詳細、パフォーマンスの最適化については、[ReplacingMergeTreeに関する詳細ガイド](/guides/replacing-merge-tree)をお読みになることをお勧めします。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/mergetree-family/replacingmergetree.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/mergetree-family/replacingmergetree.md.hash deleted file mode 100644 index 7f3c32704fa..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/mergetree-family/replacingmergetree.md.hash +++ /dev/null @@ -1 +0,0 @@ -3078d73052bb3c76 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/mergetree-family/replication.md b/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/mergetree-family/replication.md deleted file mode 100644 index f157b67bcb3..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/mergetree-family/replication.md +++ /dev/null @@ -1,363 +0,0 @@ ---- -description: 'ClickHouse におけるデータレプリケーションの概要' -sidebar_label: 'データレプリケーション' -sidebar_position: 20 -slug: '/engines/table-engines/mergetree-family/replication' -title: 'データレプリケーション' ---- - - - - -# データレプリケーション - -:::note -ClickHouse Cloud では、レプリケーションが自動的に管理されます。引数を追加せずにテーブルを作成してください。たとえば、以下のテキストでは、 - -```sql -ENGINE = ReplicatedMergeTree( - '/clickhouse/tables/{shard}/table_name', - '{replica}', - ver -) -``` - -を次のように置き換えます: - -```sql -ENGINE = ReplicatedMergeTree -``` -::: - -レプリケーションは、MergeTreeファミリーのテーブルにのみ対応しています: - -- ReplicatedMergeTree -- ReplicatedSummingMergeTree -- ReplicatedReplacingMergeTree -- ReplicatedAggregatingMergeTree -- ReplicatedCollapsingMergeTree -- ReplicatedVersionedCollapsingMergeTree -- ReplicatedGraphiteMergeTree - -レプリケーションは、個々のテーブルのレベルで機能し、サーバー全体ではなくなります。サーバーは、レプリケーションテーブルと非レプリケーションテーブルの両方を同時に保存できます。 - -レプリケーションはシャーディングに依存しません。各シャードには独自の独立したレプリケーションがあります。 - -`INSERT` および `ALTER` クエリの圧縮データはレプリケーションされます(詳細については、[ALTER](/sql-reference/statements/alter) のドキュメントを参照してください)。 - -`CREATE`、`DROP`、`ATTACH`、`DETACH` および `RENAME` クエリは、単一のサーバーで実行され、レプリケーションされません: - -- `CREATE TABLE` クエリは、クエリが実行されたサーバーに新しいレプリケーション可能なテーブルを作成します。このテーブルが他のサーバーにすでに存在する場合、これは新しいレプリカを追加します。 -- `DROP TABLE` クエリは、クエリが実行されたサーバーにあるレプリカを削除します。 -- `RENAME` クエリは、レプリカの1つのテーブルの名前を変更します。言い換えれば、レプリケーションテーブルは異なるレプリカで異なる名前を持つことができます。 - -ClickHouseは、レプリカのメタ情報を保存するために [ClickHouse Keeper](/guides/sre/keeper/index.md) を使用します。ZooKeeperバージョン3.4.5以降を使用することも可能ですが、ClickHouse Keeperが推奨されます。 - 
-レプリケーションを使用するには、[zookeeper](/operations/server-configuration-parameters/settings#zookeeper) サーバー構成セクションでパラメータを設定します。 - -:::note -セキュリティ設定を無視しないでください。ClickHouseは、ZooKeeperセキュリティサブシステムの `digest` [ACLスキーム](https://zookeeper.apache.org/doc/current/zookeeperProgrammers.html#sc_ZooKeeperAccessControl) をサポートしています。 -::: - -ClickHouse Keeperクラスタのアドレスを設定する例: - -```xml - - - example1 - 2181 - - - example2 - 2181 - - - example3 - 2181 - - -``` - -ClickHouseは、補助的なZooKeeperクラスタにレプリカのメタ情報を保存することもサポートしています。これは、エンジン引数としてZooKeeperクラスタ名とパスを提供することで行います。言い換えれば、異なるZooKeeperクラスタに異なるテーブルのメタデータを保存することができます。 - -補助的なZooKeeperクラスタのアドレスを設定する例: - -```xml - - - - example_2_1 - 2181 - - - example_2_2 - 2181 - - - example_2_3 - 2181 - - - - - example_3_1 - 2181 - - - -``` - -デフォルトのZooKeeperクラスタの代わりに、補助的なZooKeeperクラスタにテーブルのメタデータを保存するには、次のようにReplicatedMergeTreeエンジンでテーブルを作成するためのSQLを使用できます: - -```sql -CREATE TABLE table_name ( ... ) ENGINE = ReplicatedMergeTree('zookeeper_name_configured_in_auxiliary_zookeepers:path', 'replica_name') ... -``` - -既存のZooKeeperクラスタを指定でき、システムはその上に独自のデータ用のディレクトリを使用します(ディレクトリはレプリケーション可能なテーブルを作成するときに指定されます)。 - -設定ファイルにZooKeeperが設定されていない場合、レプリケーションテーブルを作成することはできず、既存のレプリケーションテーブルは読み取り専用になります。 - -ZooKeeperは `SELECT` クエリでは使用されません。なぜなら、レプリケーションは `SELECT` のパフォーマンスに影響を与えず、非レプリケーションテーブルと同じ速度でクエリが実行されるからです。分散レプリケーションテーブルをクエリするとき、ClickHouseの動作は設定 [max_replica_delay_for_distributed_queries](/operations/settings/settings.md/#max_replica_delay_for_distributed_queries) と [fallback_to_stale_replicas_for_distributed_queries](/operations/settings/settings.md/#fallback_to_stale_replicas_for_distributed_queries) によって制御されます。 - -各 `INSERT` クエリに対して、約10のエントリがZooKeeperに対していくつかのトランザクションを通じて追加されます(正確には、挿入されたデータの各ブロックに対して; 1つのINSERTクエリにはブロックまたは `max_insert_block_size = 1048576` 行ごとに1つのブロックが含まれています)。これは、非レプリケーションテーブルに比べて `INSERT` の遅延がわずかに長くなる原因となります。しかし、データを秒間最大1回の `INSERT` のバッチで挿入するという推奨に従えば、問題はありません。ZooKeeperクラスタを調整するために使用されるClickHouseクラスタ全体では、合計で数百の `INSERTs` が毎秒行われます。データ挿入のスループット(1秒間の行数)は、非レプリケーションデータと同じ高さです。 - -非常に大きなクラスタの場合、シャードごとに異なるZooKeeperクラスタを使用できます。しかし、私たちの経験では、約300サーバーを持つ生産クラスタでは必要性が証明されていません。 - -レプリケーションは非同期でマルチマスターです。 `INSERT` クエリ(および `ALTER`)は、利用可能なサーバーに任意に送信できます。データはクエリが実行されたサーバーに挿入され、その後他のサーバーにコピーされます。非同期であるため、最近挿入されたデータは他のレプリカに少し遅延して表示されます。一部のレプリカが使用できない場合、データはそれらが利用可能になると書き込まれます。レプリカが利用可能な場合、レイテンシは圧縮データのブロックをネットワークを介して転送するのにかかる時間です。レプリケータブルテーブル用のバックグラウンドタスクを実行するスレッドの数は、[background_schedule_pool_size](/operations/server-configuration-parameters/settings.md/#background_schedule_pool_size) 設定で設定できます。 - -`ReplicatedMergeTree` エンジンは、レプリケーションフェッチ用の専用スレッドプールを使用します。プールのサイズは、[background_fetches_pool_size](/operations/server-configuration-parameters/settings#background_fetches_pool_size) 設定によって制限されており、サーバーの再起動で調整できます。 - -デフォルトでは、INSERTクエリは、1つのレプリカからのデータ書き込みの確認を待機します。データが1つのレプリカに正常に書き込まれ、そしてこのレプリカを持つサーバーが存在しなくなると、保存されたデータは失われます。複数のレプリカからデータの書き込み確認を受け取るためには、`insert_quorum` オプションを使用してください。 - -各データブロックは、原子的に書き込まれます。INSERTクエリは、最大`max_insert_block_size = 1048576`行までのブロックに分割されます。言い換えれば、INSERTクエリが1048576行未満であれば、それは原子的に行われます。 - -データブロックは重複排除されます。同じデータブロックの複数回の書き込み(同じ順序で同じ行を含む同じサイズのデータブロック)については、ブロックは1回だけ書き込まれます。これは、クライアントアプリケーションがデータがDBに書き込まれたかどうかわからないネットワークの失敗時を考慮して、INSERTクエリを単に繰り返すことができるためです。データが同一である場合、INSERTが送信されたレプリカは重要ではありません。 `INSERTs`は冪等です。重複排除パラメータは、[merge_tree](/operations/server-configuration-parameters/settings.md/#merge_tree) サーバー設定で制御されます。 - 
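挿入の冪等性を確認するためのスケッチを示します。テーブル名 `repl_demo` とカラム名は説明用の仮定で、上記のようなレプリケーションテーブルを想定しています。

```sql
-- 同一内容・同一順序のデータブロックを再送しても、重複排除により 1 回分しか書き込まれない
INSERT INTO repl_demo (id, value) VALUES (1, 'a'), (2, 'b');
INSERT INTO repl_demo (id, value) VALUES (1, 'a'), (2, 'b');  -- ネットワーク障害後にクライアントが同じ INSERT を再実行した想定

SELECT count() FROM repl_demo;  -- 2 が返る（4 にはならない）
```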
-レプリケーション中、挿入するソースデータのみがネットワークを介して転送されます。それ以降のデータ変換(マージ)は、すべてのレプリカで同じように調整されて実行されます。これによりネットワーク使用量が最小化されるため、レプリケーションは異なるデータセンターにレプリカがある場合でもうまく機能します。(異なるデータセンターへのデータの重複は、レプリケーションの主な目的であることに注意してください。) - -同じデータのレプリカを任意の数持つことができます。私たちの経験に基づいて、比較的信頼性が高く便利な解決策は、生産環境での二重レプリケーションを使用し、各サーバーがRAID-5またはRAID-6(場合によってはRAID-10)を使用することです。 - -システムはレプリカのデータの同期性を監視し、障害後に回復することができます。フェイルオーバーは自動(小さなデータの違いの場合)または半自動(データがあまりにも異なるときで、設定エラーを示す可能性があります)です。 - -## レプリケーションテーブルの作成 {#creating-replicated-tables} - -:::note -ClickHouse Cloud では、レプリケーションが自動的に管理されます。引数を追加せずにテーブルを作成してください。たとえば、以下のテキストでは、 - -```sql -ENGINE = ReplicatedMergeTree('/clickhouse/tables/{shard}/table_name', '{replica}', ver) -``` - -を次のように置き換えます: - -```sql -ENGINE = ReplicatedMergeTree -``` -::: - -テーブルエンジン名に `Replicated` プレフィックスが追加されます。例えば、`ReplicatedMergeTree` 。 - -:::tip -ClickHouse Cloud では `Replicated` の追加はオプションです。すべてのテーブルがレプリケートされます。 -::: - -### Replicated\*MergeTreeパラメータ {#replicatedmergetree-parameters} - -#### zoo_path {#zoo_path} - -`zoo_path` — ClickHouse Keeper内のテーブルへのパス。 - -#### replica_name {#replica_name} - -`replica_name` — ClickHouse Keeperにおけるレプリカ名。 - -#### other_parameters {#other_parameters} - -`other_parameters` — レプリケーションされたバージョンの作成に使用されるエンジンのパラメータ(たとえば、`ReplacingMergeTree`のバージョン)。 - -例: - -```sql -CREATE TABLE table_name -( - EventDate DateTime, - CounterID UInt32, - UserID UInt32, - ver UInt16 -ENGINE = ReplicatedReplacingMergeTree('/clickhouse/tables/{layer}-{shard}/table_name', '{replica}', ver) -PARTITION BY toYYYYMM(EventDate) -ORDER BY (CounterID, EventDate, intHash32(UserID)) -SAMPLE BY intHash32(UserID); -``` - -
- -非推奨の構文の例 - -```sql -CREATE TABLE table_name -( - EventDate DateTime, - CounterID UInt32, - UserID UInt32 -) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{shard}/table_name', '{replica}', EventDate, intHash32(UserID), (CounterID, EventDate, intHash32(UserID), EventTime), 8192); -``` - -
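あわせて、マクロ置換と分散 DDL（`ON CLUSTER`）を組み合わせた作成例のスケッチも示します。クラスタ名 `my_cluster` は説明用の仮定です。

```sql
CREATE TABLE table_name ON CLUSTER my_cluster
(
    EventDate DateTime,
    CounterID UInt32,
    UserID    UInt32
)
ENGINE = ReplicatedMergeTree('/clickhouse/tables/{shard}/table_name', '{replica}')
PARTITION BY toYYYYMM(EventDate)
ORDER BY (CounterID, EventDate, intHash32(UserID));
```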
- -例のように、これらのパラメータには、中括弧内の置き換えが含まれる場合があります。置き換えられた値は、設定ファイルの [macros](/operations/server-configuration-parameters/settings.md/#macros) セクションから取得されます。 - -例: - -```xml - - 02 - example05-02-1 - -``` - -ClickHouse Keeper内のテーブルへのパスは、各レプリケーションテーブルに対してユニークである必要があります。異なるシャードのテーブルは異なるパスを持つ必要があります。 -この場合、パスは次の部分で構成されます: - -`/clickhouse/tables/` は共通プレフィックスです。正確にこれを使用することをお勧めします。 - -`{shard}` は、シャード識別子に展開されます。 - -`table_name` は、ClickHouse Keeper内のテーブルのノード名です。テーブル名と同じにするのが良いアイデアです。これは明示的に定義され、テーブル名と異なり、RENAMEクエリの後に変わることはありません。 -*ヒント*: `table_name`の前にデータベース名を追加することもできます。例: `db_name.table_name` - -2つの組み込みの置き換え `{database}` と `{table}` を使用することができます。これらはそれぞれテーブル名とデータベース名に展開されます(これらのマクロが `macros` セクションで定義されていない限り)。したがって、 ZooKeeper のパスは `'/clickhouse/tables/{shard}/{database}/{table}'` として指定できます。 -これらの組み込みの置き換えを使用する際には、テーブルの名前変更に注意してください。ClickHouse Keeper内のパスは変更できず、テーブルの名前が変更されると、マクロは異なるパスに展開され、テーブルはClickHouse Keeperに存在しないパスを参照し、読み取り専用モードに入ります。 - -レプリカ名は、同じテーブルの異なるレプリカを識別します。例のように、サーバー名を使用できます。この名前は、各シャード内でユニークである必要があります。 - -置き換えを使用するのではなく、パラメータを明示的に定義することもできます。これは、テストや小規模クラスタの構成に便利です。ただし、この場合、分散DDLクエリ(`ON CLUSTER`)を使用することはできません。 - -大規模なクラスタで作業する場合、置き換えを使用することをお勧めします。なぜなら、それによりエラーの可能性が低くなるからです。 - -サーバー構成ファイルで `Replicated` テーブルエンジンのデフォルト引数を指定することができます。たとえば: - -```xml -/clickhouse/tables/{shard}/{database}/{table} -{replica} -``` - -この場合、テーブルを作成する際に引数を省略することができます: - -```sql -CREATE TABLE table_name ( - x UInt32 -) ENGINE = ReplicatedMergeTree -ORDER BY x; -``` - -これは次のように等価です: - -```sql -CREATE TABLE table_name ( - x UInt32 -) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{shard}/{database}/table_name', '{replica}') -ORDER BY x; -``` - -各レプリカで `CREATE TABLE` クエリを実行します。このクエリは新しいレプリケーションテーブルを作成するか、既存のテーブルに新しいレプリカを追加します。 - -もし他のレプリカに既にデータが含まれている場合や新しいレプリカを追加した場合、クエリを実行した後、他のレプリカから新しいレプリカにデータがコピーされます。言い換えれば、新しいレプリカは他のレプリカと同期されます。 - -レプリカを削除するには、`DROP TABLE` を実行します。ただし、削除されるのは1つのレプリカのみで、クエリが実行されたサーバーに存在するレプリカだけです。 - -## 障害後の回復 {#recovery-after-failures} - -サーバーが起動する際にClickHouse Keeperが利用できない場合、レプリケーションテーブルは読み取り専用モードに切り替わります。システムは定期的にClickHouse Keeperへの接続を試みます。 - -`INSERT`中にClickHouse Keeperが利用できない場合、もしくはClickHouse Keeperとのやり取り中にエラーが発生した場合、例外がスローされます。 - -ClickHouse Keeperに接続した後、システムはローカルファイルシステム上のデータセットが期待されるデータセットと一致するかを確認します(ClickHouse Keeperはこの情報を保存します)。小さな不一致がある場合、システムはレプリカとのデータを同期することで解決します。 - -システムが破損したデータパーツ(ファイルのサイズが誤っている)や認識されないパーツ(ファイルシステムに書き込まれたがClickHouse Keeperに記録されていないパーツ)を検出した場合、それらを `detached` サブディレクトリに移動します(削除されません)。不足しているパーツはレプリカからコピーされます。 - -ClickHouseは、自動的に大量のデータを削除するような破壊的操作を行わないことに注意してください。 - -サーバーが起動したとき(またはClickHouse Keeperとの新しいセッションを確立したとき)、システムはすべてのファイルの数量とサイズのみを確認します。もしファイルサイズが一致しているが、どこかの中間でバイトが変更されている場合、これは即座には検出されませんが、`SELECT` クエリのためにデータを読み取ろうとしたときにのみ検出されます。クエリが不一致のチェックサムや圧縮ブロックのサイズに関する例外をスローします。この場合、データパーツは検証キューに追加され、必要に応じてレプリカからコピーされます。 - -ローカルデータセットが期待されるデータセットとあまりにも異なる場合、安全メカニズムが起動します。サーバーはこれをログに記録し、起動を拒否します。この状況が発生する可能性があるのは、設定エラーの可能性を示すためです。これは、あるシャードのレプリカが別のシャードのレプリカとして誤って構成されている場合に発生します。ただし、このメカニズムのしきい値はかなり低く設定されており、この状況は通常の障害回復中に発生する可能性があります。この場合、データは半自動的に復元されます-「ボタンを押す」ことによって。 - -回復を開始するには、ClickHouse Keeperに `/path_to_table/replica_name/flags/force_restore_data` ノードを作成し、任意の内容を含めるか、すべてのレプリケーションテーブルを復元するためのコマンドを実行します: - -```bash -sudo -u clickhouse touch /var/lib/clickhouse/flags/force_restore_data -``` - -その後、サーバーを再起動します。起動時に、サーバーはこれらのフラグを削除し、回復を開始します。 - -## 完全なデータ損失後の回復 {#recovery-after-complete-data-loss} - -サーバーのすべてのデータとメタデータが消失した場合、次の手順に従って回復します: - -1. サーバーにClickHouseをインストールします。サブスティテューションを設定ファイル内で正しく定義します。これにはシャード識別子とレプリカが含まれます。 -2. 
手動で複製する必要のある未レプリケートテーブルがあった場合、レプリカからデータをコピーします(`/var/lib/clickhouse/data/db_name/table_name/` ディレクトリで)。 -3. レプリカから `/var/lib/clickhouse/metadata/` にあるテーブルの定義をコピーします。テーブル定義内でシャードまたはレプリカ識別子が明示的に定義されている場合は、それを修正して、対応するレプリカに合うようにします。(代わりにサーバーを起動し、`.sql`ファイルに存在すべきすべての `ATTACH TABLE` クエリを実行します。) -4. 回復を開始するには、ClickHouse Keeperに `/path_to_table/replica_name/flags/force_restore_data` を任意の内容で持つノードを作成するか、すべてのレプリケーションテーブルを復元するためのコマンドを実行します:`sudo -u clickhouse touch /var/lib/clickhouse/flags/force_restore_data` - -その後、サーバーを起動します(すでに実行されている場合は再起動を行います)。データはレプリカからダウンロードされます。 - -別の回復オプションとして、失われたレプリカに関する情報をClickHouse Keeperから削除し(`/path_to_table/replica_name`)、その後、"[レプリケーションテーブルの作成](#creating-replicated-tables)"に記載されているように再作成します。 - -回復中はネットワーク帯域幅に制限はありません。複数のレプリカを同時に復元する場合は、これを考慮してください。 - -## MergeTreeからReplicatedMergeTreeへの変換 {#converting-from-mergetree-to-replicatedmergetree} - -私たちは、`MergeTree`という用語を、`ReplicatedMergeTree` と同様に、`MergeTreeファミリー内のすべてのテーブルエンジンを指すために使用します。 - -もし手動でレプリケートされた `MergeTree` テーブルがあった場合、それをレプリケーション可能なテーブルに変換できます。この操作が必要になるのは、`MergeTree` テーブル内にすでに大量のデータを集めており、今やレプリケーションを有効にしたい場合です。 - -[ATTACH TABLE ... AS REPLICATED](/sql-reference/statements/attach.md#attach-mergetree-table-as-replicatedmergetree) ステートメントを使用して、分離された `MergeTree` テーブルを `ReplicatedMergeTree` としてアタッチできます。 - -`MergeTree`テーブルは、テーブルのデータディレクトリに `convert_to_replicated` フラグが設定されている場合、サーバーの再起動時に自動的に変換されます(`/store/xxx/xxxyyyyy-yyyy-yyyy-yyyy-yyyyyyyyyyyy/` で `Atomic`データベース用)。 -空の `convert_to_replicated` ファイルを作成すると、次回のサーバー再起動時にテーブルがレプリケーション可能として読み込まれます。 - -このクエリを使用してテーブルのデータパスを取得できます。テーブルが複数のデータパスを持っている場合は、最初のものを使用する必要があります。 - -```sql -SELECT data_paths FROM system.tables WHERE table = 'table_name' AND database = 'database_name'; -``` - -`ReplicatedMergeTree` テーブルは、`default_replica_path` と `default_replica_name` 設定の値を使用して作成されます。 -他のレプリカに変換されたテーブルを作成するには、`ReplicatedMergeTree`エンジンの最初の引数でそのパスを明示的に指定する必要があります。次のクエリを使用してそのパスを取得できます。 - -```sql -SELECT zookeeper_path FROM system.replicas WHERE table = 'table_name'; -``` - -これには手動の方法もあります。 - -さまざまなレプリカでデータが異なる場合、まずそれを同期させるか、1つを除くすべてのレプリカでこのデータを削除します。 - -既存のMergeTreeテーブルの名前を変更し、古い名前でReplicatedMergeTreeテーブルを作成します。 -古いテーブルから新しいテーブルデータのディレクトリ内にある `detached` サブディレクトリにデータを移動します(`/var/lib/clickhouse/data/db_name/table_name/`)。 -次に、データパーツを作業セットに追加するために、レプリカの1つで `ALTER TABLE ATTACH PARTITION` を実行します。 - -## ReplicatedMergeTreeからMergeTreeへの変換 {#converting-from-replicatedmergetree-to-mergetree} - -[ATTACH TABLE ... 
AS NOT REPLICATED](/sql-reference/statements/attach.md#attach-mergetree-table-as-replicatedmergetree) ステートメントを使用して、分離された `ReplicatedMergeTree` テーブルを単一のサーバー上で `MergeTree` としてアタッチできます。 - -これを行うための別の方法は、サーバーを再起動することです。異なる名前のMergeTreeテーブルを作成します。`ReplicatedMergeTree` テーブルデータのディレクトリから新しいテーブルのデータディレクトリにすべてのデータを移動します。それから、`ReplicatedMergeTree` テーブルを削除してサーバーを再起動します。 - -サーバーを起動せずに `ReplicatedMergeTree` テーブルを削除したい場合: - -- メタデータディレクトリ (`/var/lib/clickhouse/metadata/`) から対応する .sql ファイルを削除します。 -- ClickHouse Keeperから対応するパスを削除します(`/path_to_table/replica_name`)。 - -この後、サーバーを起動し、`MergeTree` テーブルを作成し、データをそのディレクトリに移動し、その後サーバーを再起動します。 - -## ClickHouse Keeperクラスタ内のメタデータが失われたまたは損傷した場合の回復 {#recovery-when-metadata-in-the-zookeeper-cluster-is-lost-or-damaged} - -ClickHouse Keeperのデータが失われたまたは損傷した場合、上記のように未レプリケートテーブルにデータを移動することによってデータを保存できます。 - -**参照** - -- [background_schedule_pool_size](/operations/server-configuration-parameters/settings.md/#background_schedule_pool_size) -- [background_fetches_pool_size](/operations/server-configuration-parameters/settings.md/#background_fetches_pool_size) -- [execute_merges_on_single_replica_time_threshold](/operations/settings/merge-tree-settings#execute_merges_on_single_replica_time_threshold) -- [max_replicated_fetches_network_bandwidth](/operations/settings/merge-tree-settings.md/#max_replicated_fetches_network_bandwidth) -- [max_replicated_sends_network_bandwidth](/operations/settings/merge-tree-settings.md/#max_replicated_sends_network_bandwidth) diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/mergetree-family/replication.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/mergetree-family/replication.md.hash deleted file mode 100644 index 7084fbb16c8..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/mergetree-family/replication.md.hash +++ /dev/null @@ -1 +0,0 @@ -9cf4fb14e2039070 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/mergetree-family/summingmergetree.md b/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/mergetree-family/summingmergetree.md deleted file mode 100644 index a7fa1bcf1e1..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/mergetree-family/summingmergetree.md +++ /dev/null @@ -1,198 +0,0 @@ ---- -description: 'SummingMergeTree inherits from the MergeTree engine. Its key feature - is the ability to automatically sum numeric data during part merges.' -sidebar_label: 'SummingMergeTree' -sidebar_position: 50 -slug: '/engines/table-engines/mergetree-family/summingmergetree' -title: 'SummingMergeTree' ---- - - - - -# SummingMergeTree - -このエンジンは [MergeTree](/engines/table-engines/mergetree-family/versionedcollapsingmergetree) から継承されています。違いは、`SummingMergeTree` テーブルのデータパーツをマージする際に、ClickHouse が同じ主キーのすべての行(より正確には、同じ [ソートキー](../../../engines/table-engines/mergetree-family/mergetree.md))を持つ行を、数値データ型のカラムの合計値を持つ1行に置き換える点です。ソートキーが単一のキー値に大きな数の行が対応するように構成されている場合、これによりストレージボリュームが大幅に削減され、データ選択が迅速になります。 - -エンジンは `MergeTree` と共に使用することをお勧めします。すべてのデータを `MergeTree` テーブルに保存し、集計データを保存するために `SummingMergeTree` を使用します。たとえば、レポートを作成するときです。このアプローチにより、誤って構成された主キーによる貴重なデータ損失を防ぐことができます。 - -## テーブルの作成 {#creating-a-table} - -```sql -CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] -( - name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1], - name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2], - ... 
-) ENGINE = SummingMergeTree([columns]) -[PARTITION BY expr] -[ORDER BY expr] -[SAMPLE BY expr] -[SETTINGS name=value, ...] -``` - -リクエストパラメータの説明については、[リクエストの説明](../../../sql-reference/statements/create/table.md)を参照してください。 - -### SummingMergeTree のパラメータ {#parameters-of-summingmergetree} - -#### columns {#columns} - -`columns` - 値が合計されるカラムの名前のタプル。オプションのパラメータです。 -カラムは数値型でなければならず、パーティションまたはソートキーに含まれてはいけません。 - -`columns` が指定されない場合、ClickHouse はソートキーに含まれないすべての数値データ型のカラムの値を合計します。 - -### クエリ句 {#query-clauses} - -`SummingMergeTree` テーブルを作成する際には、`MergeTree` テーブルを作成する時と同じ [句](../../../engines/table-engines/mergetree-family/mergetree.md) が必要です。 - -
- -テーブルを作成するための非推奨メソッド - -:::note -このメソッドは新しいプロジェクトでは使用しないでください。可能であれば、古いプロジェクトを上記に記載した方法に切り替えてください。 -::: - -```sql -CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] -( - name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1], - name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2], - ... -) ENGINE [=] SummingMergeTree(date-column [, sampling_expression], (primary, key), index_granularity, [columns]) -``` - -すべてのパラメータは `MergeTree` での意味と同じです。 - -- `columns` - 合計されるカラムの名前のタプル。オプションのパラメータです。詳細は上記のテキストを参照してください。 - -
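上記の「クエリ句」で述べたとおり、`SummingMergeTree` でも `MergeTree` と同じ句(`PARTITION BY`、`ORDER BY` など)を指定できます。以下は説明用の架空のスケッチで、合計対象のカラムをタプルで明示的に指定した例です(テーブル名・カラム名はここでの説明のための仮のものです):

```sql
-- 架空の例: date と site_id がソートキー、hits と bytes が合計対象
CREATE TABLE daily_totals
(
    date Date,
    site_id UInt32,
    hits UInt64,
    bytes UInt64
)
ENGINE = SummingMergeTree((hits, bytes))
PARTITION BY toYYYYMM(date)
ORDER BY (date, site_id)
```

合計対象のタプルには、ソートキーに含まれるカラム(`date`、`site_id`)を入れない点に注意してください。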
- -## 使用例 {#usage-example} - -次のテーブルを考えてみましょう。 - -```sql -CREATE TABLE summtt -( - key UInt32, - value UInt32 -) -ENGINE = SummingMergeTree() -ORDER BY key -``` - -データを挿入します。 - -```sql -INSERT INTO summtt Values(1,1),(1,2),(2,1) -``` - -ClickHouse はすべての行を完全に合計しない場合があります([下記参照](#data-processing))。そのため、クエリ内で集計関数 `sum` と `GROUP BY` 句を使用します。 - -```sql -SELECT key, sum(value) FROM summtt GROUP BY key -``` - -```text -┌─key─┬─sum(value)─┐ -│ 2 │ 1 │ -│ 1 │ 3 │ -└─────┴────────────┘ -``` - -## データ処理 {#data-processing} - -データがテーブルに挿入されると、それらはそのまま保存されます。ClickHouse は挿入されたデータパーツを定期的にマージし、この際に同じ主キーを持つ行が合計され、それぞれのデータパーツごとに1つに置き換えられます。 - -ClickHouse はデータパーツをマージできるため、異なる結果のデータパーツが同じ主キーを持つ行で構成されることがあります。すなわち、合計が不完全になる可能性があります。そのため、クエリ内で集計関数 [sum()](/sql-reference/aggregate-functions/reference/sum) と `GROUP BY` 句を使用する必要があります。上記の例のように。 - -### 合計に関する一般規則 {#common-rules-for-summation} - -数値データ型のカラムの値が合計されます。カラムのセットは `columns` パラメータによって定義されます。 - -合計のためのカラムのすべての値が 0 である場合、その行は削除されます。 - -カラムが主キーに含まれておらず、合計されない場合、既存の値から任意の値が選択されます。 - -主キーのカラムについては、値は合計されません。 - -### 集約関数カラムでの合計 {#the-summation-in-the-aggregatefunction-columns} - -[AggregateFunction 型](../../../sql-reference/data-types/aggregatefunction.md) のカラムについて、ClickHouse はその関数に従って集約する [AggregatingMergeTree](../../../engines/table-engines/mergetree-family/aggregatingmergetree.md) エンジンのように動作します。 - -### ネストされた構造 {#nested-structures} - -テーブルは特別な方法で処理されるネストされたデータ構造を持つことができます。 - -ネストされたテーブルの名前が `Map` で終わり、少なくとも次の条件を満たす2カラム以上を含む場合: - -- 最初のカラムは数値型 `(*Int*, Date, DateTime)` または文字列 `(String, FixedString)` で、これを `key` と呼びます。 -- 他のカラムは算術型 `(*Int*, Float32/64)` で、これを `(values...)` と呼びます。 - -このネストされたテーブルは `key => (values...)` のマッピングとして解釈され、行をマージするときに、2つのデータセットの要素が `key` によってマージされ、対応する `(values...)` の合計が計算されます。 - -例: - -```text -DROP TABLE IF EXISTS nested_sum; -CREATE TABLE nested_sum -( - date Date, - site UInt32, - hitsMap Nested( - browser String, - imps UInt32, - clicks UInt32 - ) -) ENGINE = SummingMergeTree -PRIMARY KEY (date, site); - -INSERT INTO nested_sum VALUES ('2020-01-01', 12, ['Firefox', 'Opera'], [10, 5], [2, 1]); -INSERT INTO nested_sum VALUES ('2020-01-01', 12, ['Chrome', 'Firefox'], [20, 1], [1, 1]); -INSERT INTO nested_sum VALUES ('2020-01-01', 12, ['IE'], [22], [0]); -INSERT INTO nested_sum VALUES ('2020-01-01', 10, ['Chrome'], [4], [3]); - -OPTIMIZE TABLE nested_sum FINAL; -- マージをエミュレート - -SELECT * FROM nested_sum; -┌───────date─┬─site─┬─hitsMap.browser───────────────────┬─hitsMap.imps─┬─hitsMap.clicks─┐ -│ 2020-01-01 │ 10 │ ['Chrome'] │ [4] │ [3] │ -│ 2020-01-01 │ 12 │ ['Chrome','Firefox','IE','Opera'] │ [20,11,22,5] │ [1,3,0,1] │ -└────────────┴──────┴───────────────────────────────────┴──────────────┴────────────────┘ - -SELECT - site, - browser, - impressions, - clicks -FROM -( - SELECT - site, - sumMap(hitsMap.browser, hitsMap.imps, hitsMap.clicks) AS imps_map - FROM nested_sum - GROUP BY site -) -ARRAY JOIN - imps_map.1 AS browser, - imps_map.2 AS impressions, - imps_map.3 AS clicks; - -┌─site─┬─browser─┬─impressions─┬─clicks─┐ -│ 12 │ Chrome │ 20 │ 1 │ -│ 12 │ Firefox │ 11 │ 3 │ -│ 12 │ IE │ 22 │ 0 │ -│ 12 │ Opera │ 5 │ 1 │ -│ 10 │ Chrome │ 4 │ 3 │ -└──────┴─────────┴─────────────┴────────┘ -``` - -データを要求する際には、`Map` の集計には [sumMap(key, value)](../../../sql-reference/aggregate-functions/reference/summap.md) 関数を使用します。 - -ネストされたデータ構造では、合計のためのカラムのタプルにそのカラムを指定する必要はありません。 - -## 関連コンテンツ {#related-content} - -- ブログ: [ClickHouse における集約関数コンビネータの使用](https://clickhouse.com/blog/aggregate-functions-combinators-in-clickhouse-for-arrays-maps-and-states) diff 
--git a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/mergetree-family/summingmergetree.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/mergetree-family/summingmergetree.md.hash deleted file mode 100644 index 9ba120e0520..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/mergetree-family/summingmergetree.md.hash +++ /dev/null @@ -1 +0,0 @@ -2904a5302d63a1c8 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/mergetree-family/versionedcollapsingmergetree.md b/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/mergetree-family/versionedcollapsingmergetree.md deleted file mode 100644 index 1bddcab5bff..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/mergetree-family/versionedcollapsingmergetree.md +++ /dev/null @@ -1,239 +0,0 @@ ---- -description: 'Allows for quick writing of object states that are continually changing, - and deleting old object states in the background.' -sidebar_label: 'VersionedCollapsingMergeTree' -sidebar_position: 80 -slug: '/engines/table-engines/mergetree-family/versionedcollapsingmergetree' -title: 'VersionedCollapsingMergeTree' ---- - - - - -# VersionedCollapsingMergeTree - -このエンジンは: - -- 継続的に変化するオブジェクトの状態を迅速に記録できるようにします。 -- 古いオブジェクトの状態をバックグラウンドで削除します。これにより、ストレージの使用量が大幅に削減されます。 - -詳細はセクション [Collapsing](#table_engines_versionedcollapsingmergetree) を参照してください。 - -このエンジンは [MergeTree](/engines/table-engines/mergetree-family/versionedcollapsingmergetree) を継承し、データパーツのマージアルゴリズムに行を崩すためのロジックを追加します。`VersionedCollapsingMergeTree` は [CollapsingMergeTree](../../../engines/table-engines/mergetree-family/collapsingmergetree.md) と同じ目的を果たしますが、データを複数スレッドで任意の順序で挿入することを可能にする異なる崩壊アルゴリズムを使用します。特に、`Version` カラムは、行が間違った順序で挿入されても適切に行を崩すのに役立ちます。それに対して、`CollapsingMergeTree` は厳密に連続した挿入しか許可しません。 - -## テーブルの作成 {#creating-a-table} - -```sql -CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] -( - name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1], - name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2], - ... -) ENGINE = VersionedCollapsingMergeTree(sign, version) -[PARTITION BY expr] -[ORDER BY expr] -[SAMPLE BY expr] -[SETTINGS name=value, ...] -``` - -クエリパラメータの説明については、[クエリの説明](../../../sql-reference/statements/create/table.md) を参照してください。 - -### エンジンパラメータ {#engine-parameters} - -```sql -VersionedCollapsingMergeTree(sign, version) -``` - -| パラメータ | 説明 | 型 | -|-------------|---------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| `sign` | 行のタイプを持つカラムの名前: `1` は「状態」行、 `-1` は「キャンセル」行です。 | [`Int8`](/sql-reference/data-types/int-uint) | -| `version` | オブジェクト状態のバージョンを持つカラムの名前。 | [`Int*`](/sql-reference/data-types/int-uint), [`UInt*`](/sql-reference/data-types/int-uint), [`Date`](/sql-reference/data-types/date), [`Date32`](/sql-reference/data-types/date32), [`DateTime`](/sql-reference/data-types/datetime) または [`DateTime64`](/sql-reference/data-types/datetime64) | - -### クエリ句 {#query-clauses} - -`VersionedCollapsingMergeTree` テーブルを作成する際には、`MergeTree` テーブルを作成する際と同じ [句](../../../engines/table-engines/mergetree-family/mergetree.md) が必要です。 - -
- -テーブルを作成するための非推奨メソッド - -:::note -新しいプロジェクトではこの方法を使用しないでください。可能であれば、古いプロジェクトを上記の方法に切り替えてください。 -::: - -```sql -CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] -( - name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1], - name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2], - ... -) ENGINE [=] VersionedCollapsingMergeTree(date-column [, sampling_expression], (primary, key), index_granularity, sign, version) -``` - -`sign` と `version` 以外のすべてのパラメータは、`MergeTree` と同じ意味を持ちます。 - -- `sign` — 行のタイプを持つカラムの名前: `1` は「状態」行、 `-1` は「キャンセル」行です。 - - カラムデータ型 - `Int8`。 - -- `version` — オブジェクト状態のバージョンを持つカラムの名前。 - - カラムデータ型は `UInt*` である必要があります。 - -
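「クエリ句」で述べたとおり、`VersionedCollapsingMergeTree` でも `MergeTree` と同じ句を指定できます。以下は説明用の架空のスケッチです(テーブル名・カラム名は仮のものです):

```sql
-- 架空の例: Sign が行タイプ、Version が状態のバージョンを表す
CREATE TABLE user_state
(
    EventDate Date,
    UserID UInt64,
    PageViews UInt32,
    Duration UInt32,
    Sign Int8,
    Version UInt8
)
ENGINE = VersionedCollapsingMergeTree(Sign, Version)
PARTITION BY toYYYYMM(EventDate)
ORDER BY (UserID, EventDate)
```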
- -## 崩壊 {#table_engines_versionedcollapsingmergetree} - -### データ {#data} - -あるオブジェクトの継続的に変化するデータを保存する必要がある状況を考えてみましょう。オブジェクトに対して一行を持ち、変更があるたびにその行を更新するのは合理的です。ただし、更新操作はデータストレージの書き換えが必要なため、DBMS には高コストで遅いです。データを迅速に書き込む必要がある場合、更新は受け入れられませんが、変更を次のようにオブジェクトに順次書き込むことができます。 - -行を書き込むときに `Sign` カラムを使用します。`Sign = 1` は行がオブジェクトの状態を表すことを意味します(これを「状態」行と呼びます)。`Sign = -1` は同じ属性を持つオブジェクトの状態のキャンセルを示します(これを「キャンセル」行と呼びます)。また、`Version` カラムを使用します。これはオブジェクトの各状態を別の番号で識別する必要があります。 - -例えば、我々はユーザーがあるサイトで訪れたページ数とその滞在時間を計算したいと思っています。ある時点で、ユーザーアクティビティの状態を示す次の行を書くことができます。 - -```text -┌──────────────UserID─┬─PageViews─┬─Duration─┬─Sign─┬─Version─┐ -│ 4324182021466249494 │ 5 │ 146 │ 1 │ 1 | -└─────────────────────┴───────────┴──────────┴──────┴─────────┘ -``` - -その後、ユーザーアクティビティの変更を登録し、次の2行で書き込みます。 - -```text -┌──────────────UserID─┬─PageViews─┬─Duration─┬─Sign─┬─Version─┐ -│ 4324182021466249494 │ 5 │ 146 │ -1 │ 1 | -│ 4324182021466249494 │ 6 │ 185 │ 1 │ 2 | -└─────────────────────┴───────────┴──────────┴──────┴─────────┘ -``` - -最初の行はオブジェクト(ユーザー)の前の状態をキャンセルします。これはキャンセルされた状態のすべてのフィールドを `Sign` を除きコピーする必要があります。 - -2行目は現在の状態を含みます。 - -ユーザーアクティビティの最後の状態だけが必要なため、以下の行は - -```text -┌──────────────UserID─┬─PageViews─┬─Duration─┬─Sign─┬─Version─┐ -│ 4324182021466249494 │ 5 │ 146 │ 1 │ 1 | -│ 4324182021466249494 │ 5 │ 146 │ -1 │ 1 | -└─────────────────────┴───────────┴──────────┴──────┴─────────┘ -``` - -削除でき、オブジェクトの無効(古い)状態が崩壊します。`VersionedCollapsingMergeTree` は、データパーツをマージする際にこれを行います。 - -なぜ変更ごとに2行が必要なのかを理解するには、[アルゴリズム](#table_engines-versionedcollapsingmergetree-algorithm) を見てください。 - -**使用に関する注意事項** - -1. データを記録するプログラムは、オブジェクトの状態をキャンセルできるように、その状態を記憶している必要があります。「キャンセル」文字列は、プライマリキーのフィールドと「状態」文字列のバージョンおよび逆の `Sign` を含むコピーを持つ必要があります。これにより初期ストレージサイズが増加しますが、データを書き込むのが迅速になります。 -2. カラムに長大な配列が存在すると書き込み負荷によってエンジンの効率が低下します。データが単純であればあるほど、効率が向上します。 -3. 
`SELECT` 結果はオブジェクトの変更履歴の一貫性に大きく依存します。挿入するデータを準備する際は正確に行ってください。不整合なデータによって得られる結果は予測不能であり、セッション深度などの非負メトリクスに対して負の値を得ることがあります。 - -### アルゴリズム {#table_engines-versionedcollapsingmergetree-algorithm} - -ClickHouse がデータパーツをマージする場合、同じプライマリキーとバージョンを持ち、異なる `Sign` を持つ各ペアの行を削除します。行の順序は重要ではありません。 - -ClickHouse がデータを挿入する際、行はプライマリキーで並べ替えられます。`Version` カラムがプライマリキーに含まれていない場合、ClickHouse はそれを暗黙的に最後のフィールドとしてプライマリキーに追加し、それを使用して並べ替えます。 - -## データの選択 {#selecting-data} - -ClickHouse は、同じプライマリキーを持つすべての行が同じ結果データパーツまたは同じ物理サーバーに存在することを保証しません。これはデータの書き込みおよびその後のデータパーツのマージの双方に当てはまります。さらに、ClickHouse は複数のスレッドで `SELECT` クエリを処理し、結果の行の順序を予測することはできません。したがって、`VersionedCollapsingMergeTree` テーブルから完全に「崩壊」したデータを得る必要がある場合には集計が必要です。 - -崩壊を最終化するには、`GROUP BY` 句と Sign を考慮する集計関数を持つクエリを書きます。例えば、数量を計算するには `count()` の代わりに `sum(Sign)` を使用します。何かの合計を計算するには、`sum(Sign * x)` を使用し、`HAVING sum(Sign) > 0` を追加します。 - -これにより、集計 `count`、`sum`、および `avg` をこの方法で計算できます。オブジェクトに少なくとも一つの未崩壊の状態がある場合、集計 `uniq` を計算できます。集計 `min` および `max` は計算できません。なぜなら、`VersionedCollapsingMergeTree` は崩壊した状態の値の履歴を保存しないからです。 - -集計なしで「崩壊」したデータを抽出したい場合(例えば、最新の値が特定の条件に一致する行が存在するかを確認するため)、`FROM` 句に `FINAL` 修飾子を使用できます。このアプローチは効率が悪く、大規模なテーブルでは使用すべきではありません。 - -## 使用例 {#example-of-use} - -例のデータ: - -```text -┌──────────────UserID─┬─PageViews─┬─Duration─┬─Sign─┬─Version─┐ -│ 4324182021466249494 │ 5 │ 146 │ 1 │ 1 | -│ 4324182021466249494 │ 5 │ 146 │ -1 │ 1 | -│ 4324182021466249494 │ 6 │ 185 │ 1 │ 2 | -└─────────────────────┴───────────┴──────────┴──────┴─────────┘ -``` - -テーブルの作成: - -```sql -CREATE TABLE UAct -( - UserID UInt64, - PageViews UInt8, - Duration UInt8, - Sign Int8, - Version UInt8 -) -ENGINE = VersionedCollapsingMergeTree(Sign, Version) -ORDER BY UserID -``` - -データを挿入する: - -```sql -INSERT INTO UAct VALUES (4324182021466249494, 5, 146, 1, 1) -``` - -```sql -INSERT INTO UAct VALUES (4324182021466249494, 5, 146, -1, 1),(4324182021466249494, 6, 185, 1, 2) -``` - -二つの異なるデータパーツを作成するために二つの `INSERT` クエリを使用します。データを単一のクエリで挿入すると、ClickHouse は一つのデータパーツを作成し、決してマージを実行しません。 - -データを取得する: - -```sql -SELECT * FROM UAct -``` - -```text -┌──────────────UserID─┬─PageViews─┬─Duration─┬─Sign─┬─Version─┐ -│ 4324182021466249494 │ 5 │ 146 │ 1 │ 1 │ -└─────────────────────┴───────────┴──────────┴──────┴─────────┘ -┌──────────────UserID─┬─PageViews─┬─Duration─┬─Sign─┬─Version─┐ -│ 4324182021466249494 │ 5 │ 146 │ -1 │ 1 │ -│ 4324182021466249494 │ 6 │ 185 │ 1 │ 2 │ -└─────────────────────┴───────────┴──────────┴──────┴─────────┘ -``` - -ここで何が見え、崩壊された部分はどこですか? 
-我々は二つの `INSERT` クエリを使用して二つのデータパーツを作成しました。`SELECT` クエリは二つのスレッドで実行され、結果は行のランダムな順序です。 -崩壊はまだ行われていないため、データパーツはまだマージされていません。ClickHouse はデータパーツを未知のタイミングでマージしますが、それを予測することはできません。 - -これが集計が必要な理由です: - -```sql -SELECT - UserID, - sum(PageViews * Sign) AS PageViews, - sum(Duration * Sign) AS Duration, - Version -FROM UAct -GROUP BY UserID, Version -HAVING sum(Sign) > 0 -``` - -```text -┌──────────────UserID─┬─PageViews─┬─Duration─┬─Version─┐ -│ 4324182021466249494 │ 6 │ 185 │ 2 │ -└─────────────────────┴───────────┴──────────┴─────────┘ -``` - -集計が不要で崩壊を強制したい場合、`FROM` 句に `FINAL` 修飾子を使用できます。 - -```sql -SELECT * FROM UAct FINAL -``` - -```text -┌──────────────UserID─┬─PageViews─┬─Duration─┬─Sign─┬─Version─┐ -│ 4324182021466249494 │ 6 │ 185 │ 1 │ 2 │ -└─────────────────────┴───────────┴──────────┴──────┴─────────┘ -``` - -これは非常に非効率的なデータ選択方法です。大きなテーブルに対しては使用しないでください。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/mergetree-family/versionedcollapsingmergetree.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/mergetree-family/versionedcollapsingmergetree.md.hash deleted file mode 100644 index 581481e1989..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/mergetree-family/versionedcollapsingmergetree.md.hash +++ /dev/null @@ -1 +0,0 @@ -5295374ac77f9b8a diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/special/buffer.md b/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/special/buffer.md deleted file mode 100644 index 19b730351fe..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/special/buffer.md +++ /dev/null @@ -1,114 +0,0 @@ ---- -description: 'Buffers the data to write in RAM, periodically flushing it to another - table. During the read operation, data is read from the buffer and the other table - simultaneously.' 
-sidebar_label: 'Buffer' -sidebar_position: 120 -slug: '/engines/table-engines/special/buffer' -title: 'Buffer Table Engine' ---- - - - - -# バッファーテーブルエンジン - -データを書き込むためにRAMにバッファリングし、定期的に別のテーブルにフラッシュします。読み取り操作中、データはバッファからと他のテーブルから同時に読み取られます。 - -:::note -バッファーテーブルエンジンの推奨代替手段は、[非同期挿入](/guides/best-practices/asyncinserts.md)を有効にすることです。 -::: - -```sql -Buffer(database, table, num_layers, min_time, max_time, min_rows, max_rows, min_bytes, max_bytes [,flush_time [,flush_rows [,flush_bytes]]]) -``` - -### エンジンパラメーター: {#engine-parameters} - -#### database {#database} - -`database` – データベース名。`currentDatabase()`や文字列を返す他の定数式を使用できます。 - -#### table {#table} - -`table` – データをフラッシュするテーブル。 - -#### num_layers {#num_layers} - -`num_layers` – 並列性の層。物理的には、テーブルは`num_layers`の独立したバッファとして表されます。 - -#### min_time, max_time, min_rows, max_rows, min_bytes, and max_bytes {#min_time-max_time-min_rows-max_rows-min_bytes-and-max_bytes} - -バッファからデータをフラッシュする条件。 - -### オプションのエンジンパラメーター: {#optional-engine-parameters} - -#### flush_time, flush_rows, and flush_bytes {#flush_time-flush_rows-and-flush_bytes} - -バッファからデータをバックグラウンドでフラッシュする条件(省略またはゼロは`flush*`パラメーターなしを意味します)。 - -すべての`min*`条件が満たされるか、少なくとも1つの`max*`条件が満たされると、データはバッファからフラッシュされ、宛先テーブルに書き込まれます。 - -また、少なくとも1つの`flush*`条件が満たされると、バックグラウンドでフラッシュが開始されます。これは`max*`とは異なり、`flush*`を使用することで、バッファテーブルへの`INSERT`クエリの遅延を避けるためにバックグラウンドフラッシュを個別に設定できます。 - -#### min_time, max_time, and flush_time {#min_time-max_time-and-flush_time} - -バッファへの最初の書き込みからの秒数の条件。 - -#### min_rows, max_rows, and flush_rows {#min_rows-max_rows-and-flush_rows} - -バッファ内の行数の条件。 - -#### min_bytes, max_bytes, and flush_bytes {#min_bytes-max_bytes-and-flush_bytes} - -バッファ内のバイト数の条件。 - -書き込み操作中、データは1つまたは複数のランダムなバッファに挿入されます(`num_layers`で構成)。あるいは、挿入するデータ部分が十分大きい(`max_rows`または`max_bytes`を超える)場合、バッファを省略して宛先テーブルに直接書き込まれます。 - -データをフラッシュする条件は、各`num_layers`バッファごとに別々に計算されます。例えば、`num_layers = 16`で`max_bytes = 100000000`の場合、最大RAM消費量は1.6 GBです。 - -例: - -```sql -CREATE TABLE merge.hits_buffer AS merge.hits ENGINE = Buffer(merge, hits, 1, 10, 100, 10000, 1000000, 10000000, 100000000) -``` - -`merge.hits`と同じ構造の`merge.hits_buffer`テーブルを作成し、バッファエンジンを使用します。このテーブルに書き込むと、データはRAMにバッファリングされ、その後'merge.hits'テーブルに書き込まれます。単一のバッファが作成され、次のいずれかの場合にデータがフラッシュされます。 -- 最後のフラッシュから100秒が経過した場合(`max_time`)または -- 100万行が書き込まれた場合(`max_rows`)または -- 100MBのデータが書き込まれた場合(`max_bytes`)または -- 10秒が経過し(`min_time`)、10,000行(`min_rows`)および10MB(`min_bytes`)のデータが書き込まれた場合 - -例えば、たった1行が書き込まれた場合、100秒後には必ずフラッシュされます。しかし、多くの行が書き込まれた場合、データは早めにフラッシュされます。 - -サーバーが停止した場合、`DROP TABLE`または`DETACH TABLE`を使用すると、バッファされたデータも宛先テーブルにフラッシュされます。 - -データベースやテーブル名に空の文字列をシングルクォートで指定することもできます。これは宛先テーブルが存在しないことを示します。この場合、データフラッシュ条件が達成されると、バッファは単にクリアされます。これは、メモリ内のデータウィンドウを保持するために役立つかもしれません。 - -バッファテーブルから読み取るときは、データはバッファと宛先テーブル(もし存在する場合)から処理されます。 -バッファテーブルはインデックスをサポートしないことに注意してください。言い換えれば、バッファ内のデータは完全にスキャンされるため、大きなバッファでは遅くなることがあります。(従属テーブルのデータについては、対応するインデックスが使用されます。) - -バッファテーブルのカラムのセットが従属テーブルのカラムのセットと一致しない場合、両方のテーブルに存在するカラムのサブセットが挿入されます。 - -バッファテーブルのカラムと従属テーブルのカラムの型が一致しない場合、サーバーログにエラーメッセージが記録され、バッファがクリアされます。 -従属テーブルが存在しない場合も同様に、バッファがフラッシュされるとエラーが発生します。 - -:::note -2021年10月26日以前のリリースでバッファテーブルに対してALTERを実行すると、`Block structure mismatch`エラーが発生します(詳細は[#15117](https://github.com/ClickHouse/ClickHouse/issues/15117)および[#30565](https://github.com/ClickHouse/ClickHouse/pull/30565)を参照)。したがって、バッファテーブルを削除してから再作成するのが唯一の選択肢です。このエラーがリリースで修正されたかどうかを確認してから、バッファテーブルに対してALTERを実行してください。 -::: - -サーバーが異常に再起動された場合、バッファ内のデータは失われます。 - 
-`FINAL`と`SAMPLE`はバッファテーブルに対して正しく機能しません。これらの条件は宛先テーブルに渡されますが、バッファ内のデータ処理には使用されません。これらの機能が必要な場合は、バッファテーブルでは書き込みを行うだけで、宛先テーブルから読み取ることをお勧めします。 - -バッファテーブルにデータを追加する際、1つのバッファがロックされます。これにより、テーブルからの読み取り操作が同時に行われている場合に遅延が発生します。 - -バッファテーブルに挿入されたデータは、従属テーブルに異なる順序や異なるブロックで保存される可能性があります。このため、バッファテーブルを使用してCollapsingMergeTreeに書き込むのは難しいです。問題を避けるために、`num_layers`を1に設定することができます。 - -宛先テーブルがレプリケートされている場合、バッファテーブルへの書き込み時に、レプリケートテーブルの期待される特性の一部が失われます。行の順序やデータ部分のサイズのランダムな変更により、データの重複排除が機能しなくなり、レプリケートテーブルに対して信頼できる「正確に一度」書き込みを行うことができなくなります。 - -これらの欠点により、バッファテーブルの使用は稀なケースに限って推奨されます。 - -バッファテーブルは、単位時間内に大量のサーバーから多くのINSERTを受け取った場合に使用され、データを挿入前にバッファリングできず、INSERTが十分に速く実行できない場合に利用されます。 - -バッファテーブルでも、1行ずつデータを挿入することは意味がないことに注意してください。これでは、1秒あたり数千行の速度しか得られず、より大きなデータブロックを挿入すると1秒間に100万行以上の速度を出すことができます。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/special/buffer.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/special/buffer.md.hash deleted file mode 100644 index 7e31ab189e8..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/special/buffer.md.hash +++ /dev/null @@ -1 +0,0 @@ -658fe78501648c5b diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/special/dictionary.md b/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/special/dictionary.md deleted file mode 100644 index 6c4ccf3d29c..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/special/dictionary.md +++ /dev/null @@ -1,106 +0,0 @@ ---- -description: 'The `Dictionary` engine displays the dictionary data as a ClickHouse - table.' -sidebar_label: 'Dictionary' -sidebar_position: 20 -slug: '/engines/table-engines/special/dictionary' -title: 'Dictionary Table Engine' ---- - - - - -# Dictionary Table Engine - -`Dictionary` エンジンは、[dictionary](../../../sql-reference/dictionaries/index.md) データを ClickHouse テーブルとして表示します。 - -## 例 {#example} - -例として、次の構成を持つ `products` の辞書を考えてみます。 - -```xml - - - products - - -
products
- DSN=some-db-server - - - - 300 - 360 - - - - - - - product_id - - - title - String - - - - - -``` - -辞書データをクエリします: - -```sql -SELECT - name, - type, - key, - attribute.names, - attribute.types, - bytes_allocated, - element_count, - source -FROM system.dictionaries -WHERE name = 'products' -``` - -```text -┌─name─────┬─type─┬─key────┬─attribute.names─┬─attribute.types─┬─bytes_allocated─┬─element_count─┬─source──────────┐ -│ products │ Flat │ UInt64 │ ['title'] │ ['String'] │ 23065376 │ 175032 │ ODBC: .products │ -└──────────┴──────┴────────┴─────────────────┴─────────────────┴─────────────────┴───────────────┴─────────────────┘ -``` - -[dictGet\*](/sql-reference/functions/ext-dict-functions#dictget-dictgetordefault-dictgetornull) 関数を使用して、この形式で辞書データを取得できます。 - -このビューは、生データを取得したり、`JOIN` 操作を行ったりする必要があるときには役立ちません。そのような場合には、辞書データをテーブル形式で表示する `Dictionary` エンジンを使用できます。 - -構文: - -```sql -CREATE TABLE %table_name% (%fields%) engine = Dictionary(%dictionary_name%)` -``` - -使用例: - -```sql -create table products (product_id UInt64, title String) Engine = Dictionary(products); -``` - - Ok - -テーブルの内容を確認します。 - -```sql -select * from products limit 1; -``` - -```text -┌────product_id─┬─title───────────┐ -│ 152689 │ Some item │ -└───────────────┴─────────────────┘ -``` - -**関連情報** - -- [Dictionary function](/sql-reference/table-functions/dictionary) diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/special/dictionary.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/special/dictionary.md.hash deleted file mode 100644 index 4d5fe2d1173..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/special/dictionary.md.hash +++ /dev/null @@ -1 +0,0 @@ -5357cae661d2d9e3 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/special/distributed.md b/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/special/distributed.md deleted file mode 100644 index 2c8b2c11452..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/special/distributed.md +++ /dev/null @@ -1,282 +0,0 @@ ---- -description: '分散エンジンを使用したテーブルは独自のデータを保存しませんが、複数のサーバー上での分散クエリ処理を可能にします。 読み取りは自動的に並列化されます。 - 読み取り中、リモートサーバー上のテーブルインデックスがある場合は使用されます。' -sidebar_label: '分散' -sidebar_position: 10 -slug: '/engines/table-engines/special/distributed' -title: '分散テーブルエンジン' ---- - - - - -# 分散テーブルエンジン - -:::warning -クラウドで分散テーブルエンジンを作成するには、[remote and remoteSecure](../../../sql-reference/table-functions/remote) テーブル関数を使用できます。`Distributed(...)` 構文は ClickHouse Cloud では使用できません。 -::: - -分散エンジンを持つテーブルは独自のデータを保存せず、複数のサーバーでの分散クエリ処理を可能にします。読み取りは自動的に並列化されます。読み取り中、リモートサーバー上のテーブルインデックスが使用されます。 - -## テーブルの作成 {#distributed-creating-a-table} - -```sql -CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] -( - name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1], - name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2], - ... -) ENGINE = Distributed(cluster, database, table[, sharding_key[, policy_name]]) -[SETTINGS name=value, ...] -``` - -### テーブルから {#distributed-from-a-table} - -`Distributed` テーブルが現在のサーバー上のテーブルを指している場合、そのテーブルのスキーマを採用できます: - -```sql -CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] AS [db2.]name2 ENGINE = Distributed(cluster, database, table[, sharding_key[, policy_name]]) [SETTINGS name=value, ...] 
-``` - -### 分散パラメータ {#distributed-parameters} - -#### cluster {#cluster} - -`cluster` - サーバーの設定ファイル内のクラスター名 - -#### database {#database} - -`database` - リモートデータベースの名前 - -#### table {#table} - -`table` - リモートテーブルの名前 - -#### sharding_key {#sharding_key} - -`sharding_key` - (オプション)シャーディングキー - -`sharding_key` を指定する必要があるのは以下の場合です: - -- 分散テーブルへの `INSERT` の場合(テーブルエンジンはデータをどのように分割するかを判断するために `sharding_key` が必要です)。ただし、`insert_distributed_one_random_shard` 設定が有効な場合は、`INSERT` にシャーディングキーは必要ありません。 -- `optimize_skip_unused_shards` を使用する場合、`sharding_key` はどのシャードを照会すべきかを判断するために必要です。 - -#### policy_name {#policy_name} - -`policy_name` - (オプション)ポリシー名。バックグラウンド送信用の一時ファイルを保存するために使用されます。 - -**参照** - -- [distributed_foreground_insert](../../../operations/settings/settings.md#distributed_foreground_insert) 設定 -- [MergeTree](../../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-multiple-volumes) の例 - -### 分散設定 {#distributed-settings} - -#### fsync_after_insert {#fsync_after_insert} - -`fsync_after_insert` - 分散へのバックグラウンド挿入後にファイルデータの `fsync` を行います。OSが全挿入データを**イニシエータノード**のディスクにフラッシュしたことを保証します。 - -#### fsync_directories {#fsync_directories} - -`fsync_directories` - ディレクトリの `fsync` を行います。分散テーブルへのバックグラウンド挿入に関連する操作の後(挿入後、データをシャードに送信した後など)に、OSがディレクトリメタデータを更新したことを保証します。 - -#### skip_unavailable_shards {#skip_unavailable_shards} - -`skip_unavailable_shards` - true の場合、ClickHouse は利用できないシャードを静かにスキップします。シャードは以下の理由で利用できないとマークされます: 1) 接続失敗によりシャードに到達できない。2) DNSを通じてシャードを解決できない。3) シャードにテーブルが存在しない。デフォルトは false。 - -#### bytes_to_throw_insert {#bytes_to_throw_insert} - -`bytes_to_throw_insert` - この数以上の圧縮バイトがバックグラウンドINSERTのために保留されている場合、例外がスローされます。0 - スローしない。デフォルトは 0。 - -#### bytes_to_delay_insert {#bytes_to_delay_insert} - -`bytes_to_delay_insert` - この数以上の圧縮バイトがバックグラウンドINSERTのために保留されている場合、クエリが遅延されます。0 - 遅延しない。デフォルトは 0。 - -#### max_delay_to_insert {#max_delay_to_insert} - -`max_delay_to_insert` - バックグラウンド送信のために保留されているバイトが多い場合、分散テーブルへのデータ挿入の最大遅延(秒)です。デフォルトは 60。 - -#### background_insert_batch {#background_insert_batch} - -`background_insert_batch` - [distributed_background_insert_batch](../../../operations/settings/settings.md#distributed_background_insert_batch) と同じです。 - -#### background_insert_split_batch_on_failure {#background_insert_split_batch_on_failure} - -`background_insert_split_batch_on_failure` - [distributed_background_insert_split_batch_on_failure](../../../operations/settings/settings.md#distributed_background_insert_split_batch_on_failure) と同じです。 - -#### background_insert_sleep_time_ms {#background_insert_sleep_time_ms} - -`background_insert_sleep_time_ms` - [distributed_background_insert_sleep_time_ms](../../../operations/settings/settings.md#distributed_background_insert_sleep_time_ms) と同じです。 - -#### background_insert_max_sleep_time_ms {#background_insert_max_sleep_time_ms} - -`background_insert_max_sleep_time_ms` - [distributed_background_insert_max_sleep_time_ms](../../../operations/settings/settings.md#distributed_background_insert_max_sleep_time_ms) と同じです。 - -#### flush_on_detach {#flush_on_detach} - -`flush_on_detach` - DETACH/DROP/サーバーシャットダウン時にリモートノードにデータをフラッシュします。デフォルトは true。 - -:::note -**耐久性設定**(`fsync_...`): - -- データが最初にイニシエータノードのディスクに保存され、その後バックグラウンドでシャードに送信されるバックグラウンドINSERTのみに影響します(`distributed_foreground_insert=false`)。 -- 挿入のパフォーマンスが大幅に低下する可能性があります。 -- 分散テーブルフォルダー内のデータを書き込む際に、**挿入を受け付けたノードに**影響します。基盤となるMergeTreeテーブルへのデータ書き込みの保証が必要な場合は、`system.merge_tree_settings` 内の耐久性設定(`...fsync...`)を参照してください。 - -**挿入制限設定**(`..._insert`)についても参照してください: - -- 
[distributed_foreground_insert](../../../operations/settings/settings.md#distributed_foreground_insert) 設定 -- [prefer_localhost_replica](/operations/settings/settings#prefer_localhost_replica) 設定 -- `bytes_to_throw_insert` は `bytes_to_delay_insert` よりも先に処理されるため、`bytes_to_delay_insert` よりも小さい値に設定するべきではありません。 -::: - -**例** - -```sql -CREATE TABLE hits_all AS hits -ENGINE = Distributed(logs, default, hits[, sharding_key[, policy_name]]) -SETTINGS - fsync_after_insert=0, - fsync_directories=0; -``` - -データは `logs` クラスター内のすべてのサーバーから、クラスター内の各サーバーにある `default.hits` テーブルから読み取られます。データは単に読み取られるだけでなく、リモートサーバーで部分的に処理されます(可能な限りの範囲で)。たとえば、`GROUP BY` のクエリの場合、データはリモートサーバーで集約され、中間状態の集約関数がリクエスタのサーバーに送信されます。次に、データはさらに集約されます。 - -データベース名の代わりに、文字列を返す定数式を使用することもできます。たとえば: `currentDatabase()`。 - -## クラスター {#distributed-clusters} - -クラスターは [サーバー設定ファイル](../../../operations/configuration-files.md) で構成されています: - -```xml - - - - - - - - - - - 1 - - shard_01 - - false - - - 1 - example01-01-1 - 9000 - - - example01-01-2 - 9000 - - - - 2 - shard_02 - false - - example01-02-1 - 9000 - - - example01-02-2 - 1 - 9440 - - - - -``` - -ここでは、`logs` という名前のクラスターが定義されており、2つのシャードが含まれていて、それぞれが2つのレプリカを含んでいます。シャードは異なるデータの部分を含むサーバーを指します(全データを読み取るには、すべてのシャードにアクセスする必要があります)。レプリカはデータを複製するサーバーです(全データを読み取るには、任意のレプリカのデータにアクセスできます)。 - -クラスター名にはドットを含めることはできません。 - -各サーバーについて、`host`、`port`、およびオプションで `user`、`password`、`secure`、`compression`、`bind_host` パラメータが指定されます: - -- `host` - リモートサーバーのアドレス。ドメイン名または IPv4 または IPv6 アドレスを使用できます。ドメイン名を指定する場合、サーバーは起動時に DNS リクエストを行い、結果はサーバーが稼働している間保持されます。DNS リクエストが失敗すると、サーバーは起動しません。DNS レコードを変更した場合は、サーバーを再起動する必要があります。 -- `port` - メッセージ活動のための TCP ポート(設定内の `tcp_port`、通常は 9000 に設定)。`http_port` とは混同しないでください。 -- `user` - リモートサーバーへの接続用のユーザー名。デフォルト値は `default` ユーザーです。このユーザーは指定されたサーバーに接続するためのアクセス権を持っている必要があります。アクセスは `users.xml` ファイルで構成されます。詳細については、[アクセス権](../../../guides/sre/user-management/index.md) のセクションを参照してください。 -- `password` - リモートサーバーへの接続用のパスワード(マスクされません)。デフォルト値: 空文字列。 -- `secure` - セキュアな SSL/TLS 接続を使用するかどうか。通常、ポートを指定することも必要です(デフォルトのセキュアポートは `9440` です)。サーバーは `9440` でリッスンし、正しい証明書が設定される必要があります。 -- `compression` - データ圧縮を使用します。デフォルト値: `true`。 -- `bind_host` - このノードからリモートサーバーに接続する際に使用する送信元アドレス。IPv4アドレスのみがサポートされます。ClickHouse の分散クエリによって使用されるソースIPアドレスを設定する必要がある高度なデプロイメント使用ケース向けに設計されています。 - -レプリカを指定すると、各シャードを読み取る際に利用可能なレプリカのうちの1つが選択されます。負荷分散のためのアルゴリズムを設定することができます(どのレプリカにアクセスするかの優先順位) – [load_balancing](../../../operations/settings/settings.md#load_balancing) 設定を参照してください。サーバーとの接続が確立されない場合は、短いタイムアウトで接続を試みます。接続が失敗した場合は次のレプリカが選択され、すべてのレプリカに対して同様に繰り返されます。これにより耐障害性が向上しますが、完全なフォールトトレランスを提供するものではありません: リモートサーバーは接続を受け入れることがありますが、動作しないか、動作が悪い場合があります。 - -シャードの1つだけを指定することもできます(この場合、クエリ処理は分散ではなくリモートと呼ばれる必要があります)または任意の数のシャードを指定できます。各シャードに対して1つ以上のレプリカを指定できます。各シャードに対して異なる数のレプリカを指定できます。 - -設定ファイル内で任意の数のクラスターを指定できます。 - -クラスターを表示するには、`system.clusters` テーブルを使用します。 - -`Distributed` エンジンは、クラスタをローカルサーバーのように扱うことを可能にします。ただし、クラスターの構成は動的に指定することはできず、サーバー設定ファイルで構成する必要があります。通常、クラスター内のすべてのサーバーは同じクラスター設定を持ちます(これは必須ではありません)。設定ファイルからのクラスターはサーバーを再起動することなく即時に更新されます。 - -毎回未知のシャードとレプリカのセットにクエリを送信する必要がある場合、`Distributed` テーブルを作成する必要はありません – 代わりに `remote` テーブル関数を使用してください。 [テーブル関数](../../../sql-reference/table-functions/index.md) のセクションを参照してください。 - -## データの書き込み {#distributed-writing-data} - -クラスタにデータを書くための方法は二つあります。 - -最初に、どのサーバーにどのデータを書き込むかを定義し、各シャードで直接書き込みを行うことができます。言い換えれば、`Distributed` テーブルが指しているリモートテーブルに対して直接 `INSERT` ステートメントを実行します。これは、データをトリビアルではない要求を持つ主題領域に基づいて任意のシャーディングスキームを使用できるため、最も柔軟なソリューションです。また、異なるシャードに異なるデータを完全に独立して書き込むことができるため、最も最適なソリューションでもあります。 - 
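1 つ目の方法のイメージを簡単なスケッチで示します。アプリケーション側でシャーディング先を決め、各シャードのサーバー上にあるローカルテーブルへ直接 `INSERT` します(ここでのテーブル名 `hits_local` やシャードの選び方は説明用の仮のものです):

```sql
-- 架空の例: アプリケーションが UserID % 2 などの独自ルールでシャードを選択し、
-- 選択したシャードのサーバーに接続してローカルテーブルへ直接挿入する

-- シャード 1 のサーバー上で実行:
INSERT INTO default.hits_local (UserID, URL) VALUES (42, 'https://example.com/a');

-- シャード 2 のサーバー上で実行:
INSERT INTO default.hits_local (UserID, URL) VALUES (43, 'https://example.com/b');
```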
-第二に、`Distributed` テーブルに対して `INSERT` ステートメントを実行できます。この場合、テーブルは挿入されたデータをサーバーに自動的に分配します。`Distributed` テーブルに書き込むためには、`sharding_key` パラメータが構成されている必要があります(シャードが1つしかない場合を除く)。 - -各シャードは設定ファイル内で `` を定義できます。デフォルトでは、重みは `1` です。データはシャードの重みに比例して分配されます。すべてのシャードの重みが合計され、各シャードの重みが総和で割られて各シャードの比率が決定されます。たとえば、2つのシャードがあり、最初のシャードの重みが 1 で、2 番目のシャードの重みが 2 の場合、最初のシャードには 3 分の 1 (1 / 3)の挿入された行が送られ、2 番目のシャードには 3 分の 2 (2 / 3)が送られます。 - -各シャードには設定ファイル内で `internal_replication` パラメータが定義できます。このパラメータが `true` に設定されている場合、書き込み操作は最初の正常なレプリカを選択し、そこにデータを書き込みます。`Distributed` テーブルの基盤となるテーブルがレプリケートテーブル(例えば、`Replicated*MergeTree` テーブルエンジンのいずれか)である場合は、これを使用します。一つのテーブルレプリカが書き込みを受け取り、それが他のレプリカに自動的にレプリケートされます。 - -`internal_replication` が `false` に設定されている場合(デフォルト)、データはすべてのレプリカに書き込まれます。この場合、`Distributed` テーブルはデータを自分でレプリケートします。これは、レプリカの整合性がチェックされず、時間が経つにつれてわずかに異なるデータを含むようになるため、レプリケートされたテーブルを使用するよりも劣ります。 - -行データが送信されるシャードを選択するために、シャーディング式が分析され、その余りがシャードの合計ウエイトで割られた値から取得されます。行は、`prev_weights` から `prev_weights + weight` への余りの半区間に対応するシャードに送信されます。ここで、`prev_weights` は最小の数のシャードの合計ウエイトであり、`weight` はこのシャードの重みです。たとえば、2つのシャードがあり、最初が重み 9 で、2 番目が重み 10 の場合、行は余りの範囲 \[0, 9) について最初のシャードに送信され、余りの範囲 \[9, 19) については2 番目のシャードに送信されます。 - -シャーディング式は、整数を返す定数およびテーブルカラムからなる任意の式です。たとえば、データのランダム分配のために `rand()` 式を使用したり、ユーザー ID で割った余りによる分配のために `UserID` を使用したりできます(この場合、単一のユーザーのデータは単一のシャードに存在するため、ユーザーによる `IN` および `JOIN` の実行が簡素化されます)。もし、いずれかのカラムの分配が十分に均一でない場合は、それをハッシュ関数でラップすることができます(たとえば、`intHash64(UserID)`)。 - -単純な割り算の余りはシャーディングに制限された解決策であり、常に適切ではありません。中規模および大規模のデータボリューム(数十のサーバー)には有効ですが、ごく大きなデータボリューム(数百のサーバーまたはそれ以上)には適していません。その場合、`Distributed` テーブルのエントリを使用するのではなく、対象領域によって要求されるシャーディングスキームを使用してください。 - -以下のような場合にはシャーディングスキームを考慮すべきです: - -- 特定のキーでデータの結合(`IN` または `JOIN`)を必要とするクエリが使用される場合。このキーでデータがシャーディングされていると、`GLOBAL IN` または `GLOBAL JOIN` の代わりにローカル `IN` または `JOIN` を使用できます。これにより、大幅に効率が向上します。 -- 大量のサーバー(数百以上)を使用し、個々のクライアントのデータに対する小さいクエリが大量にある場合(例えば、ウェブサイト、広告主、またはパートナーのデータ)。それら的小クエリが全体のクラスターに影響を与えないように、単一のクライアントのデータを単一のシャードに位置付けることが意味があります。別の方法として、二段階のシャーディングを設定できます: 全体のクラスターを「層」に分けることができ、その層は複数のシャードで構成されることがあります。単一クライアントのデータは単一層に位置付けられますが、必要に応じて層内のシャードが追加され、データがその中でランダムに分配されます。各層には `Distributed` テーブルが作成され、グローバルクエリ用に一つの共有された分散テーブルが作成されます。 - -データはバックグラウンドで書き込まれます。テーブルに挿入されたとき、データブロックは単にローカルファイルシステムに書き込まれます。データは、可能な限り早く、リモートサーバーにバックグラウンドで送信されます。データ送信の周期性は、[distributed_background_insert_sleep_time_ms](../../../operations/settings/settings.md#distributed_background_insert_sleep_time_ms) と [distributed_background_insert_max_sleep_time_ms](../../../operations/settings/settings.md#distributed_background_insert_max_sleep_time_ms) 設定によって管理されています。`Distributed` エンジンは、挿入されたデータを含む各ファイルを個別に送信しますが、[distributed_background_insert_batch](../../../operations/settings/settings.md#distributed_background_insert_batch) 設定を使用してファイルのバッチ送信を有効にできます。この設定により、ローカルサーバーおよびネットワークリソースをより適切に活用することで、クラスター性能が向上します。データが正常に送信されたかどうかは、テーブルディレクトリ内の待機リストにあるファイルを確認することで確認できます: `/var/lib/clickhouse/data/database/table/`。バックグラウンドタスクを実行するスレッドの数は [background_distributed_schedule_pool_size](/operations/server-configuration-parameters/settings#background_distributed_schedule_pool_size) 設定によって設定できます。 - -`INSERT` が `Distributed` テーブルに行われた後、サーバーが存在しなくなったり、強制的に再起動した場合(たとえば、ハードウェア障害による)に、挿入されたデータが失われる可能性があります。テーブルディレクトリ内に破損したデータ部分が検出された場合、それは `broken` サブディレクトリに移動され、もはや使用されません。 - -## データの読み取り {#distributed-reading-data} - -`Distributed` テーブルにクエリを行うと、`SELECT` クエリがすべてのシャードに送信され、データがシャード全体にどう分配されているかに関係なく機能します(完全にランダムに分配されている可能性もあります)。新しいシャードを追加すると、古いデータをその中に転送する必要はありません。代わりに、重みを大きくして新しいデータを書き込むことで、データが少し不均等に分配されますが、クエリは正しく効率的に機能します。 - 
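シャードの数や重みが想定どおりに構成されているかは、`system.clusters` テーブルで確認できます。以下は、上記の設定例に合わせて `logs` クラスタを想定した確認クエリの一例です(クラスタ名は仮定です):

```sql
-- クラスタ構成(シャード番号・重み・レプリカ)を確認する
SELECT cluster, shard_num, shard_weight, replica_num, host_name, is_local
FROM system.clusters
WHERE cluster = 'logs'
ORDER BY shard_num, replica_num
```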
-`max_parallel_replicas` オプションが有効になっている場合、クエリ処理は単一のシャード内のすべてのレプリカに並列化されます。詳細については、[max_parallel_replicas](../../../operations/settings/settings.md#max_parallel_replicas) のセクションを参照してください。 - -分散 `in` および `global in` クエリがどのように処理されるかについては、[こちら](/sql-reference/operators/in#distributed-subqueries) のドキュメントを参照してください。 - -## 仮想カラム {#virtual-columns} - -#### _shard_num {#_shard_num} - -`_shard_num` — テーブル `system.clusters` の `shard_num` 値を含みます。型: [UInt32](../../../sql-reference/data-types/int-uint.md)。 - -:::note -[remote](../../../sql-reference/table-functions/remote.md) および [cluster](../../../sql-reference/table-functions/cluster.md) テーブル関数は内部的に一時的な Distributed テーブルを作成するため、`_shard_num` もそこに存在します。 -::: - -**参照** - -- [仮想カラム](../../../engines/table-engines/index.md#table_engines-virtual_columns) の説明 -- [background_distributed_schedule_pool_size](/operations/server-configuration-parameters/settings#background_distributed_schedule_pool_size) 設定 -- [shardNum()](../../../sql-reference/functions/other-functions.md#shardnum) および [shardCount()](../../../sql-reference/functions/other-functions.md#shardcount) 関数 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/special/distributed.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/special/distributed.md.hash deleted file mode 100644 index d331ca262fb..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/special/distributed.md.hash +++ /dev/null @@ -1 +0,0 @@ -7ccc20c170b84501 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/special/executable.md b/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/special/executable.md deleted file mode 100644 index 907e38b59e7..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/special/executable.md +++ /dev/null @@ -1,232 +0,0 @@ ---- -description: 'The `Executable` and `ExecutablePool` table engines allow you to define - a table whose rows are generated from a script that you define (by writing rows - to **stdout**).' 
-sidebar_label: 'Executable' -sidebar_position: 40 -slug: '/engines/table-engines/special/executable' -title: 'Executable and ExecutablePool Table Engines' ---- - - - - -# 実行可能および実行プールテーブルエンジン - -`Executable` および `ExecutablePool` テーブルエンジンを使用すると、あなたが定義したスクリプトから生成された行を持つテーブルを定義できます(**stdout** に行を書き込むことによって)。実行可能なスクリプトは `users_scripts` ディレクトリに保存され、任意のソースからデータを読み取ることができます。 - -- `Executable` テーブル: 各クエリごとにスクリプトが実行されます -- `ExecutablePool` テーブル: 永続的なプロセスのプールを維持し、プールからプロセスを取得して読み込みます - -オプションで、スクリプトが読み取るために結果を **stdin** にストリームする1つ以上の入力クエリを含めることができます。 - -## 実行可能テーブルの作成 {#creating-an-executable-table} - -`Executable` テーブルエンジンには、スクリプトの名前と受信データの形式という2つのパラメータが必要です。オプションで、1つ以上の入力クエリを渡すことができます: - -```sql -Executable(script_name, format, [input_query...]) -``` - -`Executable` テーブルに関連する設定は以下の通りです: - -- `send_chunk_header` - - 説明: プロセスにチャンクを送信する前に、各チャンク内の行数を送信します。この設定は、リソースを事前に確保するためにスクリプトをより効率的に書くのに役立ちます - - デフォルト値: false -- `command_termination_timeout` - - 説明: コマンド終了タイムアウト(秒単位) - - デフォルト値: 10 -- `command_read_timeout` - - 説明: コマンド stdout からデータを読み取るためのタイムアウト(ミリ秒単位) - - デフォルト値: 10000 -- `command_write_timeout` - - 説明: コマンド stdin にデータを書き込むためのタイムアウト(ミリ秒単位) - - デフォルト値: 10000 - - -例を見てみましょう。次の Python スクリプトは `my_script.py` という名で `user_scripts` フォルダに保存されています。このスクリプトは数値 `i` を読み取り、10個のランダムな文字列を出力します。各文字列の前にはタブで区切られた数字が付きます: - -```python -#!/usr/bin/python3 - -import sys -import string -import random - -def main(): - - # 入力値を読み込む - for number in sys.stdin: - i = int(number) - - # ランダムな行を生成する - for id in range(0, i): - letters = string.ascii_letters - random_string = ''.join(random.choices(letters ,k=10)) - print(str(id) + '\t' + random_string + '\n', end='') - - # 結果を stdout にフラッシュ - sys.stdout.flush() - -if __name__ == "__main__": - main() -``` - -次の `my_executable_table` は `my_script.py` の出力から構築されます。これにより、`my_executable_table` から `SELECT` を実行するたびに10個のランダムな文字列が生成されます: - -```sql -CREATE TABLE my_executable_table ( - x UInt32, - y String -) -ENGINE = Executable('my_script.py', TabSeparated, (SELECT 10)) -``` - -テーブルの作成はすぐに戻り、スクリプトは呼び出されません。`my_executable_table` をクエリすると、スクリプトが呼び出されます: - -```sql -SELECT * FROM my_executable_table -``` - -```response -┌─x─┬─y──────────┐ -│ 0 │ BsnKBsNGNH │ -│ 1 │ mgHfBCUrWM │ -│ 2 │ iDQAVhlygr │ -│ 3 │ uNGwDuXyCk │ -│ 4 │ GcFdQWvoLB │ -│ 5 │ UkciuuOTVO │ -│ 6 │ HoKeCdHkbs │ -│ 7 │ xRvySxqAcR │ -│ 8 │ LKbXPHpyDI │ -│ 9 │ zxogHTzEVV │ -└───┴────────────┘ -``` - -## スクリプトにクエリ結果を渡す {#passing-query-results-to-a-script} - -Hacker News ウェブサイトのユーザーはコメントを残します。Python には、コメントがポジティブ、ネガティブ、またはニュートラルであるかを判断するための自然言語処理ツールキット(`nltk`)があり、-1(非常にネガティブなコメント)から1(非常にポジティブなコメント)までの値を割り当てることができます。それでは、`nltk` を使用して Hacker News コメントの感情を計算する `Executable` テーブルを作成しましょう。 - -この例では、[こちら](/engines/table-engines/mergetree-family/invertedindexes/#full-text-search-of-the-hacker-news-dataset)で説明されている `hackernews` テーブルを使用します。`hackernews` テーブルには、`UInt64` 型の `id` カラムと `String` 型の `comment` カラムが含まれています。それでは、`Executable` テーブルを定義して始めましょう: - -```sql -CREATE TABLE sentiment ( - id UInt64, - sentiment Float32 -) -ENGINE = Executable( - 'sentiment.py', - TabSeparated, - (SELECT id, comment FROM hackernews WHERE id > 0 AND comment != '' LIMIT 20) -); -``` - -`sentiment` テーブルについてのいくつかのコメント: - -- ファイル `sentiment.py` は `user_scripts` フォルダに保存されています(`user_scripts_path` 設定のデフォルトフォルダ) -- `TabSeparated` 形式は、Python スクリプトがタブ区切りの値を含む生データの行を生成する必要があることを意味します -- クエリは `hackernews` から2つのカラムを選択します。Python スクリプトは、受信行からそのカラム値を解析する必要があります - -以下が `sentiment.py` の定義です: - -```python -#!/usr/local/bin/python3.9 - -import sys -import nltk -from 
nltk.sentiment import SentimentIntensityAnalyzer - -def main(): - sentiment_analyzer = SentimentIntensityAnalyzer() - - while True: - try: - row = sys.stdin.readline() - if row == '': - break - - split_line = row.split("\t") - - id = str(split_line[0]) - comment = split_line[1] - - score = sentiment_analyzer.polarity_scores(comment)['compound'] - print(id + '\t' + str(score) + '\n', end='') - sys.stdout.flush() - except BaseException as x: - break - -if __name__ == "__main__": - main() -``` - -私たちの Python スクリプトについてのいくつかのコメント: - -- これが機能するためには、`nltk.downloader.download('vader_lexicon')` を実行する必要があります。これはスクリプト内に置くこともできますが、そうすると `sentiment` テーブルのクエリが実行されるたびに毎回ダウンロードされてしまうため、効率的ではありません -- `row` の各値は `SELECT id, comment FROM hackernews WHERE id > 0 AND comment != '' LIMIT 20` の結果セットの行になります -- 受信行はタブ区切りであるため、Python の `split` 関数を使用して `id` と `comment` を解析します -- `polarity_scores` の結果は多数の値を持つ JSON オブジェクトです。私たちはこの JSON オブジェクトの `compound` 値を取得することにしました -- `sentiment` テーブルは ClickHouse で `TabSeparated` 形式を使用し、2つのカラムを含むため、私たちの `print` 関数はタブでカラムを区切ります - -`sentiment` テーブルから行を選択するクエリを実行するたびに、`SELECT id, comment FROM hackernews WHERE id > 0 AND comment != '' LIMIT 20` クエリが実行され、その結果が `sentiment.py` に渡されます。これをテストしてみましょう: - -```sql -SELECT * -FROM sentiment -``` - -応答は以下のようになります: - -```response -┌───────id─┬─sentiment─┐ -│ 7398199 │ 0.4404 │ -│ 21640317 │ 0.1779 │ -│ 21462000 │ 0 │ -│ 25168863 │ 0 │ -│ 25168978 │ -0.1531 │ -│ 25169359 │ 0 │ -│ 25169394 │ -0.9231 │ -│ 25169766 │ 0.4137 │ -│ 25172570 │ 0.7469 │ -│ 25173687 │ 0.6249 │ -│ 28291534 │ 0 │ -│ 28291669 │ -0.4767 │ -│ 28291731 │ 0 │ -│ 28291949 │ -0.4767 │ -│ 28292004 │ 0.3612 │ -│ 28292050 │ -0.296 │ -│ 28292322 │ 0 │ -│ 28295172 │ 0.7717 │ -│ 28295288 │ 0.4404 │ -│ 21465723 │ -0.6956 │ -└──────────┴───────────┘ -``` - -## ExecutablePool テーブルの作成 {#creating-an-executablepool-table} - -`ExecutablePool` の構文は `Executable` と似ていますが、`ExecutablePool` テーブル固有のいくつかの関連設定があります: - -- `pool_size` - - 説明: プロセスプールのサイズ。サイズが0の場合、サイズの制限はありません - - デフォルト値: 16 -- `max_command_execution_time` - - 説明: 最大コマンド実行時間(秒単位) - - デフォルト値: 10 - -上記の `sentiment` テーブルを `Executable` の代わりに `ExecutablePool` を使用するように簡単に変換できます: - -```sql -CREATE TABLE sentiment_pooled ( - id UInt64, - sentiment Float32 -) -ENGINE = ExecutablePool( - 'sentiment.py', - TabSeparated, - (SELECT id, comment FROM hackernews WHERE id > 0 AND comment != '' LIMIT 20000) -) -SETTINGS - pool_size = 4; -``` - -ClickHouse は、クライアントが `sentiment_pooled` テーブルをクエリする際に、オンデマンドで4つのプロセスを維持します。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/special/executable.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/special/executable.md.hash deleted file mode 100644 index 53b6974667e..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/special/executable.md.hash +++ /dev/null @@ -1 +0,0 @@ -9bda640eedb0575f diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/special/external-data.md b/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/special/external-data.md deleted file mode 100644 index 81310fe3cc1..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/special/external-data.md +++ /dev/null @@ -1,72 +0,0 @@ ---- -description: 'ClickHouseは、`SELECT`クエリと一緒にクエリ処理に必要なデータをサーバーに送信することを可能にします。このデータは一時テーブルに配置され、クエリ内で使用できます(たとえば、`IN`演算子で)。' -sidebar_label: '外部データ' -sidebar_position: 130 -slug: '/engines/table-engines/special/external-data' -title: 'クエリ処理の外部データ' ---- - - - 
- -# クエリ処理のための外部データ - -ClickHouseは、必要なデータをサーバーに送信し、`SELECT`クエリと一緒に処理することを許可します。このデータは一時テーブルに格納され(「一時テーブル」セクションを参照)、クエリ内で使用できます(例えば、`IN`演算子内で)。 - -たとえば、重要なユーザー識別子を含むテキストファイルがある場合、そのファイルをサーバーにアップロードし、このリストによるフィルタリングを使用するクエリと一緒に送信できます。 - -複数のクエリを大容量の外部データと共に実行する必要がある場合、この機能を使用しないでください。データを事前にDBにアップロードする方が良いです。 - -外部データは、コマンドラインクライアント(非対話モード)を介して、またはHTTPインターフェースを通じてアップロードできます。 - -コマンドラインクライアントでは、次の形式でパラメータセクションを指定できます。 - -```bash ---external --file=... [--name=...] [--format=...] [--types=...|--structure=...] -``` - -送信されるテーブルの数に応じて、複数のセクションをこのように指定できます。 - -**–external** – 条項の開始を示します。 -**–file** – テーブルダンプのファイルパス、または標準入力を指す -。 -標準入力からは単一のテーブルしか取得できません。 - -次のパラメータは任意です: -**–name**– テーブルの名前。省略すると、_data が使用されます。 -**–format** – ファイル内のデータフォーマット。省略すると、TabSeparated が使用されます。 - -次のパラメータのいずれかが必須です: -**–types** – カンマ区切りのカラムタイプのリスト。例えば: `UInt64,String`。カラムは _1, _2, ... と名付けられます。 -**–structure**– `UserID UInt64`, `URL String` 形式のテーブル構造。カラム名とタイプを定義します。 - -'file' で指定されたファイルは、'format' で指定された形式で解析され、'types' または 'structure' で指定されたデータ型が使用されます。テーブルはサーバーにアップロードされ、'name' の名前の一時テーブルとしてそこにアクセス可能です。 - -例: - -```bash -$ echo -ne "1\n2\n3\n" | clickhouse-client --query="SELECT count() FROM test.visits WHERE TraficSourceID IN _data" --external --file=- --types=Int8 -849897 -$ cat /etc/passwd | sed 's/:/\t/g' | clickhouse-client --query="SELECT shell, count() AS c FROM passwd GROUP BY shell ORDER BY c DESC" --external --file=- --name=passwd --structure='login String, unused String, uid UInt16, gid UInt16, comment String, home String, shell String' -/bin/sh 20 -/bin/false 5 -/bin/bash 4 -/usr/sbin/nologin 1 -/bin/sync 1 -``` - -HTTPインターフェースを使用する場合、外部データは multipart/form-data 形式で渡されます。各テーブルは別のファイルとして送信されます。テーブル名はファイル名から取得されます。`query_string` には、`name_format`、`name_types`、および `name_structure` のパラメータが渡されます。ここで、`name` はこれらのパラメータが対応するテーブルの名前です。パラメータの意味はコマンドラインクライアントを使用した場合と同じです。 - -例: - -```bash -$ cat /etc/passwd | sed 's/:/\t/g' > passwd.tsv - -$ curl -F 'passwd=@passwd.tsv;' 'http://localhost:8123/?query=SELECT+shell,+count()+AS+c+FROM+passwd+GROUP+BY+shell+ORDER+BY+c+DESC&passwd_structure=login+String,+unused+String,+uid+UInt16,+gid+UInt16,+comment+String,+home+String,+shell+String' -/bin/sh 20 -/bin/false 5 -/bin/bash 4 -/usr/sbin/nologin 1 -/bin/sync 1 -``` - -分散クエリ処理の場合、一時テーブルはすべてのリモートサーバーに送信されます。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/special/external-data.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/special/external-data.md.hash deleted file mode 100644 index 08d91b117d0..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/special/external-data.md.hash +++ /dev/null @@ -1 +0,0 @@ -68dcd5169f61bacf diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/special/file.md b/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/special/file.md deleted file mode 100644 index b995fadf2a7..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/special/file.md +++ /dev/null @@ -1,116 +0,0 @@ ---- -description: 'The File table engine keeps the data in a file in one of the supported - file formats (`TabSeparated`, `Native`, etc.).' 
-sidebar_label: 'File' -sidebar_position: 40 -slug: '/engines/table-engines/special/file' -title: 'File テーブルエンジン' ---- - - - - -# File Table Engine - -Fileテーブルエンジンは、サポートされている[ファイルフォーマット](/interfaces/formats#formats-overview)のいずれか(`TabSeparated`、`Native`など)でファイルにデータを保持します。 - -使用シナリオ: - -- ClickHouseからファイルへのデータエクスポート。 -- データを別のフォーマットに変換。 -- ディスク上のファイルを編集してClickHouseのデータを更新。 - -:::note -このエンジンは現在ClickHouse Cloudで使用できませんので、[S3テーブル関数を使用してください](/sql-reference/table-functions/s3.md)。 -::: - -## ClickHouseサーバーでの使用 {#usage-in-clickhouse-server} - -```sql -File(Format) -``` - -`Format`パラメータは、利用可能なファイルフォーマットの1つを指定します。`SELECT`クエリを実行するには、フォーマットが入力をサポートしている必要があり、`INSERT`クエリを実行するには、出力をサポートしている必要があります。利用可能なフォーマットは、[Formats](/interfaces/formats#formats-overview)セクションにリストされています。 - -ClickHouseは`File`のためにファイルシステムのパスを指定することを許可しません。サーバー設定の[path](../../../operations/server-configuration-parameters/settings.md)設定で定義されたフォルダーを使用します。 - -`File(Format)`を使用してテーブルを作成すると、そのフォルダーに空のサブディレクトリが作成されます。そのテーブルにデータが書き込まれると、そのサブディレクトリ内の`data.Format`ファイルに配置されます。 - -このサブフォルダーとファイルを手動でサーバーファイルシステム内に作成し、対応する名前のテーブル情報に[ATTACH](../../../sql-reference/statements/attach.md)することで、そのファイルからデータをクエリすることができます。 - -:::note -この機能には注意が必要です。ClickHouseはそのようなファイルへの外部変更を追跡しません。ClickHouse外部と同時に書き込みを行う結果は未定義です。 -::: - -## 例 {#example} - -**1.** `file_engine_table`テーブルを設定します: - -```sql -CREATE TABLE file_engine_table (name String, value UInt32) ENGINE=File(TabSeparated) -``` - -デフォルトでは、ClickHouseはフォルダー`/var/lib/clickhouse/data/default/file_engine_table`を作成します。 - -**2.** 手動で`/var/lib/clickhouse/data/default/file_engine_table/data.TabSeparated`を作成し、次の内容を含めます: - -```bash -$ cat data.TabSeparated -one 1 -two 2 -``` - -**3.** データをクエリします: - -```sql -SELECT * FROM file_engine_table -``` - -```text -┌─name─┬─value─┐ -│ one │ 1 │ -│ two │ 2 │ -└──────┴───────┘ -``` - -## ClickHouse-localでの使用 {#usage-in-clickhouse-local} - -[clickhouse-local](../../../operations/utilities/clickhouse-local.md)内で、Fileエンジンは`Format`に加えてファイルパスを受け付けます。デフォルトの入力/出力ストリームは、`0`や`stdin`、`1`や`stdout`のような数値または人間が読める名前を使用して指定できます。追加のエンジンパラメータまたはファイル拡張子(`gz`、`br`または`xz`)に基づいて圧縮ファイルを読み書きすることが可能です。 - -**例:** - -```bash -$ echo -e "1,2\n3,4" | clickhouse-local -q "CREATE TABLE table (a Int64, b Int64) ENGINE = File(CSV, stdin); SELECT a, b FROM table; DROP TABLE table" -``` - -## 実装の詳細 {#details-of-implementation} - -- 複数の`SELECT`クエリを同時に実行できますが、`INSERT`クエリは互いに待機します。 -- `INSERT`クエリで新しいファイルの作成がサポートされています。 -- ファイルが存在する場合、`INSERT`は新しい値を追加します。 -- サポートされていないもの: - - `ALTER` - - `SELECT ... 
SAMPLE` - - インデックス - - レプリケーション - -## PARTITION BY {#partition-by} - -`PARTITION BY` — オプションです。パーティションキーでデータをパーティション化し、別々のファイルを作成することが可能です。ほとんどの場合、パーティションキーは必要ありませんが、必要な場合でも月単位でのパーティションキー以上の粒度は一般的には必要ありません。パーティション化はクエリの速度を向上させません(ORDER BY式とは対照的です)。粒度が細かすぎるパーティション化は行わないでください。クライアント識別子や名前でデータをパーティション化しないでください(その代わりに、ORDER BY式の最初のカラムにクライアント識別子または名前を設定してください)。 - -月ごとにパーティション化するには、`toYYYYMM(date_column)`式を使用します。ここで`date_column`は[Date](/sql-reference/data-types/date.md)タイプの日付を持つカラムです。ここでのパーティション名は`"YYYYMM"`形式です。 - -## 仮想カラム {#virtual-columns} - -- `_path` — ファイルへのパス。タイプ: `LowCardinality(String)`。 -- `_file` — ファイル名。タイプ: `LowCardinality(String)`。 -- `_size` — バイト単位のファイルサイズ。タイプ: `Nullable(UInt64)`。サイズが不明な場合、値は`NULL`です。 -- `_time` — ファイルの最終変更時刻。タイプ: `Nullable(DateTime)`。時間が不明な場合、値は`NULL`です。 - -## 設定 {#settings} - -- [engine_file_empty_if_not_exists](/operations/settings/settings#engine_file_empty_if_not_exists) - 存在しないファイルから空のデータを選択できるようにします。デフォルトでは無効です。 -- [engine_file_truncate_on_insert](/operations/settings/settings#engine_file_truncate_on_insert) - 挿入前にファイルを切り詰めることを可能にします。デフォルトでは無効です。 -- [engine_file_allow_create_multiple_files](/operations/settings/settings.md#engine_file_allow_create_multiple_files) - フォーマットにサフィックスがある場合、各挿入で新しいファイルを作成できるようにします。デフォルトでは無効です。 -- [engine_file_skip_empty_files](/operations/settings/settings.md#engine_file_skip_empty_files) - 読み込み中に空のファイルをスキップできるようにします。デフォルトでは無効です。 -- [storage_file_read_method](/operations/settings/settings#engine_file_empty_if_not_exists) - ストレージファイルからデータを読み取る方法で、`read`、`pread`、`mmap`のいずれかです。mmap方法はclickhouse-serverには適用されません(clickhouse-local向けです)。デフォルト値:clickhouse-serverでは`pread`、clickhouse-localでは`mmap`です。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/special/file.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/special/file.md.hash deleted file mode 100644 index ea06f6119d2..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/special/file.md.hash +++ /dev/null @@ -1 +0,0 @@ -320bc44b972e6a3a diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/special/filelog.md b/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/special/filelog.md deleted file mode 100644 index d66da48e3ec..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/special/filelog.md +++ /dev/null @@ -1,111 +0,0 @@ ---- -description: 'This engine allows processing of application log files as a stream - of records.' -sidebar_label: 'FileLog' -sidebar_position: 160 -slug: '/engines/table-engines/special/filelog' -title: 'FileLog Engine' ---- - - - - -# FileLog エンジン {#filelog-engine} - -このエンジンは、アプリケーションのログファイルをレコードのストリームとして処理することを可能にします。 - -`FileLog` では次のことができます: - -- ログファイルに対してサブスクライブする。 -- サブスクライブしたログファイルに新しいレコードが追加されると、それを処理する。 - -## テーブルの作成 {#creating-a-table} - -```sql -CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] -( - name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1], - name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2], - ... 
-) ENGINE = FileLog('path_to_logs', 'format_name') SETTINGS - [poll_timeout_ms = 0,] - [poll_max_batch_size = 0,] - [max_block_size = 0,] - [max_threads = 0,] - [poll_directory_watch_events_backoff_init = 500,] - [poll_directory_watch_events_backoff_max = 32000,] - [poll_directory_watch_events_backoff_factor = 2,] - [handle_error_mode = 'default'] -``` - -エンジンの引数: - -- `path_to_logs` – サブスクライブするログファイルのパス。ログファイルのディレクトリまたは単一のログファイルのパスであることができます。ClickHouse は `user_files` ディレクトリ内のパスのみを許可していることに注意してください。 -- `format_name` - レコードフォーマット。FileLog はファイル内の各行を独立したレコードとして処理するため、すべてのデータフォーマットが適しているわけではありません。 - -オプションのパラメータ: - -- `poll_timeout_ms` - ログファイルからの単一ポーリングのタイムアウト。デフォルト: [stream_poll_timeout_ms](../../../operations/settings/settings.md#stream_poll_timeout_ms)。 -- `poll_max_batch_size` — 単一ポーリングでポーリングされるレコードの最大数。デフォルト: [max_block_size](/operations/settings/settings#max_block_size)。 -- `max_block_size` — ポーリング用の最大バッチサイズ(レコード数)。デフォルト: [max_insert_block_size](../../../operations/settings/settings.md#max_insert_block_size)。 -- `max_threads` - ファイルを解析するための最大スレッド数。デフォルトは 0 で、これは max(1, physical_cpu_cores / 4) を意味します。 -- `poll_directory_watch_events_backoff_init` - ディレクトリ監視スレッドの初期スリープ値。デフォルト: `500`。 -- `poll_directory_watch_events_backoff_max` - ディレクトリ監視スレッドの最大スリープ値。デフォルト: `32000`。 -- `poll_directory_watch_events_backoff_factor` - バックオフの速さ。デフォルトは指数的です。デフォルト: `2`。 -- `handle_error_mode` — FileLog エンジンのエラー処理方法。可能な値: default(メッセージの解析に失敗した場合は例外がスローされる)、stream(例外メッセージと生のメッセージが仮想カラム `_error` と `_raw_message` に保存される)。 - -## 説明 {#description} - -配信されたレコードは自動的に追跡されるため、ログファイル内の各レコードは一度だけカウントされます。 - -`SELECT` はレコードを読むのには特に便利ではありません(デバッグを除いて)。なぜなら、各レコードは一度だけ読むことができるからです。リアルタイムスレッドを作成することがより実用的であり、そのためには [materialized views](../../../sql-reference/statements/create/view.md) を使用します。これを行うには: - -1. エンジンを使用して FileLog テーブルを作成し、データストリームとして考えます。 -2. 希望の構造を持つテーブルを作成します。 -3. 
エンジンからデータを変換し、事前に作成したテーブルに格納する materialized view を作成します。 - -`MATERIALIZED VIEW` がエンジンに参加すると、バックグラウンドでデータの収集を開始します。これにより、ログファイルからレコードを継続的に受け取り、`SELECT` を使用して必要な形式に変換できます。 -1 つの FileLog テーブルには、希望する数だけ materialized view を持つことができ、これらはテーブルから直接データを読み取るのではなく、新しいレコード(バッチで)を受け取ります。このようにして、異なる詳細レベル(グループ化 - 集約あり、なし)で複数のテーブルに書き込むことができます。 - -例: - -```sql - CREATE TABLE logs ( - timestamp UInt64, - level String, - message String - ) ENGINE = FileLog('user_files/my_app/app.log', 'JSONEachRow'); - - CREATE TABLE daily ( - day Date, - level String, - total UInt64 - ) ENGINE = SummingMergeTree(day, (day, level), 8192); - - CREATE MATERIALIZED VIEW consumer TO daily - AS SELECT toDate(toDateTime(timestamp)) AS day, level, count() as total - FROM queue GROUP BY day, level; - - SELECT level, sum(total) FROM daily GROUP BY level; -``` - -ストリームデータの受信を停止したり、変換ロジックを変更したりするには、materialized view を切り離します: - -```sql - DETACH TABLE consumer; - ATTACH TABLE consumer; -``` - -`ALTER` を使用してターゲットテーブルを変更する場合は、ターゲットテーブルとビューからのデータの不一致を避けるために、materialized view を無効にすることを推奨します。 - -## 仮想カラム {#virtual-columns} - -- `_filename` - ログファイルの名前。データ型:`LowCardinality(String)`。 -- `_offset` - ログファイル内のオフセット。データ型:`UInt64`。 - -`handle_error_mode='stream'` の場合の追加の仮想カラム: - -- `_raw_record` - 正しく解析できなかった生のレコード。データ型:`Nullable(String)`。 -- `_error` - 解析失敗時に発生した例外メッセージ。データ型:`Nullable(String)`。 - -注意: `_raw_record` および `_error` の仮想カラムは、解析中に例外が発生した場合のみ充填され、メッセージが正常に解析された場合は常に `NULL` です。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/special/filelog.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/special/filelog.md.hash deleted file mode 100644 index 343dd7f83f9..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/special/filelog.md.hash +++ /dev/null @@ -1 +0,0 @@ -fac8b3dd9af57bae diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/special/generate.md b/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/special/generate.md deleted file mode 100644 index 26239984e0e..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/special/generate.md +++ /dev/null @@ -1,61 +0,0 @@ ---- -description: 'The GenerateRandom table engine produces random data for given table - schema.' -sidebar_label: 'GenerateRandom' -sidebar_position: 140 -slug: '/engines/table-engines/special/generate' -title: 'GenerateRandom Table Engine' ---- - - - -The GenerateRandom table engine produces random data for given table schema. - -Usage examples: - -- Use in test to populate reproducible large table. -- Generate random input for fuzzing tests. - -## 使用法 in ClickHouse Server {#usage-in-clickhouse-server} - -```sql -ENGINE = GenerateRandom([random_seed [,max_string_length [,max_array_length]]]) -``` - -The `max_array_length` and `max_string_length` parameters specify maximum length of all -array or map columns and strings correspondingly in generated data. - -Generate table engine supports only `SELECT` queries. - -It supports all [DataTypes](../../../sql-reference/data-types/index.md) that can be stored in a table except `AggregateFunction`. 
- -## 例 {#example} - -**1.** Set up the `generate_engine_table` table: - -```sql -CREATE TABLE generate_engine_table (name String, value UInt32) ENGINE = GenerateRandom(1, 5, 3) -``` - -**2.** Query the data: - -```sql -SELECT * FROM generate_engine_table LIMIT 3 -``` - -```text -┌─name─┬──────value─┐ -│ c4xJ │ 1412771199 │ -│ r │ 1791099446 │ -│ 7#$ │ 124312908 │ -└──────┴────────────┘ -``` - -## 実装の詳細 {#details-of-implementation} - -- Not supported: - - `ALTER` - - `SELECT ... SAMPLE` - - `INSERT` - - Indices - - Replication diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/special/generate.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/special/generate.md.hash deleted file mode 100644 index 6b2a3e1bf1f..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/special/generate.md.hash +++ /dev/null @@ -1 +0,0 @@ -398be151ec5dff1b diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/special/index.md b/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/special/index.md deleted file mode 100644 index 206433e0e73..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/special/index.md +++ /dev/null @@ -1,45 +0,0 @@ ---- -description: 'Documentation for Special Table Engines' -sidebar_label: 'Special' -sidebar_position: 50 -slug: '/engines/table-engines/special/' -title: 'Special Table Engines' ---- - - - - -# 特殊なテーブルエンジン - -テーブルエンジンには主に3つのカテゴリがあります。 - -- [MergeTreeエンジンファミリ](../../../engines/table-engines/mergetree-family/index.md):主要な生産用途向け。 -- [Logエンジンファミリ](../../../engines/table-engines/log-family/index.md):小さな一時データ用。 -- [統合用テーブルエンジン](../../../engines/table-engines/integrations/index.md)。 - -残りのエンジンはその目的がユニークであり、ファミリにはまだグルーピングされていないため、この「特殊」カテゴリに位置付けられています。 - - -| ページ | 説明 | -|-----|-----| -| [Buffer Table Engine](/engines/table-engines/special/buffer) | データをRAMにバッファリングし、定期的に別のテーブルにフラッシュします。読み取り操作中は、データはバッファと他のテーブルから同時に読み込まれます。 | -| [Executable and ExecutablePool Table Engines](/engines/table-engines/special/executable) | `Executable`および`ExecutablePool`テーブルエンジンは、あなたが定義するスクリプトから生成された行を持つテーブルを定義できるようにします(**stdout**に行を書き込みます)。 | -| [URL Table Engine](/engines/table-engines/special/url) | リモートHTTP/HTTPSサーバーからデータをクエリします。このエンジンはFileエンジンに似ています。 | -| [View Table Engine](/engines/table-engines/special/view) | ビューを実装するために使用されます(詳細は`CREATE VIEW`クエリを参照)。データを保存せず、指定された`SELECT`クエリのみを保存します。テーブルから読み取るとき、このクエリを実行し(不要なカラムはすべて削除されます)、データを取得します。 | -| [Distributed Table Engine](/engines/table-engines/special/distributed) | Distributedエンジンを持つテーブルは、自身のデータを保存せず、複数のサーバーでの分散クエリ処理を可能にします。読み取りは自動的に並列化されます。読み取り中、リモートサーバーのテーブルインデックスがあれば、それが利用されます。 | -| [File Table Engine](/engines/table-engines/special/file) | Fileテーブルエンジンは、サポートされているファイルフォーマット(`TabSeparated`、`Native`など)のいずれかでファイルにデータを保存します。 | -| [FileLog Engine](/engines/table-engines/special/filelog) | このエンジンは、アプリケーションのログファイルをレコードのストリームとして処理することを可能にします。 | -| [Set Table Engine](/engines/table-engines/special/set) | 常にRAMにあるデータセット。`IN`演算子の右側での使用を目的としています。 | -| [Dictionary Table Engine](/engines/table-engines/special/dictionary) | `Dictionary`エンジンは、辞書データをClickHouseテーブルとして表示します。 | -| [GenerateRandom Table Engine](/engines/table-engines/special/generate) | GenerateRandomテーブルエンジンは、指定されたテーブルスキーマに対してランダムデータを生成します。 | -| [Memory Table Engine](/engines/table-engines/special/memory) | Memoryエンジンは、RAMにデータを非圧縮形式で保存します。データは、読み取ったときに受信したのと正確に同じ形で保存されます。言い換えれば、このテーブルからの読み取りは完全に無償です。 | -| [Merge Table 
Engine](/engines/table-engines/special/merge) | `Merge`エンジン(`MergeTree`と混同しないでください)は、データ自体を保存せず、他の任意のテーブルから同時に読み取ることを可能にします。 | -| [External Data for Query Processing](/engines/table-engines/special/external-data) | ClickHouseは、クエリ処理に必要なデータをサーバーに送信し、`SELECT`クエリとともに渡すことを許可します。このデータは一時テーブルに配置され、クエリで使用することができます(例えば、`IN`演算子内で)。 | -| [Join Table Engine](/engines/table-engines/special/join) | JOIN操作で使用するためのオプションの準備されたデータ構造。 | -| [KeeperMap](/engines/table-engines/special/keeper-map) | このエンジンは、Keeper/ZooKeeperクラスターを、一貫性のあるキーと値のストアとして、リニアライザブル書き込みと順序一貫性のある読み取りを提供します。 | -| [Null Table Engine](/engines/table-engines/special/null) | `Null`テーブルに書き込むと、データは無視されます。`Null`テーブルから読み取ると、レスポンスは空になります。 | diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/special/index.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/special/index.md.hash deleted file mode 100644 index dee976d2df0..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/special/index.md.hash +++ /dev/null @@ -1 +0,0 @@ -c1d8d21eb3bf49cc diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/special/join.md b/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/special/join.md deleted file mode 100644 index d59ce04c2e6..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/special/join.md +++ /dev/null @@ -1,169 +0,0 @@ ---- -description: 'JOIN 操作で使用するためのオプションの準備済みデータ構造。' -sidebar_label: 'Join' -sidebar_position: 70 -slug: '/engines/table-engines/special/join' -title: 'Join テーブルエンジン' ---- - - - - -# ジョインテーブルエンジン - -[JOIN](/sql-reference/statements/select/join) 操作に使用するオプションの準備データ構造です。 - -:::note -これは [JOIN句](/sql-reference/statements/select/join) 自体に関する記事ではありません。 -::: - -## テーブルの作成 {#creating-a-table} - -```sql -CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] -( - name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1] [TTL expr1], - name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2] [TTL expr2], -) ENGINE = Join(join_strictness, join_type, k1[, k2, ...]) -``` - -[CREATE TABLE](/sql-reference/statements/create/table) クエリの詳細な説明を参照してください。 - -## エンジンパラメータ {#engine-parameters} - -### join_strictness {#join_strictness} - -`join_strictness` – [JOINの厳密さ](/sql-reference/statements/select/join#supported-types-of-join)。 - -### join_type {#join_type} - -`join_type` – [JOINのタイプ](/sql-reference/statements/select/join#supported-types-of-join)。 - -### キーカラム {#key-columns} - -`k1[, k2, ...]` – `USING`句からのキー列で、`JOIN`操作が行われます。 - -`join_strictness` および `join_type` パラメータは引用符なしで入力してください。たとえば、`Join(ANY, LEFT, col1)` のように。これらは、テーブルが使用される `JOIN` 操作と一致する必要があります。パラメータが一致しない場合、ClickHouseは例外を投げず、正しくないデータを返す可能性があります。 - -## 特徴と推奨事項 {#specifics-and-recommendations} - -### データストレージ {#data-storage} - -`Join`テーブルのデータは常にRAMにあります。テーブルに行を挿入すると、ClickHouseはデータブロックをディレクトリにディスク上に書き込み、サーバーが再起動するときにそれらを復元できるようにします。 - -サーバーが不正に再起動した場合、ディスク上のデータブロックが失われるか、損傷する可能性があります。この場合、損傷したデータのファイルを手動で削除する必要があるかもしれません。 - -### データの選択と挿入 {#selecting-and-inserting-data} - -`INSERT`クエリを使用して、`Join`エンジンテーブルにデータを追加できます。テーブルが `ANY` 厳密さで作成された場合、重複キーのデータは無視されます。`ALL` 厳密さの場合は、すべての行が追加されます。 - -`Join`エンジンテーブルの主な使用ケースは以下の通りです: - -- `JOIN`句の右側にテーブルを配置します。 -- [joinGet](/sql-reference/functions/other-functions.md/#joinget) 関数を呼び出して、辞書からデータを抽出するのと同じ方法でテーブルからデータを取得します。 - -### データの削除 {#deleting-data} - -`Join`エンジンテーブルに対する `ALTER DELETE` 
クエリは、[ミューテーション](/sql-reference/statements/alter/index.md#mutations)として実装されています。`DELETE`ミューテーションは、フィルタリングされたデータを読み取り、メモリとディスクのデータを上書きします。 - -### 制限事項と設定 {#join-limitations-and-settings} - -テーブルを作成する際、次の設定が適用されます: - -#### join_use_nulls {#join_use_nulls} - -[join_use_nulls](/operations/settings/settings.md/#join_use_nulls) - -#### max_rows_in_join {#max_rows_in_join} - -[max_rows_in_join](/operations/settings/settings#max_rows_in_join) - -#### max_bytes_in_join {#max_bytes_in_join} - -[max_bytes_in_join](/operations/settings/settings#max_bytes_in_join) - -#### join_overflow_mode {#join_overflow_mode} - -[join_overflow_mode](/operations/settings/settings#join_overflow_mode) - -#### join_any_take_last_row {#join_any_take_last_row} - -[join_any_take_last_row](/operations/settings/settings.md/#join_any_take_last_row) -#### join_use_nulls {#join_use_nulls-1} - -#### persistent {#persistent} - -Join および [Set](/engines/table-engines/special/set.md) テーブルエンジンの持続性を無効にします。 - -I/Oのオーバーヘッドを削減します。パフォーマンスを追求し、持続性を要求しないシナリオに適しています。 - -可能な値: - -- 1 — 有効 -- 0 — 無効 - -デフォルト値: `1` - -`Join`エンジンテーブルは、`GLOBAL JOIN`操作で使用できません。 - -`Join`エンジンは、`CREATE TABLE`ステートメントにおいて[join_use_nulls](/operations/settings/settings.md/#join_use_nulls)設定を指定することを許可します。[SELECT](/sql-reference/statements/select/index.md) クエリは同じ `join_use_nulls` 値を持つ必要があります。 - -## 使用例 {#example} - -左側のテーブルを作成: - -```sql -CREATE TABLE id_val(`id` UInt32, `val` UInt32) ENGINE = TinyLog; -``` - -```sql -INSERT INTO id_val VALUES (1,11)(2,12)(3,13); -``` - -右側の `Join` テーブルを作成: - -```sql -CREATE TABLE id_val_join(`id` UInt32, `val` UInt8) ENGINE = Join(ANY, LEFT, id); -``` - -```sql -INSERT INTO id_val_join VALUES (1,21)(1,22)(3,23); -``` - -テーブルを結合: - -```sql -SELECT * FROM id_val ANY LEFT JOIN id_val_join USING (id); -``` - -```text -┌─id─┬─val─┬─id_val_join.val─┐ -│ 1 │ 11 │ 21 │ -│ 2 │ 12 │ 0 │ -│ 3 │ 13 │ 23 │ -└────┴─────┴─────────────────┘ -``` - -代わりに、結合キーの値を指定して `Join` テーブルからデータを取得できます: - -```sql -SELECT joinGet('id_val_join', 'val', toUInt32(1)); -``` - -```text -┌─joinGet('id_val_join', 'val', toUInt32(1))─┐ -│ 21 │ -└────────────────────────────────────────────┘ -``` - -`Join` テーブルから行を削除: - -```sql -ALTER TABLE id_val_join DELETE WHERE id = 3; -``` - -```text -┌─id─┬─val─┐ -│ 1 │ 21 │ -└────┴─────┘ -``` diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/special/join.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/special/join.md.hash deleted file mode 100644 index 9fd45ba5501..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/special/join.md.hash +++ /dev/null @@ -1 +0,0 @@ -10e3a822f99b5edd diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/special/keepermap.md b/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/special/keepermap.md deleted file mode 100644 index 84d830a86f3..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/special/keepermap.md +++ /dev/null @@ -1,124 +0,0 @@ ---- -description: 'This engine allows you to use Keeper/ZooKeeper cluster as consistent - key-value store with linearizable writes and sequentially consistent reads.' 
-sidebar_label: 'KeeperMap' -sidebar_position: 150 -slug: '/engines/table-engines/special/keeper-map' -title: 'KeeperMap' ---- - - - - -# KeeperMap {#keepermap} - -このエンジンを使用すると、Keeper/ZooKeeper クラスターを一貫したキー・バリュー・ストアとして利用でき、線形整合性のある書き込みと逐次整合性のある読み取りを提供します。 - -KeeperMap ストレージエンジンを有効にするには、テーブルが格納される ZooKeeper パスを `` 設定を使用して定義する必要があります。 - -例えば: - -```xml - - /keeper_map_tables - -``` - -ここで、パスは有効な別の ZooKeeper パスであれば何でも可能です。 - -## テーブルの作成 {#creating-a-table} - -```sql -CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] -( - name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1], - name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2], - ... -) ENGINE = KeeperMap(root_path, [keys_limit]) PRIMARY KEY(primary_key_name) -``` - -エンジンのパラメータ: - -- `root_path` - `table_name` が格納される ZooKeeper パス。 -このパスには `` 設定で定義されたプレフィックスを含めてはいけません。このプレフィックスは自動的に `root_path` に追加されます。 -さらに、`auxiliary_zookeeper_cluster_name:/some/path` の形式もサポートされており、`auxiliary_zookeeper_cluster` は `` 設定の中で定義された ZooKeeper クラスターです。 -デフォルトでは、`` 設定の中で定義された ZooKeeper クラスターが使用されます。 -- `keys_limit` - テーブル内で許可されるキーの数。 -この制限はソフトリミットであり、一部のエッジケースではテーブルにさらに多くのキーが存在することがあるかもしれません。 -- `primary_key_name` – カラムリスト内の任意のカラム名。 -- `primary key` は指定する必要があり、主キーには1つのカラムのみをサポートします。主キーは ZooKeeper 内で `node name` としてバイナリにシリアル化されます。 -- 主キー以外のカラムは、対応する順序でバイナリにシリアル化され、シリアル化されたキーで定義された結果ノードの値として格納されます。 -- キーに対する `equals` または `in` フィルタリングを伴うクエリは、`Keeper` からのマルチキー検索に最適化されます。それ以外の場合は、すべての値がフェッチされます。 - -例: - -```sql -CREATE TABLE keeper_map_table -( - `key` String, - `v1` UInt32, - `v2` String, - `v3` Float32 -) -ENGINE = KeeperMap('/keeper_map_table', 4) -PRIMARY KEY key -``` - -と、 - -```xml - - /keeper_map_tables - -``` - - -各値は `(v1, v2, v3)` のバイナリシリアル化であり、`Keeper` の `/keeper_map_tables/keeper_map_table/data/serialized_key` に格納されます。 -さらに、キーの数には4というソフトリミットがあります。 - -同じ ZooKeeper パスに複数のテーブルが作成されると、値はそのテーブルのうち少なくとも1つが存在する限り永続化されます。 -その結果、テーブル作成時に `ON CLUSTER` 句を使用して、複数の ClickHouse インスタンスからデータを共有することが可能です。 -もちろん、関連のない ClickHouse インスタンスで同じパスを使って手動で `CREATE TABLE` を実行し、同様のデータ共有効果を得ることも可能です。 - -## サポートされている操作 {#supported-operations} - -### 挿入 {#inserts} - -新しい行が `KeeperMap` に挿入されると、キーが存在しない場合はキーの新しいエントリが作成されます。 -キーが存在する場合、`keeper_map_strict_mode` の設定が `true` に設定されていると、例外がスローされます。そうでない場合、キーの値は上書きされます。 - -例: - -```sql -INSERT INTO keeper_map_table VALUES ('some key', 1, 'value', 3.2); -``` - -### 削除 {#deletes} - -行は `DELETE` クエリまたは `TRUNCATE` を使用して削除できます。 -キーが存在し、`keeper_map_strict_mode` の設定が `true` に設定されていると、データの取得と削除は原子的に実行される場合のみ成功します。 - -```sql -DELETE FROM keeper_map_table WHERE key LIKE 'some%' AND v1 > 1; -``` - -```sql -ALTER TABLE keeper_map_table DELETE WHERE key LIKE 'some%' AND v1 > 1; -``` - -```sql -TRUNCATE TABLE keeper_map_table; -``` - -### 更新 {#updates} - -値は `ALTER TABLE` クエリを使用して更新できます。主キーは更新できません。 -`keeper_map_strict_mode` の設定が `true` に設定されていると、データの取得と更新は原子的に実行される場合のみ成功します。 - -```sql -ALTER TABLE keeper_map_table UPDATE v1 = v1 * 10 + 2 WHERE key LIKE 'some%' AND v3 > 3.1; -``` - -## 関連コンテンツ {#related-content} - -- ブログ: [ClickHouse と Hex を使用したリアルタイム分析アプリの構築](https://clickhouse.com/blog/building-real-time-applications-with-clickhouse-and-hex-notebook-keeper-engine) diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/special/keepermap.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/special/keepermap.md.hash deleted file mode 100644 index 0ad37c0eaa5..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/special/keepermap.md.hash +++ /dev/null @@ -1 +0,0 @@ 
-1c8f6004616bbfd4 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/special/memory.md b/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/special/memory.md deleted file mode 100644 index 04d5e5719ed..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/special/memory.md +++ /dev/null @@ -1,112 +0,0 @@ ---- -description: 'The Memory engine stores data in RAM, in uncompressed form. Data is - stored in exactly the same form as it is received when read. In other words, reading - from this table is completely free.' -sidebar_label: 'Memory' -sidebar_position: 110 -slug: '/engines/table-engines/special/memory' -title: 'Memory Table Engine' ---- - - - - -# Memory Table Engine - -:::note -ClickHouse CloudでMemoryテーブルエンジンを使用する際、データはすべてのノード間でレプリケーションされません(設計上)。すべてのクエリが同じノードにルーティングされ、Memoryテーブルエンジンが期待どおりに機能することを保証するために、次のいずれかを行うことができます: -- 同じセッション内ですべての操作を実行する -- [clickhouse-client](/interfaces/cli) のようにTCPまたはネイティブインターフェースを使用するクライアントを使用する(これによりスティッキー接続がサポートされます) -::: - -MemoryエンジンはデータをRAMに非圧縮形式で保存します。データは読み取られるときに受け取ったままの形式で保存されます。言い換えれば、このテーブルからの読み取りは完全に無料です。 -同時データアクセスは同期されます。ロックは短時間で済みます:読み取りと書き込みの操作は互いにブロックしません。 -インデックスはサポートされていません。読み取りは並列処理されます。 - -最大の生産性(10GB/秒を超える)はシンプルなクエリで達成されます。これは、ディスクからの読み取り、データの解凍、またはデシリアライズがないためです。(多くのケースで、MergeTreeエンジンの生産性もほぼ同じくらい高いことに注意する必要があります。) -サーバーを再起動すると、データはテーブルから消え、テーブルは空になります。 -通常、このテーブルエンジンの使用は正当化されません。しかし、テストや相対的に少数の行(約100,000,000行まで)で最大の速度が必要なタスクには使用することができます。 - -Memoryエンジンは、外部クエリデータを使用した一時テーブルにシステムによって使用されます(「クエリの処理のための外部データ」セクションを参照)および`GLOBAL IN`を実装するために使用されます(「IN演算子」セクションを参照)。 - -Memoryエンジンのテーブルサイズを制限するために上限と下限を指定でき、効果的に円形バッファとして機能します([エンジンパラメータ](#engine-parameters)を参照)。 - -## Engine Parameters {#engine-parameters} - -- `min_bytes_to_keep` — メモリテーブルがサイズ制限されている場合に保持する最小バイト数。 - - デフォルト値: `0` - - `max_bytes_to_keep`を必要とします -- `max_bytes_to_keep` — メモリテーブル内で保持する最大バイト数で、最古の行は各挿入時に削除されます(つまり円形バッファ)。最古の削除対象の行のバッチが大きなブロックを追加する際に`min_bytes_to_keep`制限を下回る場合、最大バイト数は指定された制限を超えることがあります。 - - デフォルト値: `0` -- `min_rows_to_keep` — メモリテーブルがサイズ制限されている場合に保持する最小行数。 - - デフォルト値: `0` - - `max_rows_to_keep`を必要とします -- `max_rows_to_keep` — メモリテーブル内で保持する最大行数で、最古の行は各挿入時に削除されます(つまり円形バッファ)。最古の削除対象の行のバッチが大きなブロックを追加する際に`min_rows_to_keep`制限を下回る場合、最大行数は指定された制限を超えることがあります。 - - デフォルト値: `0` -- `compress` - メモリ内のデータを圧縮するかどうか。 - - デフォルト値: `false` - -## Usage {#usage} - -**設定の初期化** -```sql -CREATE TABLE memory (i UInt32) ENGINE = Memory SETTINGS min_rows_to_keep = 100, max_rows_to_keep = 1000; -``` - -**設定の変更** -```sql -ALTER TABLE memory MODIFY SETTING min_rows_to_keep = 100, max_rows_to_keep = 1000; -``` - -**注意:** `bytes`と`rows`の制限パラメータは同時に設定できますが、`max`と`min`の下限は遵守されます。 - -## Examples {#examples} -```sql -CREATE TABLE memory (i UInt32) ENGINE = Memory SETTINGS min_bytes_to_keep = 4096, max_bytes_to_keep = 16384; - -/* 1. 最古のブロックが最小しきい値のために削除されないことをテスト - 3000行 */ -INSERT INTO memory SELECT * FROM numbers(0, 1600); -- 8'192バイト - -/* 2. 削除されないブロックの追加 */ -INSERT INTO memory SELECT * FROM numbers(1000, 100); -- 1'024バイト - -/* 3. 最古のブロックが削除されることをテスト - 9216バイト - 1100 */ -INSERT INTO memory SELECT * FROM numbers(9000, 1000); -- 8'192バイト - -/* 4. 
非常に大きなブロックがすべてを上書きすることを確認 */ -INSERT INTO memory SELECT * FROM numbers(9000, 10000); -- 65'536バイト - -SELECT total_bytes, total_rows FROM system.tables WHERE name = 'memory' and database = currentDatabase(); -``` - -```text -┌─total_bytes─┬─total_rows─┐ -│ 65536 │ 10000 │ -└─────────────┴────────────┘ -``` - -さらに、行に関して: - -```sql -CREATE TABLE memory (i UInt32) ENGINE = Memory SETTINGS min_rows_to_keep = 4000, max_rows_to_keep = 10000; - -/* 1. 最古のブロックが最小しきい値のために削除されないことをテスト - 3000行 */ -INSERT INTO memory SELECT * FROM numbers(0, 1600); -- 1'600行 - -/* 2. 削除されないブロックの追加 */ -INSERT INTO memory SELECT * FROM numbers(1000, 100); -- 100行 - -/* 3. 最古のブロックが削除されることをテスト - 9216バイト - 1100 */ -INSERT INTO memory SELECT * FROM numbers(9000, 1000); -- 1'000行 - -/* 4. 非常に大きなブロックがすべてを上書きすることを確認 */ -INSERT INTO memory SELECT * FROM numbers(9000, 10000); -- 10'000行 - -SELECT total_bytes, total_rows FROM system.tables WHERE name = 'memory' and database = currentDatabase(); -``` - -```text -┌─total_bytes─┬─total_rows─┐ -│ 65536 │ 10000 │ -└─────────────┴────────────┘ -``` diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/special/memory.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/special/memory.md.hash deleted file mode 100644 index 69e3a865438..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/special/memory.md.hash +++ /dev/null @@ -1 +0,0 @@ -a364d9b151f456cd diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/special/merge.md b/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/special/merge.md deleted file mode 100644 index 54197e08319..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/special/merge.md +++ /dev/null @@ -1,118 +0,0 @@ ---- -description: 'The `Merge` engine (not to be confused with `MergeTree`) does not - store data itself, but allows reading from any number of other tables simultaneously.' -sidebar_label: 'Merge' -sidebar_position: 30 -slug: '/engines/table-engines/special/merge' -title: 'Merge Table Engine' ---- - - - - -# Merge Table Engine - -`Merge`エンジン(`MergeTree`と混同しないでください)は、データを自身で保存することはありませんが、他の任意の数のテーブルから同時に読み取ることを可能にします。 - -読み取りは自動的に並列化されます。テーブルへの書き込みはサポートされていません。読み取る際には、実際に読み取られているテーブルのインデックスが使用されます(存在する場合)。 - -## テーブルの作成 {#creating-a-table} - -```sql -CREATE TABLE ... 
Engine=Merge(db_name, tables_regexp [, table_to_write]) -``` - -## エンジンパラメータ {#engine-parameters} - -### db_name {#db_name} - -`db_name` — 可能な値: - - データベース名、 - - データベース名を返す定数式、例えば、`currentDatabase()`、 - - `REGEXP(expression)`、ここで `expression` はDB名に一致する正規表現です。 - -### tables_regexp {#tables_regexp} - -`tables_regexp` — 指定されたDBまたはDBs内のテーブル名に一致する正規表現。 - -正規表現 — [re2](https://github.com/google/re2)(PCREのサブセットをサポート)、大文字と小文字を区別します。 -正規表現のシンボルのエスケープに関する注意点は、「一致」セクションを参照してください。 - -### table_to_write {#table_to_write} - -`table_to_write` - `Merge`テーブルへの挿入時に書き込むテーブル名。 -可能な値: - - `'db_name.table_name'` - 特定のデータベースの特定のテーブルに挿入します。 - - `'table_name'` - テーブル `db_name.table_name` に挿入します。最初のパラメータ `db_name` が正規表現でない場合のみ許可されます。 - - `auto` - 辞書順で`tables_regexp`に渡された最後のテーブルに挿入します。最初のパラメータ `db_name` が正規表現でない場合のみ許可されます。 - -## 使用法 {#usage} - -読み取るテーブルを選択する際に、`Merge`テーブル自体は選択されません。これはループを避けるためです。 -互いのデータを無限に読み取ろうとする2つの`Merge`テーブルを作成することは可能ですが、良いアイデアではありません。 - -`Merge`エンジンの典型的な使用方法は、多数の`TinyLog`テーブルを単一のテーブルとして操作することです。 - -## 例 {#examples} - -**例 1** - -2つのデータベース `ABC_corporate_site` と `ABC_store`を考えます。`all_visitors`テーブルは、両方のデータベースの`visitors`テーブルからIDを含みます。 - -```sql -CREATE TABLE all_visitors (id UInt32) ENGINE=Merge(REGEXP('ABC_*'), 'visitors'); -``` - -**例 2** - -古いテーブル`WatchLog_old`があり、データを新しいテーブル`WatchLog_new`に移動することなくパーティショニングを変更することにしたとしましょう。そして、両方のテーブルのデータを確認する必要があります。 - -```sql -CREATE TABLE WatchLog_old(date Date, UserId Int64, EventType String, Cnt UInt64) - ENGINE=MergeTree(date, (UserId, EventType), 8192); -INSERT INTO WatchLog_old VALUES ('2018-01-01', 1, 'hit', 3); - -CREATE TABLE WatchLog_new(date Date, UserId Int64, EventType String, Cnt UInt64) - ENGINE=MergeTree PARTITION BY date ORDER BY (UserId, EventType) SETTINGS index_granularity=8192; -INSERT INTO WatchLog_new VALUES ('2018-01-02', 2, 'hit', 3); - -CREATE TABLE WatchLog as WatchLog_old ENGINE=Merge(currentDatabase(), '^WatchLog', 'WatchLog_new'); - -SELECT * FROM WatchLog; -``` - -```text -┌───────date─┬─UserId─┬─EventType─┬─Cnt─┐ -│ 2018-01-01 │ 1 │ hit │ 3 │ -└────────────┴────────┴───────────┴─────┘ -┌───────date─┬─UserId─┬─EventType─┬─Cnt─┐ -│ 2018-01-02 │ 2 │ hit │ 3 │ -└────────────┴────────┴───────────┴─────┘ -``` - -テーブル`WatchLog`への挿入はテーブル`WatchLog_new`に行われます。 -```sql -INSERT INTO WatchLog VALUES ('2018-01-03', 3, 'hit', 3); - -SELECT * FROM WatchLog_New; -``` - -```text -┌───────date─┬─UserId─┬─EventType─┬─Cnt─┐ -│ 2018-01-02 │ 2 │ hit │ 3 │ -└────────────┴────────┴───────────┴─────┘ -┌───────date─┬─UserId─┬─EventType─┬─Cnt─┐ -│ 2018-01-03 │ 3 │ hit │ 3 │ -└────────────┴────────┴───────────┴─────┘ -``` - -## 仮想カラム {#virtual-columns} - -- `_table` — データが読み取られたテーブルの名前を含みます。型: [String](../../../sql-reference/data-types/string.md)。 - - `WHERE/PREWHERE`節で`_table`に定数条件を設定できます(例えば、`WHERE _table='xyz'`)。この場合、読み取り操作は条件を満たすテーブルに対してのみ行われるため、`_table`カラムはインデックスとして機能します。 - -**参照先** - -- [仮想カラム](../../../engines/table-engines/index.md#table_engines-virtual_columns) -- [merge](../../../sql-reference/table-functions/merge.md) テーブル関数 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/special/merge.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/special/merge.md.hash deleted file mode 100644 index 9ea895310c0..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/special/merge.md.hash +++ /dev/null @@ -1 +0,0 @@ -68825be57255da2c diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/special/null.md 
b/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/special/null.md deleted file mode 100644 index 4e46d268021..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/special/null.md +++ /dev/null @@ -1,18 +0,0 @@ ---- -description: 'Null テーブルに書き込むと、データは無視されます。Null テーブルから読み取ると、応答は空になります。' -sidebar_label: 'Null' -sidebar_position: 50 -slug: '/engines/table-engines/special/null' -title: 'Nullテーブルエンジン' ---- - - - - -# Null Table Engine - -`Null` テーブルに書き込むと、データは無視されます。`Null` テーブルから読み込むと、応答は空になります。 - -:::note -これが役立つ理由に興味がある場合は、`Null` テーブルにマテリアライズドビューを作成できることに注意してください。したがって、テーブルに書き込まれたデータはビューに影響を与えますが、元の生データは依然として破棄されます。 -::: diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/special/null.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/special/null.md.hash deleted file mode 100644 index 90863020c5e..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/special/null.md.hash +++ /dev/null @@ -1 +0,0 @@ -5c3da08a411765e5 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/special/set.md b/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/special/set.md deleted file mode 100644 index edc33c47fb1..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/special/set.md +++ /dev/null @@ -1,38 +0,0 @@ ---- -description: 'A data set that is always in RAM. It is intended for use on the right - side of the `IN` operator.' -sidebar_label: 'Set' -sidebar_position: 60 -slug: '/engines/table-engines/special/set' -title: 'Set Table Engine' ---- - - - - -# Set Table Engine - -常にRAMにあるデータセットです。`IN`演算子の右側での使用を目的としています(「IN演算子」セクションを参照)。 - -`INSERT`を使用してテーブルにデータを挿入できます。新しい要素はデータセットに追加され、重複は無視されます。しかし、テーブルから`SELECT`を実行することはできません。データを取得する唯一の方法は、`IN`演算子の右半分で使用することです。 - -データは常にRAMにあります。`INSERT`の場合、挿入されたデータのブロックもディスク上のテーブルのディレクトリに書き込まれます。サーバーを起動すると、このデータがRAMに読み込まれます。言い換えれば、再起動後もデータはそのまま残ります。 - -サーバーが強制的に再起動されると、ディスク上のデータブロックが失われるか、損傷する可能性があります。後者の場合、損傷したデータのファイルを手動で削除する必要があるかもしれません。 - -### Limitations and Settings {#join-limitations-and-settings} - -テーブルを作成するとき、以下の設定が適用されます。 - -#### persistent {#persistent} - -Setおよび[Join](/engines/table-engines/special/join)テーブルエンジンの永続性を無効にします。 - -I/Oオーバーヘッドを削減します。パフォーマンスを追求し、永続性を必要としないシナリオに適しています。 - -考えられる値: - -- 1 — 有効。 -- 0 — 無効。 - -デフォルト値: `1`。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/special/set.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/special/set.md.hash deleted file mode 100644 index cd3e9087be1..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/special/set.md.hash +++ /dev/null @@ -1 +0,0 @@ -437af011ae7b9853 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/special/url.md b/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/special/url.md deleted file mode 100644 index 5865687f183..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/special/url.md +++ /dev/null @@ -1,114 +0,0 @@ ---- -description: 'リモートのHTTP/HTTPSサーバーとの間でデータをクエリします。このエンジンはFileエンジンと類似しています。' -sidebar_label: 'URL' -sidebar_position: 80 -slug: '/engines/table-engines/special/url' -title: 'URL テーブルエンジン' ---- - - - - - -# URLテーブルエンジン - -リモートHTTP/HTTPSサーバーからデータをクエリします。このエンジンは[File](../../../engines/table-engines/special/file.md)エンジンに似ています。 - -構文: `URL(URL [,Format] [,CompressionMethod])` - -- 
`URL`パラメータは、Uniform Resource Locatorの構造に準拠する必要があります。指定されたURLはHTTPまたはHTTPSを使用するサーバーを指す必要があります。サーバーからの応答を取得するために追加のヘッダーは必要ありません。 - -- `Format`はClickHouseが`SELECT`クエリや、必要に応じて`INSERT`で使用できるものでなければなりません。サポートされるフォーマットの完全なリストについては、[Formats](/interfaces/formats#formats-overview)を参照してください。 - - この引数が指定されていない場合、ClickHouseは自動的に`URL`パラメータのサフィックスからフォーマットを検出します。`URL`パラメータのサフィックスがサポートされるフォーマットのいずれとも一致しない場合、テーブルの作成に失敗します。たとえば、エンジン式`URL('http://localhost/test.json')`の場合、`JSON`フォーマットが適用されます。 - -- `CompressionMethod`は、HTTP本体を圧縮する必要があるかどうかを示します。圧縮が有効になっている場合、URLエンジンによって送信されるHTTPパケットには、どの圧縮方式が使用されているかを示す'Content-Encoding'ヘッダーが含まれます。 - -圧縮を有効にするには、まず`URL`パラメータで指定されたリモートHTTPエンドポイントが対応する圧縮アルゴリズムをサポートしていることを確認してください。 - -サポートされている`CompressionMethod`は以下のいずれかである必要があります: -- gzipまたはgz -- deflate -- brotliまたはbr -- lzmaまたはxz -- zstdまたはzst -- lz4 -- bz2 -- snappy -- none -- auto - -`CompressionMethod`が指定されていない場合、デフォルトは`auto`です。これはClickHouseが`URL`パラメータのサフィックスから自動的に圧縮方式を検出することを意味します。サフィックスが上記の圧縮方法のいずれかと一致する場合、対応する圧縮が適用されますが、一致しない場合は圧縮は有効になりません。 - -たとえば、エンジン式`URL('http://localhost/test.gzip')`の場合、`gzip`圧縮方式が適用されますが、`URL('http://localhost/test.fr')`の場合、サフィックス`fr`が上記の圧縮方式のいずれとも一致しないため、圧縮は有効になりません。 - -## 使用法 {#using-the-engine-in-the-clickhouse-server} - -`INSERT`および`SELECT`クエリは、それぞれ`POST`および`GET`リクエストに変換されます。`POST`リクエストを処理するために、リモートサーバーは[Chunked transfer encoding](https://en.wikipedia.org/wiki/Chunked_transfer_encoding)をサポートしている必要があります。 - -[ max_http_get_redirects](/operations/settings/settings#max_http_get_redirects)設定を使用して、最大HTTP GETリダイレクトホップ数を制限できます。 - -## 例 {#example} - -**1.** サーバー上に`url_engine_table`テーブルを作成します: - -```sql -CREATE TABLE url_engine_table (word String, value UInt64) -ENGINE=URL('http://127.0.0.1:12345/', CSV) -``` - -**2.** 標準のPython 3ツールを使って基本的なHTTPサーバーを作成し、起動します: - -```python3 -from http.server import BaseHTTPRequestHandler, HTTPServer - -class CSVHTTPServer(BaseHTTPRequestHandler): - def do_GET(self): - self.send_response(200) - self.send_header('Content-type', 'text/csv') - self.end_headers() - - self.wfile.write(bytes('Hello,1\nWorld,2\n', "utf-8")) - -if __name__ == "__main__": - server_address = ('127.0.0.1', 12345) - HTTPServer(server_address, CSVHTTPServer).serve_forever() -``` - -```bash -$ python3 server.py -``` - -**3.** データをリクエストします: - -```sql -SELECT * FROM url_engine_table -``` - -```text -┌─word──┬─value─┐ -│ Hello │ 1 │ -│ World │ 2 │ -└───────┴───────┘ -``` - -## 実装の詳細 {#details-of-implementation} - -- 読み込みと書き込みは並列で行えます -- サポートされていないもの: - - `ALTER`および`SELECT...SAMPLE`操作。 - - インデックス。 - - レプリケーション。 - -## 仮想カラム {#virtual-columns} - -- `_path` — `URL`へのパス。型: `LowCardinality(String)`. -- `_file` — `URL`のリソース名。型: `LowCardinality(String)`. -- `_size` — リソースのサイズ(バイト)。型: `Nullable(UInt64)`。サイズが不明な場合、値は`NULL`です。 -- `_time` — ファイルの最終更新時刻。型: `Nullable(DateTime)`。時刻が不明な場合、値は`NULL`です。 -- `_headers` - HTTP応答ヘッダー。型: `Map(LowCardinality(String), LowCardinality(String))`. 
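As an illustration, the virtual columns above can be queried from the `url_engine_table` created in the example; like other virtual columns, they are not returned by `SELECT *` and must be named explicitly (a minimal sketch):

```sql
-- word and value are the table's own columns; _path, _file and _size describe the fetched resource
SELECT _path, _file, _size, word, value
FROM url_engine_table;
```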
- -## ストレージ設定 {#storage-settings} - -- [engine_url_skip_empty_files](/operations/settings/settings.md#engine_url_skip_empty_files) - 読み込み中に空ファイルをスキップすることを許可します。デフォルトでは無効です。 -- [enable_url_encoding](/operations/settings/settings.md#enable_url_encoding) - URI内のパスのデコード/エンコードの有効/無効を設定できます。デフォルトでは有効です。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/special/url.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/special/url.md.hash deleted file mode 100644 index e518e4e8050..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/special/url.md.hash +++ /dev/null @@ -1 +0,0 @@ -e14293775456537d diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/special/view.md b/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/special/view.md deleted file mode 100644 index a4adfc45f2c..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/special/view.md +++ /dev/null @@ -1,15 +0,0 @@ ---- -description: 'ビューを実装するために使用されます(詳細については、 `CREATE VIEW クエリ` を参照してください)。データを保存せず、指定された - `SELECT` クエリのみを保存します。 テーブルから読み取る場合、このクエリを実行します(クエリから不要な列をすべて削除します)。' -sidebar_label: 'View' -sidebar_position: 90 -slug: '/engines/table-engines/special/view' -title: 'View テーブルエンジン' ---- - - - - -# View Table Engine - -ビューを実装するために使用されます(詳細については、`CREATE VIEW クエリ`を参照してください)。データは保存せず、指定された `SELECT` クエリのみを保存します。テーブルから読み取る際には、このクエリを実行し(クエリからすべての不要なカラムを削除します)。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/special/view.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/special/view.md.hash deleted file mode 100644 index 42b8b2dad49..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/special/view.md.hash +++ /dev/null @@ -1 +0,0 @@ -d855be041d808fff diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/faq/_category_.yml b/i18n/jp/docusaurus-plugin-content-docs/current/faq/_category_.yml deleted file mode 100644 index d3b55f0a328..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/faq/_category_.yml +++ /dev/null @@ -1,7 +0,0 @@ -position: 1 -label: 'FAQ' -collapsible: true -collapsed: true -link: - type: doc - id: en/faq/index diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/faq/general/_category_.yml b/i18n/jp/docusaurus-plugin-content-docs/current/faq/general/_category_.yml deleted file mode 100644 index 52163f94150..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/faq/general/_category_.yml +++ /dev/null @@ -1,4 +0,0 @@ -position: 10 -label: 'General' -collapsible: true -collapsed: true diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/faq/general/columnar-database.md b/i18n/jp/docusaurus-plugin-content-docs/current/faq/general/columnar-database.md deleted file mode 100644 index a3a91a2c430..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/faq/general/columnar-database.md +++ /dev/null @@ -1,34 +0,0 @@ ---- -slug: '/faq/general/columnar-database' -title: 'What is a columnar database?' -toc_hidden: true -toc_priority: 101 -description: 'This page describes what a columnar database is' ---- - -import Image from '@theme/IdealImage'; -import RowOriented from '@site/static/images/row-oriented.gif'; -import ColumnOriented from '@site/static/images/column-oriented.gif'; - - -# カラム指向データベースとは? 
{#what-is-a-columnar-database} - -カラム指向データベースは、各カラムのデータを独立して保存します。これにより、特定のクエリで使用されるカラムのみをディスクから読み込むことができます。その代償として、全行に影響を与える操作は比例して高コストになります。カラム指向データベースの同義語は、カラム指向データベース管理システムです。ClickHouseはそのようなシステムの典型的な例です。 - -カラム指向データベースの主な利点は以下の通りです: - -- 多くのカラムの中からいくつかのカラムのみを使用するクエリ。 -- 大量のデータに対する集約クエリ。 -- カラム単位のデータ圧縮。 - -以下は、レポートを作成する際の従来の行指向システムとカラム指向データベースの違いを示す図です: - -**従来の行指向** -従来の行指向データベース - -**カラム指向** -カラム指向データベース - -カラム指向データベースは、分析アプリケーションに最適な選択肢です。これは、必要な場合に多くのカラムをテーブルに持てる一方で、読み取りクエリの実行時間において未使用のカラムに対するコストを支払わなくて済むためです(従来のOLTPデータベースは、データが行として保存されているため、クエリ中にすべてのデータを読み取ります)。カラム指向データベースはビッグデータ処理やデータウェアハウジングのために設計されており、低コストのハードウェアの分散クラスターを使用してスケールすることが多く、スループットを向上させます。ClickHouseは、[分散](../../engines/table-engines/special/distributed.md)および[レプリケated](../../engines/table-engines/mergetree-family/replication.md)テーブルの組み合わせでこれを実現しています。 - -カラムデータベースの歴史や行指向データベースとの違い、カラムデータベースのユースケースについて詳しく知りたい場合は、[カラムデータベースガイド](https://clickhouse.com/engineering-resources/what-is-columnar-database)をご覧ください。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/faq/general/columnar-database.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/faq/general/columnar-database.md.hash deleted file mode 100644 index 87cf8c75250..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/faq/general/columnar-database.md.hash +++ /dev/null @@ -1 +0,0 @@ -01baf6163d3274e0 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/faq/general/dbms-naming.md b/i18n/jp/docusaurus-plugin-content-docs/current/faq/general/dbms-naming.md deleted file mode 100644 index c7872ca7bb3..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/faq/general/dbms-naming.md +++ /dev/null @@ -1,23 +0,0 @@ ---- -title: 'ClickHouseとは何を意味するのか?' -toc_hidden: true -toc_priority: 10 -slug: '/faq/general/dbms-naming' -description: '"ClickHouse"の意味について学びます。' ---- - - - - -# What Does "ClickHouse" Mean? 
{#what-does-clickhouse-mean} - -それは "**Click**stream" と "Data ware**House**" の組み合わせです。これはYandex.Metricaでの元々のユースケースに由来しており、ClickHouseはインターネット全体の人々によるすべてのクリックの記録を保持することを目的としていましたし、現在でもその役割を果たしています。このユースケースについては [ClickHouse history](../../about-us/history.md) ページでさらに読むことができます。 - -この二つの部分の意味には二つの結果があります。 - -- Click**H**ouse を正しく書く唯一の方法は、大文字のHを使うことです。 -- 短縮する必要がある場合は、**CH** を使用してください。歴史的な理由から、中国ではCKという略称も人気があります。これは、最初のクリックハウスに関する中国語の講演の一つがこの形を使用したためです。 - -:::info -ClickHouseがその名前を得た何年も後に、意味のある二つの単語を組み合わせるこのアプローチが、[Andy Pavloの研究](https://www.cs.cmu.edu/~pavlo/blog/2020/03/on-naming-a-database-management-system.html) においてデータベースの最適な命名方法として強調されました。彼はカーネギーメロン大学のデータベースの准教授です。ClickHouseは、「史上最も良いデータベース名」の賞をPostgresと共有しました。 -::: diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/faq/general/dbms-naming.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/faq/general/dbms-naming.md.hash deleted file mode 100644 index 3d9ab2e5053..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/faq/general/dbms-naming.md.hash +++ /dev/null @@ -1 +0,0 @@ -3e23b89d13268e75 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/faq/general/index.md b/i18n/jp/docusaurus-plugin-content-docs/current/faq/general/index.md deleted file mode 100644 index a2d256f8a9d..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/faq/general/index.md +++ /dev/null @@ -1,32 +0,0 @@ ---- -slug: '/faq/general/' -sidebar_position: 1 -sidebar_label: 'ClickHouseに関する一般的な質問' -keywords: -- 'clickhouse' -- 'faq' -- 'questions' -- 'what is' -title: 'ClickHouseに関する一般的な質問' -description: 'ClickHouseに関する一般的な質問を一覧表示するインデックスページ' ---- - - - - -# ClickHouseに関する一般的な質問 - -- [ClickHouseとは何ですか?](../../intro.md) -- [ClickHouseはなぜこんなに速いのですか?](../../concepts/why-clickhouse-is-so-fast.md) -- [誰がClickHouseを使用していますか?](../../faq/general/who-is-using-clickhouse.md) -- [「ClickHouse」とは何を意味しますか?](../../faq/general/dbms-naming.md) -- [「Не тормозит」とは何を意味しますか?](../../faq/general/ne-tormozit.md) -- [OLAPとは何ですか?](../../faq/general/olap.md) -- [列指向データベースとは何ですか?](../../faq/general/columnar-database.md) -- [主キーをどのように選択しますか?](../../guides/best-practices/sparse-primary-indexes.md) -- [MapReduceのようなものを使用しない理由は何ですか?](../../faq/general/mapreduce.md) -- [どのようにClickHouseにコードを寄付できますか?](/knowledgebase/how-do-i-contribute-code-to-clickhouse) - -:::info 探しているものが見つかりませんか? -私たちの[ナレッジベース](/knowledgebase/)を確認し、ドキュメント内にある役立つ記事をブラウズしてみてください。 -::: diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/faq/general/index.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/faq/general/index.md.hash deleted file mode 100644 index d1b4b8d2770..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/faq/general/index.md.hash +++ /dev/null @@ -1 +0,0 @@ -f86157879c4c5b42 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/faq/general/mapreduce.md b/i18n/jp/docusaurus-plugin-content-docs/current/faq/general/mapreduce.md deleted file mode 100644 index 6347211b281..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/faq/general/mapreduce.md +++ /dev/null @@ -1,18 +0,0 @@ ---- -slug: '/faq/general/mapreduce' -title: 'Why not use something like MapReduce?' -toc_hidden: true -toc_priority: 110 -description: 'This page explains why you would use ClickHouse over MapReduce' ---- - - - - -# なぜ MapReduce のようなものを使用しないのか? 
{#why-not-use-something-like-mapreduce} - -MapReduce のようなシステムは、分散ソートに基づいている reduce 操作を持つ分散コンピューティングシステムと見なすことができます。このクラスで最も一般的なオープンソースソリューションは [Apache Hadoop](http://hadoop.apache.org) です。 - -これらのシステムは、高いレイテンシのためオンラインクエリには適していません。言い換えれば、これらはウェブインターフェースのバックエンドとしては使用できません。このタイプのシステムは、リアルタイムデータの更新には役立ちません。分散ソートは、操作の結果とすべての中間結果(もしあるなら)が通常は単一のサーバーのRAMにあるオンラインクエリに対して、reduce 操作を行う最良の方法ではありません。このような場合、ハッシュテーブルが reduce 操作を行う最適な方法です。map-reduce タスクを最適化する一般的なアプローチは、RAM内のハッシュテーブルを使用した事前集約(部分的な reduce)です。この最適化はユーザーが手動で行います。分散ソートは、シンプルな map-reduce タスクを実行する際のパフォーマンスが低下する主な原因の一つです。 - -多くの MapReduce 実装は、クラスタ上で任意のコードを実行することを許可しています。しかし、宣言型クエリ言語は OLAP により適しており、実験を迅速に実行するのに有利です。たとえば、Hadoop には Hive と Pig があります。また、Spark 用の Cloudera Impala や(時代遅れの)Shark、Spark SQL、Presto、Apache Drill も考慮に入れるべきです。このようなタスクを実行する際のパフォーマンスは、専門のシステムと比較して非常にサブ最適ですが、比較的高いレイテンシにより、これらのシステムをウェブインターフェースのバックエンドとして使用することは現実的ではありません。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/faq/general/mapreduce.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/faq/general/mapreduce.md.hash deleted file mode 100644 index f24dfff6623..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/faq/general/mapreduce.md.hash +++ /dev/null @@ -1 +0,0 @@ -5584f76610df5888 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/faq/general/ne-tormozit.md b/i18n/jp/docusaurus-plugin-content-docs/current/faq/general/ne-tormozit.md deleted file mode 100644 index 2b8610a9d2a..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/faq/general/ne-tormozit.md +++ /dev/null @@ -1,33 +0,0 @@ ---- -slug: '/faq/general/ne-tormozit' -title: 'What does "не тормозит" mean?' -toc_hidden: true -toc_priority: 11 -description: 'This page explains what "Не тормозит" means' ---- - - - - -# "Не тормозит" の意味は何ですか? {#what-does-ne-tormozit-mean} - -ヴィンテージ(限定生産)ClickHouse Tシャツを見たときによく聞かれる質問です。これらのTシャツの前面には **"ClickHouse не тормозит"** と大きな太字で書かれています。 - -ClickHouseがオープンソースになる前は、大手ヨーロッパIT企業の[Yandex](https://yandex.com/company/)によって社内ストレージシステムとして開発されていました。そのため、最初のスローガンはキリル文字で「не тормозит」(発音は「ne tormozit」)として付けられました。オープンソース版がリリースされた後、初めて地元イベント用にそのTシャツをいくつか製作し、そのスローガンをそのまま使用するのは当然のことでした。 - -これらのTシャツの2回目のロットは国際イベントで配布される予定でしたが、スローガンの英語版を作成しようとしました。 -残念ながら、英語で印象的な同等の表現を見つけることができませんでした。元のフレーズは表現が洗練されている一方で簡潔であり、Tシャツのスペースの制約から、どの翻訳案も長すぎるか不正確に見えたため、十分な翻訳を考え出すことができませんでした。 -国際イベント用に製作されたTシャツでもスローガンをそのまま維持することに決めました。これは素晴らしい決定だったようで、世界中の人々は見たときに驚きと好奇心を持っていました。 - -それでは、どういう意味なのでしょうか? *"не тормозит"* の翻訳のいくつかの方法は次の通りです: - -- 直訳すると *"ClickHouseはブレーキペダルを踏まない"* のようになります。 -- 短くて正確さに欠ける翻訳としては、* "ClickHouseは遅くない" *、* "ClickHouseはラグがない" *、または単に * "ClickHouseは速い" * があります。 - -これらのTシャツを実際に見たことがない場合は、多くのClickHouse関連のビデオでオンラインでチェックすることができます。例えば、このビデオ: - -
- -_P.S. これらのTシャツは販売されていません。これらは、一部の[ClickHouse Meetups](https://www.meetup.com/pro/clickhouse/)で、通常は最優秀質問やその他の形での積極的な参加へのギフトとして無料で配布されました。現在、これらのTシャツはもはや製作されておらず、非常に価値のあるコレクターアイテムとなっています。_ diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/faq/general/ne-tormozit.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/faq/general/ne-tormozit.md.hash deleted file mode 100644 index 13ab77b06ad..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/faq/general/ne-tormozit.md.hash +++ /dev/null @@ -1 +0,0 @@ -cf7381a74213c7eb diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/faq/general/olap.md b/i18n/jp/docusaurus-plugin-content-docs/current/faq/general/olap.md deleted file mode 100644 index 8b6a233921a..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/faq/general/olap.md +++ /dev/null @@ -1,44 +0,0 @@ ---- -slug: '/faq/general/olap' -title: 'OLAPとは何ですか?' -toc_hidden: true -toc_priority: 100 -description: 'オンライン解析処理とは何かについての説明' ---- - - - - -# OLAPとは何か? {#what-is-olap} - -[OLAP](https://en.wikipedia.org/wiki/Online_analytical_processing)は、オンライン分析処理を意味します。これは、技術的な視点とビジネス的な視点の2つから見ることができる広範な用語です。しかし、非常に高いレベルで見ると、これらの言葉を逆に読むことができます。 - -処理 -: 一部のソースデータが処理されます... - -分析 -: ...分析レポートやインサイトを生成します... - -オンライン -: ...リアルタイムで。 - -## ビジネスの視点からのOLAP {#olap-from-the-business-perspective} - -近年、ビジネス界の人々はデータの価値に気づき始めました。盲目的に意思決定をする企業は、競争に追いつくことができないことが多いのです。成功した企業のデータ駆動型アプローチは、ビジネス意思決定に役立つかもしれないすべてのデータを収集し、それをタイムリーに分析するためのメカニズムを必要とします。ここでOLAPデータベース管理システム(DBMS)が登場します。 - -ビジネスの観点から、OLAPは企業が継続的に運営活動を計画、分析、報告することを可能にし、それによって効率を最大化し、コストを削減し、最終的には市場シェアを獲得します。これは、内部システムで行うか、ウェブ/モバイル分析サービス、CRMサービスなどのSaaSプロバイダーにアウトソースすることができます。OLAPは多くのBIアプリケーション(ビジネスインテリジェンス)の背後にある技術です。 - -ClickHouseは、ドメイン固有のデータを分析するためのこれらのSaaSソリューションのバックエンドとしてよく使用されるOLAPデータベース管理システムです。ただし、一部の企業は依然としてサードパーティプロバイダーとデータを共有することに躊躇しており、内部データウェアハウスのシナリオも有効です。 - -## 技術の視点からのOLAP {#olap-from-the-technical-perspective} - -すべてのデータベース管理システムは、OLAP(オンライン **分析** 処理)とOLTP(オンライン **トランザクション** 処理)の2つのグループに分類できます。前者は、大量の過去データに基づいてレポートを構築することに焦点を当てていますが、それを頻繁に行うわけではありません。一方、後者は通常、トランザクションの継続的なストリームを処理し、データの現在の状態を常に変更します。 - -実際には、OLAPとOLTPはカテゴリーではなく、むしろスペクトルのようなものです。ほとんどの実際のシステムは通常、それらのどちらかに焦点を当てていますが、反対の種類のワークロードが必要な場合には解決策や回避策を提供します。この状況は、企業が統合された複数のストレージシステムを運用せざるを得なくなることが多く、特に大きな問題ではないかもしれませんが、より多くのシステムを持つことはメンテナンスのコストを高くすることになります。したがって、最近のトレンドはHTAP(**ハイブリッドトランザクショナル/分析処理**)であり、両方のワークロードが単一のデータベース管理システムによって同様に適切に処理されます。 - -DBMSが純粋なOLAPまたは純粋なOLTPとして開始された場合でも、競争に追いつくためにHTAPの方向に移行せざるを得ません。ClickHouseも例外ではなく、初めは[可能な限り高速なOLAPシステム](../../concepts/why-clickhouse-is-so-fast.md)として設計されており、まだ完全なトランザクションサポートは持っていませんが、一貫した読み取り/書き込みおよびデータの更新/削除のための変異などのいくつかの機能を追加する必要がありました。 - -OLAPとOLTPシステムの間の根本的なトレードオフは以下の通りです: - -- 効率的に分析レポートを構築するには、カラムを別々に読み取ることが重要です。そのため、ほとんどのOLAPデータベースは[列指向](../../faq/general/columnar-database.md)です。 -- 一方、カラムを別々に保存することは、行に対する操作のコストを、カラムの数に比例して増加させます(システムが場合に備えてイベントのすべての詳細を収集しようとした場合は、巨大になる可能性があります)。したがって、ほとんどのOLTPシステムは行によってデータを配置しています。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/faq/general/olap.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/faq/general/olap.md.hash deleted file mode 100644 index b3c0c95208a..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/faq/general/olap.md.hash +++ /dev/null @@ -1 +0,0 @@ -a9d3b7ac728bd8d4 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/faq/general/who-is-using-clickhouse.md b/i18n/jp/docusaurus-plugin-content-docs/current/faq/general/who-is-using-clickhouse.md deleted file mode 100644 index 8aafee5ec69..00000000000 --- 
a/i18n/jp/docusaurus-plugin-content-docs/current/faq/general/who-is-using-clickhouse.md +++ /dev/null @@ -1,24 +0,0 @@ ---- -slug: '/faq/general/who-is-using-clickhouse' -title: 'Who is using ClickHouse?' -toc_hidden: true -toc_priority: 9 -description: 'Describes who is using ClickHouse' ---- - - - - -# Who is using ClickHouse? {#who-is-using-clickhouse} - -オープンソース製品であるため、この質問に対する答えは簡単ではありません。ClickHouseの使用を開始したい場合、誰にでもそのことを伝える必要はなく、ソースコードやプリコンパイルされたパッケージを入手するだけです。契約書にサインする必要はなく、[Apache 2.0ライセンス](https://github.com/ClickHouse/ClickHouse/blob/master/LICENSE)により、制約のないソフトウェア配布が可能です。 - -また、技術スタックはしばしばNDAの範囲内のグレーゾーンにあります。いくつかの企業は、オープンソースであっても使用する技術を競争上の優位性と見なしており、従業員が公に詳細を共有することを許可していません。一部はPRリスクを考慮し、従業員がPR部門の承認を得てのみ実装の詳細を共有することを許可します。 - -では、ClickHouseを使用している人をどのように特定すればよいでしょうか? - -一つの方法は、**周りに聞いてみる**ことです。書面になっていない場合、人々は自社で使用している技術、ユースケース、使用しているハードウェア、データ量などについて非常に話しやすくなります。私たちは世界中の[ClickHouse Meetups](https://www.youtube.com/channel/UChtmrD-dsdpspr42P_PyRAw/playlists)で定期的にユーザーと話し、ClickHouseを使用している1000以上の企業についての話を聞いてきました。残念ながら、それは再現可能ではないため、私たちはそのような話をNDAの下で語られたかのように扱うよう努めています。しかし、今後のミートアップに参加して他のユーザーと直接話すことができます。ミートアップの告知方法は複数あります。たとえば、[私たちのTwitter](http://twitter.com/ClickHouseDB/) をフォローすることができます。 - -二つ目の方法は、**公に**ClickHouseを使用していると言っている企業を探すことです。これはより実質的で、通常はブログ記事、トークのビデオ録画、スライドデッキなどの確かな証拠があります。私たちはそのような証拠へのリンクを**[Adopters](../../about-us/adopters.md)**ページに集めています。あなたの雇用主のストーリーや偶然見つけたリンクを自由に寄稿してください(ただし、NDAに違反しないように注意してください)。 - -アダプターリストには、Bloomberg、Cisco、China Telecom、Tencent、Lyftなどの非常に大きな企業の名前を見ることができますが、最初のアプローチでは、さらに多くの企業があることがわかりました。たとえば、[Forbesによる2020年の世界の大手IT企業のリスト](https://www.forbes.com/sites/hanktucker/2020/05/13/worlds-largest-technology-companies-2020-apple-stays-on-top-zoom-and-uber-debut/)を見ると、その半数以上が何らかの形でClickHouseを使用しています。また、ClickHouseを2016年に初めてオープンソース化した企業であり、ヨーロッパで最大のIT企業の一つである[Yandex](../../about-us/history.md)に触れないのは不公平です。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/faq/general/who-is-using-clickhouse.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/faq/general/who-is-using-clickhouse.md.hash deleted file mode 100644 index 46d212d2778..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/faq/general/who-is-using-clickhouse.md.hash +++ /dev/null @@ -1 +0,0 @@ -4687d1cf5018471c diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/faq/index.md b/i18n/jp/docusaurus-plugin-content-docs/current/faq/index.md deleted file mode 100644 index d1f877a9eb0..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/faq/index.md +++ /dev/null @@ -1,17 +0,0 @@ ---- -slug: '/concepts/faq' -title: 'FAQ' -description: 'Landing page for FAQ' -pagination_prev: null -pagination_next: null ---- - - - -| Page | Description | -|---------------------------------------------------------------|----------------------------------------------------------------------------------------| -| [ClickHouseに関する一般的な質問](general/index.md) | ClickHouseに関してよく受ける一般的な質問です。 | -| [MapReduceのようなものを使わないのはなぜですか?](general/mapreduce.md) | OLAPシナリオにMapReduce実装が適していない理由の解説です。 | -| [「не тормозит」は何を意味しますか](general/ne-tormozit.md) | ClickHouseのTシャツで見たかもしれない「не тормозит」の意味に関する解説です。 | -| [OLAPとは何ですか](general/olap.md) | オンライン分析処理(OLAP)とは何かに関する解説です。 | -| [ClickHouseを使用しているのは誰ですか](general/who-is-using-clickhouse.md) | ClickHouseを使用している人々について学びます。 | diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/faq/index.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/faq/index.md.hash deleted file mode 100644 index 326ab82995f..00000000000 --- 
a/i18n/jp/docusaurus-plugin-content-docs/current/faq/index.md.hash +++ /dev/null @@ -1 +0,0 @@ -2205140d0773c473 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/faq/integration/_category_.yml b/i18n/jp/docusaurus-plugin-content-docs/current/faq/integration/_category_.yml deleted file mode 100644 index 28e8a6f6134..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/faq/integration/_category_.yml +++ /dev/null @@ -1,4 +0,0 @@ -position: 20 -label: 'Integration' -collapsible: true -collapsed: true diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/faq/integration/index.md b/i18n/jp/docusaurus-plugin-content-docs/current/faq/integration/index.md deleted file mode 100644 index 81a83b1980a..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/faq/integration/index.md +++ /dev/null @@ -1,29 +0,0 @@ ---- -slug: '/faq/integration/' -sidebar_position: 1 -sidebar_label: '他のシステムとのClickHouse統合' -keywords: -- 'clickhouse' -- 'faq' -- 'questions' -- 'integrations' -title: 'ClickHouseと他のシステムの統合に関する質問' -description: 'ClickHouseを他のシステムと統合する関連質問をリストアップしたページ' ---- - - - - -# ClickHouseと他のシステムの統合に関する質問 - -- [ClickHouseからファイルにデータをエクスポートするには?](/knowledgebase/file-export) -- [JSONをClickHouseにインポートする方法は?](/integrations/data-ingestion/data-formats/json/intro.md) -- [KafkaをClickHouseに接続するには?](/integrations/data-ingestion/kafka/index.md) -- [JavaアプリケーションをClickHouseに接続できますか?](/integrations/data-ingestion/dbms/jdbc-with-clickhouse.md) -- [ClickHouseはMySQLのテーブルを読み取ることができますか?](/integrations/data-ingestion/dbms/mysql/index.md) -- [ClickHouseはPostgreSQLのテーブルを読み取ることができますか?](/integrations/data-ingestion/dbms/postgresql/connecting-to-postgresql.md) -- [ODBC経由でOracleに接続するときにエンコーディングに問題がある場合は?](/faq/integration/oracle-odbc.md) - -:::info 探している内容が見つかりませんか? -私たちの[ナレッジベース](/knowledgebase/)をご覧ください。また、ここにある多くの役立つ記事もぜひご覧ください。 -::: diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/faq/integration/index.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/faq/integration/index.md.hash deleted file mode 100644 index bb1c453c3ba..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/faq/integration/index.md.hash +++ /dev/null @@ -1 +0,0 @@ -b4f55d342e4cafcb diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/faq/integration/json-import.md b/i18n/jp/docusaurus-plugin-content-docs/current/faq/integration/json-import.md deleted file mode 100644 index d10e1eb9c81..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/faq/integration/json-import.md +++ /dev/null @@ -1,39 +0,0 @@ ---- -slug: '/faq/integration/json-import' -title: 'ClickHouseへのJSONインポート方法' -toc_hidden: true -toc_priority: 11 -description: 'このページでは、JSONをClickHouseにインポートする方法について説明します。' ---- - - - - -# How to Import JSON Into ClickHouse? 
{#how-to-import-json-into-clickhouse} - -ClickHouse は、入力と出力のための幅広い [データフォーマット](../../interfaces/formats.md) をサポートしています。その中には複数の JSON バリエーションがありますが、データの取り込みに最も一般的に使用されるのは [JSONEachRow](../../interfaces/formats.md#jsoneachrow) です。これは、各行ごとに 1 つの JSON オブジェクトを期待し、各オブジェクトは改行で区切られる必要があります。 - -## Examples {#examples} - -[HTTP インターフェース](../../interfaces/http.md)を使用する場合: - -``` bash -$ echo '{"foo":"bar"}' | curl 'http://localhost:8123/?query=INSERT%20INTO%20test%20FORMAT%20JSONEachRow' --data-binary @- -``` - -[CLI インターフェース](../../interfaces/cli.md)を使用する場合: - -``` bash -$ echo '{"foo":"bar"}' | clickhouse-client --query="INSERT INTO test FORMAT JSONEachRow" -``` - -データを手動で挿入する代わりに、[統合ツール](../../integrations/index.mdx) を使用することを検討しても良いでしょう。 - -## Useful Settings {#useful-settings} - -- `input_format_skip_unknown_fields` は、テーブルスキーマに存在しない追加のフィールドがあっても JSON を挿入することを可能にします(それらを破棄します)。 -- `input_format_import_nested_json` は、[Nested](../../sql-reference/data-types/nested-data-structures/index.md) タイプのカラムにネストされた JSON オブジェクトを挿入することを可能にします。 - -:::note -設定は、HTTP インターフェースの `GET` パラメータとして指定するか、`CLI` インターフェースのために `--` で始まる追加のコマンドライン引数として指定されます。 -::: diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/faq/integration/json-import.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/faq/integration/json-import.md.hash deleted file mode 100644 index 691d710674d..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/faq/integration/json-import.md.hash +++ /dev/null @@ -1 +0,0 @@ -188b5a9c939c0bca diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/faq/integration/oracle-odbc.md b/i18n/jp/docusaurus-plugin-content-docs/current/faq/integration/oracle-odbc.md deleted file mode 100644 index 96241cdff9b..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/faq/integration/oracle-odbc.md +++ /dev/null @@ -1,20 +0,0 @@ ---- -slug: '/faq/integration/oracle-odbc' -title: 'Oracleを使用する際にODBC経由でエンコードに問題が発生した場合はどうすればよいですか?' -toc_hidden: true -toc_priority: 20 -description: 'このページでは、Oracleを使用する際にODBC経由でエンコーディングに問題が発生した場合の対処方法についてのガイダンスを提供します。' ---- - - - - -# Oracle ODBCを使用しているときのエンコーディングに関する問題がある場合はどうすればよいですか? 
{#oracle-odbc-encodings} - -Oracle ODBCドライバを介してClickHouseの外部ディクショナリのソースとしてOracleを使用する場合、 `/etc/default/clickhouse` にある `NLS_LANG` 環境変数に正しい値を設定する必要があります。詳細については、[Oracle NLS_LANG FAQ](https://www.oracle.com/technetwork/products/globalization/nls-lang-099431.html)を参照してください。 - -**例** - -``` sql -NLS_LANG=RUSSIAN_RUSSIA.UTF8 -``` diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/faq/integration/oracle-odbc.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/faq/integration/oracle-odbc.md.hash deleted file mode 100644 index f66d464e2e6..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/faq/integration/oracle-odbc.md.hash +++ /dev/null @@ -1 +0,0 @@ -3a20b44372d4174e diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/faq/operations/_category_.yml b/i18n/jp/docusaurus-plugin-content-docs/current/faq/operations/_category_.yml deleted file mode 100644 index b1843465799..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/faq/operations/_category_.yml +++ /dev/null @@ -1,4 +0,0 @@ -position: 30 -label: 'Operations' -collapsible: true -collapsed: true diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/faq/operations/delete-old-data.md b/i18n/jp/docusaurus-plugin-content-docs/current/faq/operations/delete-old-data.md deleted file mode 100644 index de355028f88..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/faq/operations/delete-old-data.md +++ /dev/null @@ -1,59 +0,0 @@ ---- -slug: '/faq/operations/delete-old-data' -title: 'ClickHouseテーブルから古いレコードを削除することは可能ですか?' -toc_hidden: true -toc_priority: 20 -description: 'このページでは、ClickHouseテーブルから古いレコードを削除することが可能かどうかについて説明します。' ---- - - - - -# 古いレコードを ClickHouse テーブルから削除することは可能ですか? {#is-it-possible-to-delete-old-records-from-a-clickhouse-table} - -短い答えは「はい」です。ClickHouse には、古いデータを削除してディスクスペースを解放する複数のメカニズムがあります。それぞれのメカニズムは異なるシナリオを対象としています。 - -## TTL {#ttl} - -ClickHouse は、特定の条件が発生したときに自動的に値を削除することを許可します。この条件は、通常は任意のタイムスタンプカラムに対して静的オフセットとして設定された式に基づいて構成されます。 - -このアプローチの主な利点は、TTL が構成された後、データの削除がバックグラウンドで自動的に行われるため、トリガー用の外部システムを必要としないことです。 - -:::note -TTL は、データを [/dev/null](https://en.wikipedia.org/wiki/Null_device) に移動するだけでなく、SSD から HDD などの異なるストレージシステム間で移動するためにも使用できます。 -::: - -[TTL の構成に関する詳細](../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-ttl)。 - -## DELETE FROM {#delete-from} - -[DELETE FROM](/sql-reference/statements/delete.md) は、ClickHouse で標準の DELETE クエリを実行できるようにします。フィルター句で指定された行は削除されたとしてマークされ、将来の結果セットから削除されます。行のクリーンアップは非同期で行われます。 - -:::note -DELETE FROM は、バージョン 23.3 以降から一般的に利用可能です。古いバージョンでは、実験的であり、次のように有効にする必要があります: -```sql -SET allow_experimental_lightweight_delete = true; -``` -::: - -## ALTER DELETE {#alter-delete} - -ALTER DELETE は、非同期のバッチ操作を使用して行を削除します。DELETE FROM とは異なり、ALTER DELETE の後、およびバッチ操作が完了する前に実行されたクエリには、削除対象の行が含まれます。詳細については、[ALTER DELETE](/sql-reference/statements/alter/delete.md) ドキュメントを参照してください。 - -`ALTER DELETE` は、古いデータを柔軟に削除するために発行できます。定期的に削除する必要がある場合、主な欠点はクエリを送信するために外部システムを持つ必要があることです。また、単一の行を削除するだけでも、変更によって完全なパーツが再書き込みされるため、パフォーマンス上の考慮点もあります。 - -これは、ClickHouse ベースのシステムを [GDPR](https://gdpr-info.eu) 準拠にするための最も一般的なアプローチです。 - -[変更](/sql-reference/statements/alter#mutations) に関する詳細。 - -## DROP PARTITION {#drop-partition} - -`ALTER TABLE ... 
DROP PARTITION` は、全体のパーティションを削除するコスト効率の良い方法を提供します。それほど柔軟ではなく、テーブル作成時に適切なパーティショニングスキームを設定する必要がありますが、一般的なケースのほとんどをカバーしています。定期的な使用のためには、外部システムから実行する必要があります。 - -[パーティションの操作に関する詳細](/sql-reference/statements/alter/partition)。 - -## TRUNCATE {#truncate} - -テーブルからすべてのデータを削除するのはかなり過激ですが、場合によっては正にそれが必要な場合があります。 - -[テーブルのトランケートに関する詳細](/sql-reference/statements/truncate.md)。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/faq/operations/delete-old-data.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/faq/operations/delete-old-data.md.hash deleted file mode 100644 index e739fd96c23..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/faq/operations/delete-old-data.md.hash +++ /dev/null @@ -1 +0,0 @@ -066dd351201aa717 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/faq/operations/index.md b/i18n/jp/docusaurus-plugin-content-docs/current/faq/operations/index.md deleted file mode 100644 index a4386031133..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/faq/operations/index.md +++ /dev/null @@ -1,25 +0,0 @@ ---- -slug: '/faq/operations/' -sidebar_position: 3 -sidebar_label: 'ClickHouseサーバーとクラスターの運用に関する質問' -title: 'ClickHouseサーバーとクラスターの運用に関する質問' -description: 'ClickHouseサーバーとクラスターの運用に関する質問のランディングページ' ---- - - - - -# ClickHouseサーバーとクラスターの運用に関する質問 - -- [本番環境で使用すべきClickHouseのバージョンはどれですか?](/faq/operations/production.md) -- [ストレージと計算を分けてClickHouseをデプロイすることは可能ですか?](/faq/operations/separate_storage.md) -- [ClickHouseテーブルから古いレコードを削除することは可能ですか?](/faq/operations/delete-old-data.md) -- [ClickHouse Keeperを設定するにはどうすればよいですか?](/guides/sre/keeper/index.md) -- [ClickHouseはLDAPと統合できますか?](/guides/sre/user-management/configuring-ldap.md) -- [ClickHouseでユーザー、ロール、権限を設定するにはどうすればよいですか?](/guides/sre/user-management/index.md) -- [ClickHouseで行を更新または削除できますか?](/guides/developer/mutations.md) -- [ClickHouseはマルチリージョンレプリケーションをサポートしていますか?](/faq/operations/multi-region-replication.md) - -:::info 探しているものが見つかりませんか? -私たちの[ナレッジベース](/knowledgebase/)をチェックし、ここにある多くの役立つ記事も参照してください。 -::: diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/faq/operations/index.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/faq/operations/index.md.hash deleted file mode 100644 index e4ebb7249a0..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/faq/operations/index.md.hash +++ /dev/null @@ -1 +0,0 @@ -f2fcb77e75e8680d diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/faq/operations/multi-region-replication.md b/i18n/jp/docusaurus-plugin-content-docs/current/faq/operations/multi-region-replication.md deleted file mode 100644 index a521d360b41..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/faq/operations/multi-region-replication.md +++ /dev/null @@ -1,18 +0,0 @@ ---- -slug: '/faq/operations/multi-region-replication' -title: 'ClickHouseはマルチリージョンレプリケーションをサポートしていますか?' -toc_hidden: true -toc_priority: 30 -description: 'このページは、ClickHouseがマルチリージョンレプリケーションをサポートしているかどうかについて回答します。' ---- - - - - -# ClickHouseはマルチリージョンレプリケーションをサポートしていますか? 
{#does-clickhouse-support-multi-region-replication} - -短い答えは「はい」です。しかし、すべてのリージョン/データセンター間のレイテンシは二桁の範囲に保つことをお勧めします。そうしないと、分散合意プロトコルを通るため、書き込みパフォーマンスが低下します。例えば、アメリカの海岸間でのレプリケーションは問題なく機能するでしょうが、アメリカとヨーロッパ間ではうまくいかないでしょう。 - -構成に関しては、単一リージョンのレプリケーションと違いはなく、単に異なる場所にあるホストをレプリカに使用します。 - -詳細については、[データレプリケーションに関する完全な記事](../../engines/table-engines/mergetree-family/replication.md)を参照してください。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/faq/operations/multi-region-replication.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/faq/operations/multi-region-replication.md.hash deleted file mode 100644 index e571b4fbb44..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/faq/operations/multi-region-replication.md.hash +++ /dev/null @@ -1 +0,0 @@ -2dc91b204d1d01d5 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/faq/operations/production.md b/i18n/jp/docusaurus-plugin-content-docs/current/faq/operations/production.md deleted file mode 100644 index 6d795145290..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/faq/operations/production.md +++ /dev/null @@ -1,70 +0,0 @@ ---- -slug: '/faq/operations/production' -title: 'プロダクションで使用するClickHouseバージョンは?' -toc_hidden: true -toc_priority: 10 -description: 'このページでは、プロダクションで使用するClickHouseバージョンについてのガイダンスを提供します' ---- - - - - -# どの ClickHouse バージョンを本番環境で使用するべきですか? {#which-clickhouse-version-to-use-in-production} - -まず最初に、なぜ人々がこの質問をするのかについて話しましょう。主に二つの理由があります。 - -1. ClickHouse は非常に高い速度で開発されており、通常、年間に 10 回以上の安定版リリースがあります。これは選択肢が非常に多くなることを意味し、決して trivial な選択ではありません。 -2. 一部のユーザーは、自分のユースケースに最適なバージョンを見つけるために時間をかけたくなく、他の誰かのアドバイスに従うことを望んでいます。 - -二つ目の理由はより根本的なものであるため、まずそれについて説明し、その後さまざまな ClickHouse リリースのナビゲーションに戻ります。 - -## どの ClickHouse バージョンを推奨しますか? {#which-clickhouse-version-do-you-recommend} - -コンサルタントを雇ったり、信頼できる専門家に頼ったりすることは魅力的です。そうすることで、あなたの本番環境に対する責任を放棄することができます。誰かが推奨する特定の ClickHouse バージョンをインストールします。そのバージョンに問題があった場合も、それはあなたの責任ではなく、誰か他の人のものです。この推論は大きな罠です。外部の誰もが、あなたの会社の本番環境で何が起こっているかをあなたよりもよく知っているわけではありません。 - -それでは、どのようにして適切に ClickHouse のバージョンを選ぶのでしょうか? また、どのようにして最初の ClickHouse バージョンを選べばよいのでしょうか? 
まず第一に、**現実的なプリプロダクション環境**の構築に投資する必要があります。理想的な世界では、完全に同一なシャドーコピーを作成できますが、通常は高価です。 - -低コストで現実に近いプリプロダクション環境を得るための重要なポイントは次の通りです。 - -- プリプロダクション環境では、本番で実行する予定のクエリセットにできるだけ近いクエリを実行する必要があります: - - 冷凍データを使った読み取り専用にしないでください。 - - 単にデータをコピーする書き込み専用にはしないで、典型的なレポートを作成するべきです。 - - スキーマのマイグレーションを適用する代わりに、クリーンな状態にしないでください。 -- 実際の本番データとクエリのサンプルを使用してください。選択するサンプルは代表的であり、`SELECT` クエリが合理的な結果を返すようにします。データが機密情報であり、内部ポリシーが本番環境からのデータの流出を許可していない場合は、データを難読化してください。 -- プリプロダクションも本番環境と同様に、監視およびアラートソフトウェアでカバーされていることを確認してください。 -- 本番が複数のデータセンターや地域にまたがっている場合は、プリプロダクションも同じようにします。 -- 本番でレプリケーション、分散テーブル、カスケード マテリアライズド ビューなどの複雑な機能を使用している場合は、プリプロダクションでも同様に設定してください。 -- プリプロダクションで本番とほぼ同じ数のサーバーまたは VM を使用するか、小さいサイズで同じ数のサーバーを使用するかのトレードオフがあります。最初のオプションは追加のネットワーク関連の問題をキャッチする可能性がありますが、後者は管理が容易です。 - -次に投資すべき分野は、**自動テストインフラストラクチャ**です。ある種のクエリが一度成功裏に実行されたからといって、それが永遠に成功するとは限りません。ClickHouse がモックされた状態でのユニットテストを持つのは問題ありませんが、製品が実際の ClickHouse に対して実行され、すべての重要なユースケースが期待通りに動作していることを確認する合理的な自動テストのセットを持っていることを確認してください。 - -一歩進んだステップとして、[ClickHouse のオープンソーステストインフラストラクチャ](https://github.com/ClickHouse/ClickHouse/tree/master/tests) にその自動テストを貢献することが考えられます。このインフラは ClickHouse の日常的な開発で継続的に使用されています。どのように実行するかを学ぶためには追加の時間と労力が必要です[どうやって実行するか](../../development/tests.md)を確認し、次にそのテストをこのフレームワークに適応させる方法を学ぶ必要がありますが、ClickHouse のリリースが安定版として発表されるときにはすでにそれらのテストを通過していることを確認できるため、問題を報告した後に時間を無駄にし、バグ修正を実施してもらうのを待つのではなく、価値があります。一部の企業では、内部ポリシーとしてこのようなテストの貢献を持つことが一般的です(Google では [Beyonce の法則](https://www.oreilly.com/library/view/software-engineering-at/9781492082781/ch01.html#policies_that_scale_well) と呼ばれています)。 - -プリプロダクション環境とテストインフラストラクチャを整備したら、最適なバージョンを選択するのは簡単です: - -1. 定期的に新しい ClickHouse リリースに対して自動テストを実行します。`testing` とマークされた ClickHouse リリースに対しても行うことができますが、それらに対して次のステップに進むことは推奨されません。 -2. テストに合格した ClickHouse リリースをプリプロダクションにデプロイし、すべてのプロセスが期待通りに機能していることを確認します。 -3. 発見した問題を [ClickHouse GitHub Issues](https://github.com/ClickHouse/ClickHouse/issues) に報告します。 -4. 重大な問題がなければ、ClickHouse リリースを本番環境にデプロイし始めるのが安全です。 [カナリアリリース](https://martinfowler.com/bliki/CanaryRelease.html) や [青緑デプロイメント](https://martinfowler.com/bliki/BlueGreenDeployment.html) に類似したアプローチを実装する段階的リリースの自動化に投資することで、本番環境での問題のリスクをさらに軽減できます。 - -ご覧のとおり、上記のアプローチには ClickHouse に特有のものは何もありません。人々は、本番環境を真剣に考慮するならば、自分たちが依存するインフラのためにそれを実行します。 - -## ClickHouse リリースの選び方 {#how-to-choose-between-clickhouse-releases} - -ClickHouse パッケージリポジトリの内容を調べると、二種類のパッケージがあることがわかります。 - -1. `stable` -2. 
`lts` (長期サポート) - -それらの間で選択する方法についてのガイダンスは次のとおりです。 - -- `stable` はデフォルトで推奨されるパッケージの種類です。これらはおおよそ月ごとにリリースされ(したがって新機能を合理的な遅延で提供し)、最新の三つの安定版リリースは診断やバグ修正のバックポートに関してサポートされています。 -- `lts` は年に二回リリースされ、初回リリースから一年間サポートされます。以下の場合には `lts` を `stable` より優先することがあるかもしれません: - - あなたの会社に頻繁なアップグレードや非 LTS ソフトウェアの使用を許可しない内部ポリシーがある。 - - あなたが ClickHouse を、複雑な ClickHouse 機能を必要としないか、更新を維持するための十分なリソースがない他の二次製品で使用している。 - -最初は `lts` の方が良いと考えているチームも、最終的には自分たちの製品にとって重要な最近の機能のために `stable` に切り替えることが多いです。 - -:::tip -ClickHouse をアップグレードするときに考慮すべきもう一つのことは、リリース間の互換性を常に確認しているものの、合理的に維持できない場合や、一部の詳細が変更されることがあるということです。アップグレードする前に、[変更履歴]( /whats-new/changelog/index.md) を確認して、後方互換性のない変更についての注記がないかを確認してください。 -::: diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/faq/operations/production.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/faq/operations/production.md.hash deleted file mode 100644 index 00ff00a7cef..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/faq/operations/production.md.hash +++ /dev/null @@ -1 +0,0 @@ -f26411440da6af28 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/faq/operations/separate_storage.md b/i18n/jp/docusaurus-plugin-content-docs/current/faq/operations/separate_storage.md deleted file mode 100644 index 76a5b560ed6..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/faq/operations/separate_storage.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -slug: '/faq/operations/deploy-separate-storage-and-compute' -title: 'ClickHouseのストレージと計算を別々に展開することは可能ですか?' -sidebar_label: 'ClickHouseのストレージと計算を別々に展開することは可能ですか?' -toc_hidden: true -toc_priority: 20 -description: 'このページでは、ClickHouseをストレージと計算を別々に展開することが可能かどうかについて回答しています。' ---- - - - -短い答えは「はい」です。 - -オブジェクトストレージ(S3、GCS)は、ClickHouse テーブル内のデータのための弾力的な主ストレージバックエンドとして使用できます。[S3-backed MergeTree](/integrations/data-ingestion/s3/index.md)および[GCS-backed MergeTree](/integrations/data-ingestion/gcs/index.md) ガイドが公開されています。この構成では、メタデータのみが計算ノードにローカルに保存されます。このセットアップでは、追加のノードがメタデータをレプリケートする必要があるため、コンピューティングリソースを簡単に拡張および縮小できます。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/faq/operations/separate_storage.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/faq/operations/separate_storage.md.hash deleted file mode 100644 index 45ee44ba318..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/faq/operations/separate_storage.md.hash +++ /dev/null @@ -1 +0,0 @@ -4c5aff4db36a77b2 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/faq/troubleshooting.md b/i18n/jp/docusaurus-plugin-content-docs/current/faq/troubleshooting.md deleted file mode 100644 index d5e70be086e..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/faq/troubleshooting.md +++ /dev/null @@ -1,27 +0,0 @@ ---- -title: 'トラブルシューティング' -slug: '/faq/troubleshooting' -description: '一般的な ClickHouse Cloud エラーメッセージのトラブルシューティング方法。' ---- - - - -## ClickHouse Cloud トラブルシューティング {#clickhouse-cloud-troubleshooting} - -### ClickHouse Cloud サービスにアクセスできない {#unable-to-access-a-clickhouse-cloud-service} - -以下のようなエラーメッセージが表示される場合、IPアクセスリストがアクセスを拒否している可能性があります: - -```response -curl: (35) error:02FFF036:system library:func(4095):Connection reset by peer -``` -または -```response -curl: (35) LibreSSL SSL_connect: SSL_ERROR_SYSCALL in connection to HOSTNAME.clickhouse.cloud:8443 -``` -または -```response -Code: 210. DB::NetException: SSL connection unexpectedly closed (e46453teek.us-east-2.aws.clickhouse-staging.com:9440). 
(NETWORK_ERROR) -``` - -[IPアクセスリスト](/cloud/security/setting-ip-filters)を確認してください。許可されたリストの外から接続を試みている場合、接続は失敗します。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/faq/troubleshooting.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/faq/troubleshooting.md.hash deleted file mode 100644 index 2221ba0db80..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/faq/troubleshooting.md.hash +++ /dev/null @@ -1 +0,0 @@ -b827a3738b382499 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/faq/use-cases/_category_.yml b/i18n/jp/docusaurus-plugin-content-docs/current/faq/use-cases/_category_.yml deleted file mode 100644 index 4c0bfa72940..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/faq/use-cases/_category_.yml +++ /dev/null @@ -1,4 +0,0 @@ -position: 40 -label: 'Use Cases' -collapsible: true -collapsed: true diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/faq/use-cases/index.md b/i18n/jp/docusaurus-plugin-content-docs/current/faq/use-cases/index.md deleted file mode 100644 index 48e556286bd..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/faq/use-cases/index.md +++ /dev/null @@ -1,19 +0,0 @@ ---- -slug: '/faq/use-cases/' -sidebar_position: 2 -sidebar_label: 'ClickHouseのユースケースに関する質問' -title: 'ClickHouseのユースケースについての質問' -description: 'ClickHouseのユースケースに関する一般的な質問をリストアップしたランディングページ' ---- - - - - -# ClickHouseのユースケースに関する質問 - -- [ClickHouseを時系列データベースとして使用できますか?](/knowledgebase/time-series) -- [ClickHouseをキー-バリューストレージとして使用できますか?](/knowledgebase/key-value) - -:::info 探しているものが見つかりませんか? -私たちの[ナレッジベース](/knowledgebase/)をチェックし、ドキュメント内の多くの役立つ記事もご覧ください。 -::: diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/faq/use-cases/index.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/faq/use-cases/index.md.hash deleted file mode 100644 index d877f5e89bc..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/faq/use-cases/index.md.hash +++ /dev/null @@ -1 +0,0 @@ -64b2b3caa97c9529 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/faq/use-cases/key-value.md b/i18n/jp/docusaurus-plugin-content-docs/current/faq/use-cases/key-value.md deleted file mode 100644 index 51277d6554d..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/faq/use-cases/key-value.md +++ /dev/null @@ -1,22 +0,0 @@ ---- -slug: '/faq/use-cases/key-value' -title: 'ClickHouseをキー値ストレージとして使用できますか?' -toc_hidden: true -toc_priority: 101 -description: 'ClickHouseをキー値ストレージとして使用できるかどうかについてのよくある質問に答えます。' ---- - - - - -# ClickHouseをキー・バリュー・ストレージとして使用できますか? 
{#can-i-use-clickhouse-as-a-key-value-storage} - -短い答えは**「いいえ」**です。キー・バリューのワークロードは、ClickHouseを使用しないべきケースのリストの中でもトップの位置にあります。結局のところ、ClickHouseは[OLAP](../../faq/general/olap.md)システムであり、優れたキー・バリュー・ストレージシステムは他にも多く存在します。 - -しかし、キー・バリューのようなクエリにClickHouseを使用することが理にかなう状況もあるかもしれません。通常、これは主に分析的な性質のワークロードがあり、ClickHouseに適している低予算の製品の一部であり、しかしながらリクエストスループットがそれほど高くなく、厳しいレイテンシーの要件がないキー・バリューのパターンを必要とする二次プロセスがあります。もし予算が無限であったなら、その二次ワークロードのために別のキー・バリュー・データベースを設置していたでしょうが、実際には、もう一つのストレージシステム(監視、バックアップなど)を維持するための追加コストがあるため、それを避けたい場合があります。 - -推奨に反してClickHouseに対してキー・バリューのようなクエリを実行することを決めた場合、以下のヒントがあります: - -- ClickHouseにおけるポイントクエリが高価である主要な理由は、その主な[MergeTreeテーブルエンジンファミリー](../..//engines/table-engines/mergetree-family/mergetree.md)のスパース主インデックスです。このインデックスは、特定のデータ行を指し示すことができず、代わりに各N番目の行を指し示し、システムは隣接するN番目の行から目的の行にスキャンしなければならず、その過程で過剰なデータを読み込む必要があります。キー・バリューのシナリオでは、`index_granularity`設定を使用してNの値を減らすことが有用かもしれません。 -- ClickHouseは各カラムを別々のファイルセットに保持するため、完全な1行を構成するためには各ファイルを通過する必要があります。カラムの数は、カラムの数に応じて線形に増加するため、キー・バリューのシナリオでは、多くのカラムの使用を避け、すべてのペイロードをJSON、Protobuf、またはそれが意味のあるものである何らかのシリアライズ形式でエンコードされた単一の`String`カラムに置くことが価値があるかもしれません。 -- 正常な`MergeTree`テーブルの代わりに[Join](../../engines/table-engines/special/join.md)テーブルエンジンを使用し、データを取得するために[joinGet](../../sql-reference/functions/other-functions.md#joinget)関数を使用する代替アプローチもあります。これにより、クエリのパフォーマンスが向上する可能性がありますが、一部の使いやすさや信頼性の問題があるかもしれません。こちらが[使用例](https://github.com/ClickHouse/ClickHouse/blob/master/tests/queries/0_stateless/00800_versatile_storage_join.sql#L49-L51)です。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/faq/use-cases/key-value.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/faq/use-cases/key-value.md.hash deleted file mode 100644 index 7a03360f0b4..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/faq/use-cases/key-value.md.hash +++ /dev/null @@ -1 +0,0 @@ -9ae3589b050333f1 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/faq/use-cases/time-series.md b/i18n/jp/docusaurus-plugin-content-docs/current/faq/use-cases/time-series.md deleted file mode 100644 index a411d00c3f5..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/faq/use-cases/time-series.md +++ /dev/null @@ -1,26 +0,0 @@ ---- -slug: '/faq/use-cases/time-series' -title: 'ClickHouseを時系列データベースとして使用することは可能ですか?' -toc_hidden: true -toc_priority: 101 -description: 'ClickHouseを時系列データベースとして使用する方法について説明するページ' ---- - - - - -# Can I Use ClickHouse As a Time-Series Database? 
{#can-i-use-clickhouse-as-a-time-series-database} - -_Note: Please see the blog [Working with Time series data in ClickHouse](https://clickhouse.com/blog/working-with-time-series-data-and-functions-ClickHouse) for additional examples of using ClickHouse for time series analysis._ - -ClickHouseは、[OLAP](../../faq/general/olap.md) ワークロード用の汎用データストレージソリューションですが、多くの専門の[時系列データベース管理システム](https://clickhouse.com/engineering-resources/what-is-time-series-database)も存在します。それにもかかわらず、ClickHouseの[クエリ実行速度の重視](../../concepts/why-clickhouse-is-so-fast.md) により、専門のシステムを上回るパフォーマンスを発揮することが多いです。このトピックに関しては、多くの独立したベンチマークが存在するため、ここで実施することはありません。代わりに、そのユースケースに重要なClickHouseの機能に焦点を当てましょう。 - -まず第一に、典型的な時系列データを処理するための**[専門的なコーデック](../../sql-reference/statements/create/table.md#specialized-codecs)**があります。`DoubleDelta`や`Gorilla`のような一般的なアルゴリズム、またはClickHouse専用の`T64`などです。 - -第二に、時系列クエリはしばしば最近のデータ、例えば1日または1週間前のデータにのみアクセスします。高速なNVMe/SSDドライブと大容量のHDDドライブの両方を兼ね備えたサーバーを使用することが理にかなっています。ClickHouseの[TTL](/engines/table-engines/mergetree-family/mergetree#table_engine-mergetree-ttl)機能を使用すると、新鮮なホットデータを高速ドライブに保持し、データが古くなるにつれて徐々に遅いドライブに移動できます。要件が求める場合、さらに古いデータのロールアップや削除も可能です。 - -生データをストレージして処理するというClickHouseの哲学に反しますが、[マテリアライズドビュー](../../sql-reference/statements/create/view.md)を使用して、より厳しいレイテンシやコストの要件に適合させることができます。 - -## Related Content {#related-content} - -- Blog: [Working with time series data in ClickHouse](https://clickhouse.com/blog/working-with-time-series-data-and-functions-ClickHouse) diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/faq/use-cases/time-series.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/faq/use-cases/time-series.md.hash deleted file mode 100644 index 6620aff808f..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/faq/use-cases/time-series.md.hash +++ /dev/null @@ -1 +0,0 @@ -f6ce215c6389fe00 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/fast-release-24-2.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/fast-release-24-2.md.hash deleted file mode 100644 index 9e434f34a33..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/fast-release-24-2.md.hash +++ /dev/null @@ -1 +0,0 @@ -1c6bcbad89365eef diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/_category_.yml b/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/_category_.yml deleted file mode 100644 index 4fcef013158..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/_category_.yml +++ /dev/null @@ -1,8 +0,0 @@ -position: 1 -label: 'Getting Started' -collapsible: true -collapsed: true -link: - type: generated-index - title: Getting Started - slug: /getting-started diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/amazon-reviews.md b/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/amazon-reviews.md deleted file mode 100644 index b4cd754f664..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/amazon-reviews.md +++ /dev/null @@ -1,241 +0,0 @@ ---- -description: 'Over 150M customer reviews of Amazon products' -sidebar_label: 'Amazon customer reviews' -slug: '/getting-started/example-datasets/amazon-reviews' -title: 'Amazon Customer Review' ---- - - - -This dataset contains over 150M customer reviews of Amazon products. The data is in snappy-compressed Parquet files in AWS S3 that total 49GB in size (compressed). Let's walk through the steps to insert it into ClickHouse. 
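
If you want to check the schema that ClickHouse infers from the Parquet files before creating any table, a minimal sketch (assuming the same public bucket path used in the queries below):

```sql
-- Inspect the inferred column names and types without inserting anything
DESCRIBE s3('https://datasets-documentation.s3.eu-west-3.amazonaws.com/amazon_reviews/amazon_reviews_2015.snappy.parquet');
```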
- -:::note -The queries below were executed on a **Production** instance of ClickHouse Cloud. For more information see -["Playground specifications"](/getting-started/playground#specifications). -::: - -## データセットの読み込み {#loading-the-dataset} - -1. ClickHouseにデータを挿入せずに、その場でクエリを実行できます。いくつかの行を取得して、どのようなものか見てみましょう: - -```sql -SELECT * -FROM s3('https://datasets-documentation.s3.eu-west-3.amazonaws.com/amazon_reviews/amazon_reviews_2015.snappy.parquet') -LIMIT 3 -``` - -行は次のようになります: - -```response -Row 1: -────── -review_date: 16462 -marketplace: US -customer_id: 25444946 -- 25.44百万 -review_id: R146L9MMZYG0WA -product_id: B00NV85102 -product_parent: 908181913 -- 908.18百万 -product_title: XIKEZAN iPhone 6 Plus 5.5インチ防水ケース、衝撃防止、防塵、防雪フルボディスキンケース、ハンドストラップ&ヘッドフォンアダプタ&キックスタンド付き -product_category: Wireless -star_rating: 4 -helpful_votes: 0 -total_votes: 0 -vine: false -verified_purchase: true -review_headline: ケースは頑丈で、私が望む通りに保護します -review_body: 防水部分は過信しません(下のゴムシールは私の神経を使ったので外しました)。でも、このケースは頑丈で、私が望む通りに保護します。 - -Row 2: -────── -review_date: 16462 -marketplace: US -customer_id: 1974568 -- 1.97百万 -review_id: R2LXDXT293LG1T -product_id: B00OTFZ23M -product_parent: 951208259 -- 951.21百万 -product_title: Season.C シカゴ・ブルズ マリリン・モンロー No.1 ハードバックケースカバー サムスンギャラクシーS5 i9600用 -product_category: Wireless -star_rating: 1 -helpful_votes: 0 -total_votes: 0 -vine: false -verified_purchase: true -review_headline: 一つ星 -review_body: ケースが電話に合わないので使えません。お金の無駄です! - -Row 3: -────── -review_date: 16462 -marketplace: US -customer_id: 24803564 -- 24.80百万 -review_id: R7K9U5OEIRJWR -product_id: B00LB8C4U4 -product_parent: 524588109 -- 524.59百万 -product_title: iPhone 5s ケース、BUDDIBOX [Shield] 薄型デュアルレイヤー保護ケース キックスタンド付き Apple iPhone 5および5s用 -product_category: Wireless -star_rating: 4 -helpful_votes: 0 -total_votes: 0 -vine: false -verified_purchase: true -review_headline: しかし全体的にこのケースはかなり頑丈で、電話を良く保護します -review_body: 最初は前面の部分を電話に固定するのが少し難しかったですが、全体的にこのケースはかなり頑丈で、電話を良く保護します。これは私が必要なことです。このケースを再度購入するつもりです。 -``` - -2. データをClickHouseに保存するために、新しい `MergeTree` テーブル `amazon_reviews` を定義しましょう: - -```sql -CREATE DATABASE amazon - -CREATE TABLE amazon.amazon_reviews -( - `review_date` Date, - `marketplace` LowCardinality(String), - `customer_id` UInt64, - `review_id` String, - `product_id` String, - `product_parent` UInt64, - `product_title` String, - `product_category` LowCardinality(String), - `star_rating` UInt8, - `helpful_votes` UInt32, - `total_votes` UInt32, - `vine` Bool, - `verified_purchase` Bool, - `review_headline` String, - `review_body` String, - PROJECTION helpful_votes - ( - SELECT * - ORDER BY helpful_votes - ) -) -ENGINE = MergeTree -ORDER BY (review_date, product_category) -``` - -3. 次の `INSERT` コマンドは、`s3Cluster` テーブル関数を使用しており、これによりクラスタのすべてのノードを使用して複数のS3ファイルを同時に処理できます。また、`https://datasets-documentation.s3.eu-west-3.amazonaws.com/amazon_reviews/amazon_reviews_*.snappy.parquet` という名前で始まるファイルを挿入するためにワイルドカードも使用しています: - -```sql -INSERT INTO amazon.amazon_reviews SELECT * -FROM s3Cluster('default', -'https://datasets-documentation.s3.eu-west-3.amazonaws.com/amazon_reviews/amazon_reviews_*.snappy.parquet') -``` - -:::tip -ClickHouse Cloudでは、クラスタの名前は `default` です。 `default` をあなたのクラスタ名に変更するか、クラスタがない場合は `s3Cluster` の代わりに `s3` テーブル関数を使用してください。 -::: - -5. このクエリは時間がかからず、平均して毎秒約300,000行の速度で処理されます。5分ほどの間にすべての行が挿入されるはずです: - -```sql runnable -SELECT formatReadableQuantity(count()) -FROM amazon.amazon_reviews -``` - -6. 
データがどれだけのスペースを使用しているか見てみましょう: - -```sql runnable -SELECT - disk_name, - formatReadableSize(sum(data_compressed_bytes) AS size) AS compressed, - formatReadableSize(sum(data_uncompressed_bytes) AS usize) AS uncompressed, - round(usize / size, 2) AS compr_rate, - sum(rows) AS rows, - count() AS part_count -FROM system.parts -WHERE (active = 1) AND (table = 'amazon_reviews') -GROUP BY disk_name -ORDER BY size DESC -``` - -元のデータは約70Gでしたが、ClickHouseでは約30Gのサイズを占めました。 - -## 例のクエリ {#example-queries} - -7. いくつかのクエリを実行してみましょう。データセット内で最も役立つレビューのトップ10はこちらです: - -```sql runnable -SELECT - product_title, - review_headline -FROM amazon.amazon_reviews -ORDER BY helpful_votes DESC -LIMIT 10 -``` - -:::note -このクエリは、パフォーマンスを向上させるために [プロジェクション](/data-modeling/projections) を使用しています。 -::: - -8. Amazonでレビューが最も多いトップ10製品はこちらです: - -```sql runnable -SELECT - any(product_title), - count() -FROM amazon.amazon_reviews -GROUP BY product_id -ORDER BY 2 DESC -LIMIT 10; -``` - -9. 各製品の月ごとの平均レビュー評価を示します(実際の [Amazonの就職面接質問](https://datalemur.com/questions/sql-avg-review-ratings)!): - -```sql runnable -SELECT - toStartOfMonth(review_date) AS month, - any(product_title), - avg(star_rating) AS avg_stars -FROM amazon.amazon_reviews -GROUP BY - month, - product_id -ORDER BY - month DESC, - product_id ASC -LIMIT 20; -``` - -10. 各製品カテゴリごとの投票総数を示します。このクエリは、`product_category` が主キーに含まれているため高速です: - -```sql runnable -SELECT - sum(total_votes), - product_category -FROM amazon.amazon_reviews -GROUP BY product_category -ORDER BY 1 DESC -``` - -11. レビュー内で最も頻繁に**"awful"**という単語が出現する製品を探します。これは大きな作業です - 1.51億以上の文字列を解析して単語を探す必要があります: - -```sql runnable settings={'enable_parallel_replicas':1} -SELECT - product_id, - any(product_title), - avg(star_rating), - count() AS count -FROM amazon.amazon_reviews -WHERE position(review_body, 'awful') > 0 -GROUP BY product_id -ORDER BY count DESC -LIMIT 50; -``` - -このような大量のデータに対するクエリ時間に注目してください。結果も読むのが楽しいです! - -12. 同じクエリを再度実行できますが、今回はレビュー内で**awesome**を検索します: - -```sql runnable settings={'enable_parallel_replicas':1} -SELECT - product_id, - any(product_title), - avg(star_rating), - count() AS count -FROM amazon.amazon_reviews -WHERE position(review_body, 'awesome') > 0 -GROUP BY product_id -ORDER BY count DESC -LIMIT 50; -``` diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/amazon-reviews.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/amazon-reviews.md.hash deleted file mode 100644 index 2ad5a6dd233..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/amazon-reviews.md.hash +++ /dev/null @@ -1 +0,0 @@ -0792d96c84865e67 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/amplab-benchmark.md b/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/amplab-benchmark.md deleted file mode 100644 index 8a298287e4f..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/amplab-benchmark.md +++ /dev/null @@ -1,128 +0,0 @@ ---- -description: 'A benchmark dataset used for comparing the performance of data warehousing - solutions.' 
-sidebar_label: 'AMPLab Big Data Benchmark' -slug: '/getting-started/example-datasets/amplab-benchmark' -title: 'AMPLab Big Data Benchmark' ---- - - - -See https://amplab.cs.berkeley.edu/benchmark/ - -無料アカウントにサインアップするには、https://aws.amazon.com にアクセスしてください。クレジットカード、メールアドレス、電話番号が必要です。新しいアクセスキーは、https://console.aws.amazon.com/iam/home?nc2=h_m_sc#security_credential で取得できます。 - -コンソールで次のコマンドを実行します: - -```bash -$ sudo apt-get install s3cmd -$ mkdir tiny; cd tiny; -$ s3cmd sync s3://big-data-benchmark/pavlo/text-deflate/tiny/ . -$ cd .. -$ mkdir 1node; cd 1node; -$ s3cmd sync s3://big-data-benchmark/pavlo/text-deflate/1node/ . -$ cd .. -$ mkdir 5nodes; cd 5nodes; -$ s3cmd sync s3://big-data-benchmark/pavlo/text-deflate/5nodes/ . -$ cd .. -``` - -次の ClickHouse クエリを実行します: - -```sql -CREATE TABLE rankings_tiny -( - pageURL String, - pageRank UInt32, - avgDuration UInt32 -) ENGINE = Log; - -CREATE TABLE uservisits_tiny -( - sourceIP String, - destinationURL String, - visitDate Date, - adRevenue Float32, - UserAgent String, - cCode FixedString(3), - lCode FixedString(6), - searchWord String, - duration UInt32 -) ENGINE = MergeTree(visitDate, visitDate, 8192); - -CREATE TABLE rankings_1node -( - pageURL String, - pageRank UInt32, - avgDuration UInt32 -) ENGINE = Log; - -CREATE TABLE uservisits_1node -( - sourceIP String, - destinationURL String, - visitDate Date, - adRevenue Float32, - UserAgent String, - cCode FixedString(3), - lCode FixedString(6), - searchWord String, - duration UInt32 -) ENGINE = MergeTree(visitDate, visitDate, 8192); - -CREATE TABLE rankings_5nodes_on_single -( - pageURL String, - pageRank UInt32, - avgDuration UInt32 -) ENGINE = Log; - -CREATE TABLE uservisits_5nodes_on_single -( - sourceIP String, - destinationURL String, - visitDate Date, - adRevenue Float32, - UserAgent String, - cCode FixedString(3), - lCode FixedString(6), - searchWord String, - duration UInt32 -) ENGINE = MergeTree(visitDate, visitDate, 8192); -``` - -コンソールに戻ります: - -```bash -$ for i in tiny/rankings/*.deflate; do echo $i; zlib-flate -uncompress < $i | clickhouse-client --host=example-perftest01j --query="INSERT INTO rankings_tiny FORMAT CSV"; done -$ for i in tiny/uservisits/*.deflate; do echo $i; zlib-flate -uncompress < $i | clickhouse-client --host=example-perftest01j --query="INSERT INTO uservisits_tiny FORMAT CSV"; done -$ for i in 1node/rankings/*.deflate; do echo $i; zlib-flate -uncompress < $i | clickhouse-client --host=example-perftest01j --query="INSERT INTO rankings_1node FORMAT CSV"; done -$ for i in 1node/uservisits/*.deflate; do echo $i; zlib-flate -uncompress < $i | clickhouse-client --host=example-perftest01j --query="INSERT INTO uservisits_1node FORMAT CSV"; done -$ for i in 5nodes/rankings/*.deflate; do echo $i; zlib-flate -uncompress < $i | clickhouse-client --host=example-perftest01j --query="INSERT INTO rankings_5nodes_on_single FORMAT CSV"; done -$ for i in 5nodes/uservisits/*.deflate; do echo $i; zlib-flate -uncompress < $i | clickhouse-client --host=example-perftest01j --query="INSERT INTO uservisits_5nodes_on_single FORMAT CSV"; done -``` - -データサンプルを取得するためのクエリ: - -```sql -SELECT pageURL, pageRank FROM rankings_1node WHERE pageRank > 1000 - -SELECT substring(sourceIP, 1, 8), sum(adRevenue) FROM uservisits_1node GROUP BY substring(sourceIP, 1, 8) - -SELECT - sourceIP, - sum(adRevenue) AS totalRevenue, - avg(pageRank) AS pageRank -FROM rankings_1node ALL INNER JOIN -( - SELECT - sourceIP, - destinationURL AS pageURL, - adRevenue - FROM uservisits_1node - WHERE (visitDate > '1980-01-01') 
AND (visitDate < '1980-04-01') -) USING pageURL -GROUP BY sourceIP -ORDER BY totalRevenue DESC -LIMIT 1 -``` diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/amplab-benchmark.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/amplab-benchmark.md.hash deleted file mode 100644 index de899a19628..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/amplab-benchmark.md.hash +++ /dev/null @@ -1 +0,0 @@ -955aa7116a01089e diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/brown-benchmark.md b/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/brown-benchmark.md deleted file mode 100644 index 9308b022918..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/brown-benchmark.md +++ /dev/null @@ -1,448 +0,0 @@ ---- -description: 'A new analytical benchmark for machine-generated log data' -sidebar_label: 'Brown University Benchmark' -slug: '/getting-started/example-datasets/brown-benchmark' -title: 'Brown University Benchmark' ---- - - - -`MgBench`は、機械生成のログデータに対する新しい分析ベンチマークです。 [Andrew Crotty](http://cs.brown.edu/people/acrotty/)。 - -データをダウンロード: -```bash -wget https://datasets.clickhouse.com/mgbench{1..3}.csv.xz -``` - -データを解凍: -```bash -xz -v -d mgbench{1..3}.csv.xz -``` - -データベースとテーブルを作成: -```sql -CREATE DATABASE mgbench; -``` - -```sql -USE mgbench; -``` - -```sql -CREATE TABLE mgbench.logs1 ( - log_time DateTime, - machine_name LowCardinality(String), - machine_group LowCardinality(String), - cpu_idle Nullable(Float32), - cpu_nice Nullable(Float32), - cpu_system Nullable(Float32), - cpu_user Nullable(Float32), - cpu_wio Nullable(Float32), - disk_free Nullable(Float32), - disk_total Nullable(Float32), - part_max_used Nullable(Float32), - load_fifteen Nullable(Float32), - load_five Nullable(Float32), - load_one Nullable(Float32), - mem_buffers Nullable(Float32), - mem_cached Nullable(Float32), - mem_free Nullable(Float32), - mem_shared Nullable(Float32), - swap_free Nullable(Float32), - bytes_in Nullable(Float32), - bytes_out Nullable(Float32) -) -ENGINE = MergeTree() -ORDER BY (machine_group, machine_name, log_time); -``` - -```sql -CREATE TABLE mgbench.logs2 ( - log_time DateTime, - client_ip IPv4, - request String, - status_code UInt16, - object_size UInt64 -) -ENGINE = MergeTree() -ORDER BY log_time; -``` - -```sql -CREATE TABLE mgbench.logs3 ( - log_time DateTime64, - device_id FixedString(15), - device_name LowCardinality(String), - device_type LowCardinality(String), - device_floor UInt8, - event_type LowCardinality(String), - event_unit FixedString(1), - event_value Nullable(Float32) -) -ENGINE = MergeTree() -ORDER BY (event_type, log_time); -``` - -データを挿入: - -```bash -clickhouse-client --query "INSERT INTO mgbench.logs1 FORMAT CSVWithNames" < mgbench1.csv -clickhouse-client --query "INSERT INTO mgbench.logs2 FORMAT CSVWithNames" < mgbench2.csv -clickhouse-client --query "INSERT INTO mgbench.logs3 FORMAT CSVWithNames" < mgbench3.csv -``` - -## ベンチマーククエリを実行する: {#run-benchmark-queries} - -```sql -USE mgbench; -``` - -```sql --- Q1.1: 真夜中から各ウェブサーバーのCPU/ネットワーク利用率は? 
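-- (下のサブクエリでは NULL のサンプルを COALESCE で 0.0 に置き換え、その上で MIN / MAX / AVG を計算する)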
- -SELECT machine_name, - MIN(cpu) AS cpu_min, - MAX(cpu) AS cpu_max, - AVG(cpu) AS cpu_avg, - MIN(net_in) AS net_in_min, - MAX(net_in) AS net_in_max, - AVG(net_in) AS net_in_avg, - MIN(net_out) AS net_out_min, - MAX(net_out) AS net_out_max, - AVG(net_out) AS net_out_avg -FROM ( - SELECT machine_name, - COALESCE(cpu_user, 0.0) AS cpu, - COALESCE(bytes_in, 0.0) AS net_in, - COALESCE(bytes_out, 0.0) AS net_out - FROM logs1 - WHERE machine_name IN ('anansi','aragog','urd') - AND log_time >= TIMESTAMP '2017-01-11 00:00:00' -) AS r -GROUP BY machine_name; -``` - -```sql --- Q1.2: 過去1日間にオフラインになったコンピュータラボのマシンは? - -SELECT machine_name, - log_time -FROM logs1 -WHERE (machine_name LIKE 'cslab%' OR - machine_name LIKE 'mslab%') - AND load_one IS NULL - AND log_time >= TIMESTAMP '2017-01-10 00:00:00' -ORDER BY machine_name, - log_time; -``` - -```sql --- Q1.3: 特定のワークステーションの過去10日間の時間ごとの平均メトリックは? - -SELECT dt, - hr, - AVG(load_fifteen) AS load_fifteen_avg, - AVG(load_five) AS load_five_avg, - AVG(load_one) AS load_one_avg, - AVG(mem_free) AS mem_free_avg, - AVG(swap_free) AS swap_free_avg -FROM ( - SELECT CAST(log_time AS DATE) AS dt, - EXTRACT(HOUR FROM log_time) AS hr, - load_fifteen, - load_five, - load_one, - mem_free, - swap_free - FROM logs1 - WHERE machine_name = 'babbage' - AND load_fifteen IS NOT NULL - AND load_five IS NOT NULL - AND load_one IS NOT NULL - AND mem_free IS NOT NULL - AND swap_free IS NOT NULL - AND log_time >= TIMESTAMP '2017-01-01 00:00:00' -) AS r -GROUP BY dt, - hr -ORDER BY dt, - hr; -``` - -```sql --- Q1.4: 1か月間、各サーバーがディスクI/Oでブロックされた頻度は? - -SELECT machine_name, - COUNT(*) AS spikes -FROM logs1 -WHERE machine_group = 'Servers' - AND cpu_wio > 0.99 - AND log_time >= TIMESTAMP '2016-12-01 00:00:00' - AND log_time < TIMESTAMP '2017-01-01 00:00:00' -GROUP BY machine_name -ORDER BY spikes DESC -LIMIT 10; -``` - -```sql --- Q1.5: 外部からアクセス可能なVMの中でメモリが不足したものは? - -SELECT machine_name, - dt, - MIN(mem_free) AS mem_free_min -FROM ( - SELECT machine_name, - CAST(log_time AS DATE) AS dt, - mem_free - FROM logs1 - WHERE machine_group = 'DMZ' - AND mem_free IS NOT NULL -) AS r -GROUP BY machine_name, - dt -HAVING MIN(mem_free) < 10000 -ORDER BY machine_name, - dt; -``` - -```sql --- Q1.6: 全ファイルサーバーにおける総時間ごとのネットワークトラフィックは? - -SELECT dt, - hr, - SUM(net_in) AS net_in_sum, - SUM(net_out) AS net_out_sum, - SUM(net_in) + SUM(net_out) AS both_sum -FROM ( - SELECT CAST(log_time AS DATE) AS dt, - EXTRACT(HOUR FROM log_time) AS hr, - COALESCE(bytes_in, 0.0) / 1000000000.0 AS net_in, - COALESCE(bytes_out, 0.0) / 1000000000.0 AS net_out - FROM logs1 - WHERE machine_name IN ('allsorts','andes','bigred','blackjack','bonbon', - 'cadbury','chiclets','cotton','crows','dove','fireball','hearts','huey', - 'lindt','milkduds','milkyway','mnm','necco','nerds','orbit','peeps', - 'poprocks','razzles','runts','smarties','smuggler','spree','stride', - 'tootsie','trident','wrigley','york') -) AS r -GROUP BY dt, - hr -ORDER BY both_sum DESC -LIMIT 10; -``` - -```sql --- Q2.1: 過去2週間にサーバーエラーを引き起こしたリクエストは? - -SELECT * -FROM logs2 -WHERE status_code >= 500 - AND log_time >= TIMESTAMP '2012-12-18 00:00:00' -ORDER BY log_time; -``` - -```sql --- Q2.2: 特定の2週間の間にユーザーパスワードファイルが漏洩したか? - -SELECT * -FROM logs2 -WHERE status_code >= 200 - AND status_code < 300 - AND request LIKE '%/etc/passwd%' - AND log_time >= TIMESTAMP '2012-05-06 00:00:00' - AND log_time < TIMESTAMP '2012-05-20 00:00:00'; -``` - -```sql --- Q2.3: 過去1か月間のトップレベルリクエストの平均パスの深さは? 
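-- (パスの深さは LENGTH(request) と '/' を取り除いた後の長さの差、つまり '/' の出現回数として数える)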
- -SELECT top_level, - AVG(LENGTH(request) - LENGTH(REPLACE(request, '/', ''))) AS depth_avg -FROM ( - SELECT SUBSTRING(request FROM 1 FOR len) AS top_level, - request - FROM ( - SELECT POSITION(SUBSTRING(request FROM 2), '/') AS len, - request - FROM logs2 - WHERE status_code >= 200 - AND status_code < 300 - AND log_time >= TIMESTAMP '2012-12-01 00:00:00' - ) AS r - WHERE len > 0 -) AS s -WHERE top_level IN ('/about','/courses','/degrees','/events', - '/grad','/industry','/news','/people', - '/publications','/research','/teaching','/ugrad') -GROUP BY top_level -ORDER BY top_level; -``` - -```sql --- Q2.4: 過去3か月間に過剰なリクエストを送ったクライアントは? - -SELECT client_ip, - COUNT(*) AS num_requests -FROM logs2 -WHERE log_time >= TIMESTAMP '2012-10-01 00:00:00' -GROUP BY client_ip -HAVING COUNT(*) >= 100000 -ORDER BY num_requests DESC; -``` - -```sql --- Q2.5: 日々のユニークビジター数は? - -SELECT dt, - COUNT(DISTINCT client_ip) -FROM ( - SELECT CAST(log_time AS DATE) AS dt, - client_ip - FROM logs2 -) AS r -GROUP BY dt -ORDER BY dt; -``` - -```sql --- Q2.6: 平均および最大データ転送速度 (Gbps) は? - -SELECT AVG(transfer) / 125000000.0 AS transfer_avg, - MAX(transfer) / 125000000.0 AS transfer_max -FROM ( - SELECT log_time, - SUM(object_size) AS transfer - FROM logs2 - GROUP BY log_time -) AS r; -``` - -```sql --- Q3.1: 週末に屋内温度が凍結に達したか? - -SELECT * -FROM logs3 -WHERE event_type = 'temperature' - AND event_value <= 32.0 - AND log_time >= '2019-11-29 17:00:00.000'; -``` - -```sql --- Q3.4: 過去6か月間に各ドアが開かれた頻度は? - -SELECT device_name, - device_floor, - COUNT(*) AS ct -FROM logs3 -WHERE event_type = 'door_open' - AND log_time >= '2019-06-01 00:00:00.000' -GROUP BY device_name, - device_floor -ORDER BY ct DESC; -``` - -クエリ3.5はUNIONを使用します。 SELECTクエリの結果を結合するためのモードを設定します。この設定は、UNION ALLまたはUNION DISTINCTを明示的に指定せずにUNIONで共有される場合のみ使用されます。 -```sql -SET union_default_mode = 'DISTINCT' -``` - -```sql --- Q3.5: 冬と夏の間に建物内で大きな温度変動が発生する場所は? - -WITH temperature AS ( - SELECT dt, - device_name, - device_type, - device_floor - FROM ( - SELECT dt, - hr, - device_name, - device_type, - device_floor, - AVG(event_value) AS temperature_hourly_avg - FROM ( - SELECT CAST(log_time AS DATE) AS dt, - EXTRACT(HOUR FROM log_time) AS hr, - device_name, - device_type, - device_floor, - event_value - FROM logs3 - WHERE event_type = 'temperature' - ) AS r - GROUP BY dt, - hr, - device_name, - device_type, - device_floor - ) AS s - GROUP BY dt, - device_name, - device_type, - device_floor - HAVING MAX(temperature_hourly_avg) - MIN(temperature_hourly_avg) >= 25.0 -) -SELECT DISTINCT device_name, - device_type, - device_floor, - 'WINTER' -FROM temperature -WHERE dt >= DATE '2018-12-01' - AND dt < DATE '2019-03-01' -UNION -SELECT DISTINCT device_name, - device_type, - device_floor, - 'SUMMER' -FROM temperature -WHERE dt >= DATE '2019-06-01' - AND dt < DATE '2019-09-01'; -``` - -```sql --- Q3.6: 各デバイスカテゴリの月次電力消費メトリックは? 
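-- (device_name のプレフィックスごとに CASE で別カラムに振り分け、時間ごとの平均を求めてから月次で合計・平均する)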
- -SELECT yr, - mo, - SUM(coffee_hourly_avg) AS coffee_monthly_sum, - AVG(coffee_hourly_avg) AS coffee_monthly_avg, - SUM(printer_hourly_avg) AS printer_monthly_sum, - AVG(printer_hourly_avg) AS printer_monthly_avg, - SUM(projector_hourly_avg) AS projector_monthly_sum, - AVG(projector_hourly_avg) AS projector_monthly_avg, - SUM(vending_hourly_avg) AS vending_monthly_sum, - AVG(vending_hourly_avg) AS vending_monthly_avg -FROM ( - SELECT dt, - yr, - mo, - hr, - AVG(coffee) AS coffee_hourly_avg, - AVG(printer) AS printer_hourly_avg, - AVG(projector) AS projector_hourly_avg, - AVG(vending) AS vending_hourly_avg - FROM ( - SELECT CAST(log_time AS DATE) AS dt, - EXTRACT(YEAR FROM log_time) AS yr, - EXTRACT(MONTH FROM log_time) AS mo, - EXTRACT(HOUR FROM log_time) AS hr, - CASE WHEN device_name LIKE 'coffee%' THEN event_value END AS coffee, - CASE WHEN device_name LIKE 'printer%' THEN event_value END AS printer, - CASE WHEN device_name LIKE 'projector%' THEN event_value END AS projector, - CASE WHEN device_name LIKE 'vending%' THEN event_value END AS vending - FROM logs3 - WHERE device_type = 'meter' - ) AS r - GROUP BY dt, - yr, - mo, - hr -) AS s -GROUP BY yr, - mo -ORDER BY yr, - mo; -``` - -データは、[Playground](https://sql.clickhouse.com)でインタラクティブクエリのために利用可能でもあります。 [例](https://sql.clickhouse.com?query_id=1MXMHASDLEQIP4P1D1STND)。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/brown-benchmark.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/brown-benchmark.md.hash deleted file mode 100644 index 044d86f77df..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/brown-benchmark.md.hash +++ /dev/null @@ -1 +0,0 @@ -6f9e00357462da2d diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/cell-towers.md b/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/cell-towers.md deleted file mode 100644 index 86eb60d069f..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/cell-towers.md +++ /dev/null @@ -1,368 +0,0 @@ ---- -description: 'Learn how to load OpenCelliD data into ClickHouse, connect Apache - Superset to ClickHouse and build a dashboard based on data' -sidebar_label: 'Geo Data' -sidebar_position: 3 -slug: '/getting-started/example-datasets/cell-towers' -title: 'Geo Data using the Cell Tower Dataset' ---- - -import ConnectionDetails from '@site/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_gather_your_details_http.mdx'; -import Image from '@theme/IdealImage'; -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; -import CodeBlock from '@theme/CodeBlock'; -import ActionsMenu from '@site/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_service_actions_menu.md'; -import SQLConsoleDetail from '@site/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_launch_sql_console.md'; -import SupersetDocker from '@site/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_add_superset_detail.md'; -import cloud_load_data_sample from '@site/static/images/_snippets/cloud-load-data-sample.png'; -import cell_towers_1 from '@site/static/images/getting-started/example-datasets/superset-cell-tower-dashboard.png' -import add_a_database from '@site/static/images/getting-started/example-datasets/superset-add.png' -import choose_clickhouse_connect from '@site/static/images/getting-started/example-datasets/superset-choose-a-database.png' 
-import add_clickhouse_as_superset_datasource from '@site/static/images/getting-started/example-datasets/superset-connect-a-database.png' -import add_cell_towers_table_as_dataset from '@site/static/images/getting-started/example-datasets/superset-add-dataset.png' -import create_a_map_in_superset from '@site/static/images/getting-started/example-datasets/superset-create-map.png' -import specify_long_and_lat from '@site/static/images/getting-started/example-datasets/superset-lon-lat.png' -import superset_mcc_2024 from '@site/static/images/getting-started/example-datasets/superset-mcc-204.png' -import superset_radio_umts from '@site/static/images/getting-started/example-datasets/superset-radio-umts.png' -import superset_umts_netherlands from '@site/static/images/getting-started/example-datasets/superset-umts-netherlands.png' -import superset_cell_tower_dashboard from '@site/static/images/getting-started/example-datasets/superset-cell-tower-dashboard.png' - -## ゴール {#goal} - -このガイドでは、次のことを学びます: -- OpenCelliDデータをClickHouseにロードする -- Apache SupersetをClickHouseに接続する -- データセットに基づくダッシュボードを構築する - -ここにこのガイドで作成されたダッシュボードのプレビューがあります: - -mcc 204の無線タイプによるセルタワーのダッシュボード - -## データセットを取得する {#get-the-dataset} - -このデータセットは[OpenCelliD](https://www.opencellid.org/)からのものであり、世界最大のオープンなセルスタワーデータベースです。 - -2021年現在、世界中のセルタワー(GSM、LTE、UMTSなど)に関する4000万件以上のレコードが、地理座標やメタデータ(国コード、ネットワークなど)とともに含まれています。 - -OpenCelliDプロジェクトは、クリエイティブ・コモンズ表示-継承4.0国際ライセンスの下でライセンスされており、同じライセンスの条件のもとでこのデータセットのスナップショットを再配布します。最新のデータセットのバージョンは、サインイン後にダウンロードできます。 - - - - -### サンプルデータをロードする {#load-the-sample-data} - -ClickHouse Cloudは、このデータセットをS3からアップロードするための簡単なボタンを提供します。ClickHouse Cloud組織にログインするか、[ClickHouse.cloud](https://clickhouse.cloud)で無料トライアルを作成してください。 - - -**サンプルデータ**タブから**Cell Towers**データセットを選択し、**Load data**をクリックします。 - -セルタワーデータセットをロードする - -### cell_towersテーブルのスキーマを調べる {#examine-the-schema-of-the-cell_towers-table} -```sql -DESCRIBE TABLE cell_towers -``` - - - -これは`DESCRIBE`の出力です。このガイドの後半では、フィールドタイプの選択が説明されます。 -```response -┌─name──────────┬─type──────────────────────────────────────────────────────────────────┬ -│ radio │ Enum8('' = 0, 'CDMA' = 1, 'GSM' = 2, 'LTE' = 3, 'NR' = 4, 'UMTS' = 5) │ -│ mcc │ UInt16 │ -│ net │ UInt16 │ -│ area │ UInt16 │ -│ cell │ UInt64 │ -│ unit │ Int16 │ -│ lon │ Float64 │ -│ lat │ Float64 │ -│ range │ UInt32 │ -│ samples │ UInt32 │ -│ changeable │ UInt8 │ -│ created │ DateTime │ -│ updated │ DateTime │ -│ averageSignal │ UInt8 │ -└───────────────┴───────────────────────────────────────────────────────────────────────┴ -``` - - - - -1. テーブルを作成します: - -```sql -CREATE TABLE cell_towers -( - radio Enum8('' = 0, 'CDMA' = 1, 'GSM' = 2, 'LTE' = 3, 'NR' = 4, 'UMTS' = 5), - mcc UInt16, - net UInt16, - area UInt16, - cell UInt64, - unit Int16, - lon Float64, - lat Float64, - range UInt32, - samples UInt32, - changeable UInt8, - created DateTime, - updated DateTime, - averageSignal UInt8 -) -ENGINE = MergeTree ORDER BY (radio, mcc, net, created); -``` - -2. 公開S3バケットからデータセットをインポートします(686 MB): - -```sql -INSERT INTO cell_towers SELECT * FROM s3('https://datasets-documentation.s3.amazonaws.com/cell_towers/cell_towers.csv.xz', 'CSVWithNames') -``` - - - - -## 一部の例を実行する {#examples} - -1. タイプ別のセルタワーの数: - -```sql -SELECT radio, count() AS c FROM cell_towers GROUP BY radio ORDER BY c DESC -``` -```response -┌─radio─┬────────c─┐ -│ UMTS │ 20686487 │ -│ LTE │ 12101148 │ -│ GSM │ 9931304 │ -│ CDMA │ 556344 │ -│ NR │ 867 │ -└───────┴──────────┘ - -5 rows in set. Elapsed: 0.011 sec. 
Processed 43.28 million rows, 43.28 MB (3.83 billion rows/s., 3.83 GB/s.) -``` - -2. [モバイル国コード(MCC)](https://en.wikipedia.org/wiki/Mobile_country_code)別のセルタワー: - -```sql -SELECT mcc, count() FROM cell_towers GROUP BY mcc ORDER BY count() DESC LIMIT 10 -``` -```response -┌─mcc─┬─count()─┐ -│ 310 │ 5024650 │ -│ 262 │ 2622423 │ -│ 250 │ 1953176 │ -│ 208 │ 1891187 │ -│ 724 │ 1836150 │ -│ 404 │ 1729151 │ -│ 234 │ 1618924 │ -│ 510 │ 1353998 │ -│ 440 │ 1343355 │ -│ 311 │ 1332798 │ -└─────┴─────────┘ - -10 rows in set. Elapsed: 0.019 sec. Processed 43.28 million rows, 86.55 MB (2.33 billion rows/s., 4.65 GB/s.) -``` - -上記のクエリと[MCCリスト](https://en.wikipedia.org/wiki/Mobile_country_code)に基づくと、セルタワーが最も多い国は、アメリカ、ドイツ、ロシアです。 - -これらの値をデコードするために、ClickHouseで[Dictionary](../../sql-reference/dictionaries/index.md)を作成したいかもしれません。 - -## 事例: ジオデータを組み込む {#use-case} - -[`pointInPolygon`](/sql-reference/functions/geo/coordinates.md/#pointinpolygon)関数を使用します。 - -1. ポリゴンを保存するテーブルを作成します: - - - - -```sql -CREATE TABLE moscow (polygon Array(Tuple(Float64, Float64))) -ORDER BY polygon; -``` - - - - -```sql -CREATE TEMPORARY TABLE -moscow (polygon Array(Tuple(Float64, Float64))); -``` - - - - -2. これは「新モスクワ」を除くモスクワの粗い形状です: - -```sql -INSERT INTO moscow VALUES ([(37.84172564285271, 55.78000432402266), -(37.8381207618713, 55.775874525970494), (37.83979446823122, 55.775626746008065), (37.84243326983639, 55.77446586811748), (37.84262672750849, 55.771974101091104), (37.84153238623039, 55.77114545193181), (37.841124690460184, 55.76722010265554), -(37.84239076983644, 55.76654891107098), (37.842283558197025, 55.76258709833121), (37.8421759312134, 55.758073999993734), (37.84198330422974, 55.75381499999371), (37.8416827275085, 55.749277102484484), (37.84157576190186, 55.74794544108413), -(37.83897929098507, 55.74525257875241), (37.83739676451868, 55.74404373042019), (37.838732481460525, 55.74298009816793), (37.841183997352545, 55.743060321833575), (37.84097476190185, 55.73938799999373), (37.84048155819702, 55.73570799999372), -(37.840095812164286, 55.73228210777237), (37.83983814285274, 55.73080491981639), (37.83846476321406, 55.729799917464675), (37.83835745269769, 55.72919751082619), (37.838636380279524, 55.72859509486539), (37.8395161005249, 55.727705075632784), -(37.83897964285276, 55.722727886185154), (37.83862557539366, 55.72034817326636), (37.83559735744853, 55.71944437307499), (37.835370708803126, 55.71831419154461), (37.83738169402022, 55.71765218986692), (37.83823396494291, 55.71691750159089), -(37.838056931213345, 55.71547311301385), (37.836812846557606, 55.71221445615604), (37.83522525396725, 55.709331054395555), (37.83269301586908, 55.70953687463627), (37.829667367706236, 55.70903403789297), (37.83311126588435, 55.70552351822608), -(37.83058993121339, 55.70041317726053), (37.82983872750851, 55.69883771404813), (37.82934501586913, 55.69718947487017), (37.828926414016685, 55.69504441658371), (37.82876530422971, 55.69287499999378), (37.82894754100031, 55.690759754047335), -(37.827697554878185, 55.68951421135665), (37.82447346292115, 55.68965045405069), (37.83136543914793, 55.68322046195302), (37.833554015869154, 55.67814012759211), (37.83544184655761, 55.67295011628339), (37.837480388885474, 55.6672498719639), -(37.838960677246064, 55.66316274139358), (37.83926093121332, 55.66046999999383), (37.839025050262435, 55.65869897264431), (37.83670784390257, 55.65794084879904), (37.835656529083245, 55.65694309303843), (37.83704060449217, 55.65689306460552), -(37.83696819873806, 55.65550363526252), (37.83760389616388, 
55.65487847246661), (37.83687972750851, 55.65356745541324), (37.83515216004943, 55.65155951234079), (37.83312418518067, 55.64979413590619), (37.82801726983639, 55.64640836412121), -(37.820614174591, 55.64164525405531), (37.818908190475426, 55.6421883258084), (37.81717543386075, 55.64112490388471), (37.81690987037274, 55.63916106913107), (37.815099354492155, 55.637925371757085), (37.808769150787356, 55.633798276884455), -(37.80100123544311, 55.62873670012244), (37.79598013491824, 55.62554336109055), (37.78634567724606, 55.62033499605651), (37.78334147619623, 55.618768681480326), (37.77746201055901, 55.619855533402706), (37.77527329626457, 55.61909966711279), -(37.77801986242668, 55.618770300976294), (37.778212973541216, 55.617257701952106), (37.77784818518065, 55.61574504433011), (37.77016867724609, 55.61148576294007), (37.760191219573976, 55.60599579539028), (37.75338926983641, 55.60227892751446), -(37.746329965606634, 55.59920577639331), (37.73939925396728, 55.59631430313617), (37.73273665739439, 55.5935318803559), (37.7299954450912, 55.59350760316188), (37.7268679946899, 55.59469840523759), (37.72626726983634, 55.59229549697373), -(37.7262673598022, 55.59081598950582), (37.71897193121335, 55.5877595845419), (37.70871550793456, 55.58393177431724), (37.700497489410374, 55.580917323756644), (37.69204305026244, 55.57778089778455), (37.68544477378839, 55.57815154690915), -(37.68391050793454, 55.57472945079756), (37.678803592590306, 55.57328235936491), (37.6743402539673, 55.57255251445782), (37.66813862698363, 55.57216388774464), (37.617927457672096, 55.57505691895805), (37.60443099999999, 55.5757737568051), -(37.599683515869145, 55.57749105910326), (37.59754177842709, 55.57796291823627), (37.59625834786988, 55.57906686095235), (37.59501783265684, 55.57746616444403), (37.593090671936025, 55.57671634534502), (37.587018007904, 55.577944600233785), -(37.578692203704804, 55.57982895000019), (37.57327546607398, 55.58116294118248), (37.57385012109279, 55.581550362779), (37.57399562266922, 55.5820107079112), (37.5735356072979, 55.58226289171689), (37.57290393054962, 55.582393529795155), -(37.57037722355653, 55.581919415056234), (37.5592298306885, 55.584471614867844), (37.54189249206543, 55.58867650795186), (37.5297256269836, 55.59158133551745), (37.517837865081766, 55.59443656218868), (37.51200186508174, 55.59635625174229), -(37.506808949737554, 55.59907823904434), (37.49820432275389, 55.6062944994944), (37.494406071441674, 55.60967103463367), (37.494760001358024, 55.61066689753365), (37.49397137107085, 55.61220931698269), (37.49016528606031, 55.613417718449064), -(37.48773249206542, 55.61530616333343), (37.47921386508177, 55.622640129112334), (37.470652153442394, 55.62993723476164), (37.46273446298218, 55.6368075123157), (37.46350692265317, 55.64068225239439), (37.46050283203121, 55.640794546982576), -(37.457627470916734, 55.64118904154646), (37.450718034393326, 55.64690488145138), (37.44239252645875, 55.65397824729769), (37.434587576721185, 55.66053543155961), (37.43582144975277, 55.661693766520735), (37.43576786245721, 55.662755031737014), -(37.430982915344174, 55.664610641628116), (37.428547447097685, 55.66778515273695), (37.42945134592044, 55.668633314343566), (37.42859571562949, 55.66948145750025), (37.4262836402282, 55.670813882451405), (37.418709037048295, 55.6811141674414), -(37.41922139651101, 55.68235377885389), (37.419218771842885, 55.68359335082235), (37.417196501327446, 55.684375235224735), (37.41607020370478, 55.68540557585352), (37.415640857147146, 55.68686637150793), 
(37.414632153442334, 55.68903015131686), -(37.413344899475064, 55.690896881757396), (37.41171432275391, 55.69264232162232), (37.40948282275393, 55.69455101638112), (37.40703674603271, 55.69638690385348), (37.39607169577025, 55.70451821283731), (37.38952706878662, 55.70942491932811), -(37.387778313491815, 55.71149057784176), (37.39049275399779, 55.71419814298992), (37.385557272491454, 55.7155489617061), (37.38388335714726, 55.71849856042102), (37.378368238098155, 55.7292763261685), (37.37763597123337, 55.730845879211614), -(37.37890062088197, 55.73167906388319), (37.37750451918789, 55.734703664681774), (37.375610832015965, 55.734851959522246), (37.3723813571472, 55.74105626086403), (37.37014935714723, 55.746115620904355), (37.36944173016362, 55.750883999993725), -(37.36975304365541, 55.76335905525834), (37.37244070571134, 55.76432079697595), (37.3724259757175, 55.76636979670426), (37.369922155757884, 55.76735417953104), (37.369892695770275, 55.76823419316575), (37.370214730163575, 55.782312184391266), -(37.370493611114505, 55.78436801120489), (37.37120164550783, 55.78596427165359), (37.37284851456452, 55.7874378183096), (37.37608325135799, 55.7886695054807), (37.3764587460632, 55.78947647305964), (37.37530000265506, 55.79146512926804), -(37.38235915344241, 55.79899647809345), (37.384344043655396, 55.80113596939471), (37.38594269577028, 55.80322699999366), (37.38711208598329, 55.804919036911976), (37.3880239841309, 55.806610999993666), (37.38928977249147, 55.81001864976979), -(37.39038389947512, 55.81348641242801), (37.39235781481933, 55.81983538336746), (37.393709457672124, 55.82417822811877), (37.394685720901464, 55.82792275755836), (37.39557615344238, 55.830447148154136), (37.39844478226658, 55.83167107969975), -(37.40019761214057, 55.83151823557964), (37.400398790382326, 55.83264967594742), (37.39659544313046, 55.83322180909622), (37.39667059524539, 55.83402792148566), (37.39682089947515, 55.83638877400216), (37.39643489154053, 55.83861656112751), -(37.3955338994751, 55.84072348043264), (37.392680272491454, 55.84502158126453), (37.39241188227847, 55.84659117913199), (37.392529730163616, 55.84816071336481), (37.39486835714723, 55.85288092980303), (37.39873052645878, 55.859893456073635), -(37.40272161111449, 55.86441833633205), (37.40697072750854, 55.867579567544375), (37.410007082016016, 55.868369880337), (37.4120992989502, 55.86920843741314), (37.412668021163924, 55.87055369615854), (37.41482461111453, 55.87170587948249), -(37.41862266137694, 55.873183961039565), (37.42413732540892, 55.874879126654704), (37.4312182698669, 55.875614937236705), (37.43111093783558, 55.8762723478417), (37.43332105622856, 55.87706546369396), (37.43385747619623, 55.87790681284802), -(37.441303050262405, 55.88027084462084), (37.44747234260555, 55.87942070143253), (37.44716141796871, 55.88072960917233), (37.44769797085568, 55.88121221323979), (37.45204320500181, 55.882080694420715), (37.45673176190186, 55.882346110794586), -(37.463383999999984, 55.88252729504517), (37.46682797486874, 55.88294937719063), (37.470014457672086, 55.88361266759345), (37.47751410450743, 55.88546991372396), (37.47860317658232, 55.88534929207307), (37.48165826025772, 55.882563306475106), -(37.48316434442331, 55.8815803226785), (37.483831555817645, 55.882427612793315), (37.483182967125686, 55.88372791409729), (37.483092277908824, 55.88495581062434), (37.4855716508179, 55.8875561994203), (37.486440636245746, 55.887827444039566), -(37.49014203439328, 55.88897899871799), (37.493210285705544, 55.890208937135604), (37.497512451065035, 
55.891342397444696), (37.49780744510645, 55.89174030252967), (37.49940333499519, 55.89239745507079), (37.50018383334346, 55.89339220941865), -(37.52421672750851, 55.903869074155224), (37.52977457672118, 55.90564076517974), (37.53503220370484, 55.90661661218259), (37.54042858064267, 55.90714113744566), (37.54320461007303, 55.905645048442985), (37.545686966066306, 55.906608607018505), -(37.54743976120755, 55.90788552162358), (37.55796999999999, 55.90901557907218), (37.572711542327866, 55.91059395704873), (37.57942799999998, 55.91073854155573), (37.58502865872187, 55.91009969268444), (37.58739968913264, 55.90794809960554), -(37.59131567193598, 55.908713267595054), (37.612687423278814, 55.902866854295375), (37.62348079629517, 55.90041967242986), (37.635797880950896, 55.898141151686396), (37.649487626983664, 55.89639275532968), (37.65619302513125, 55.89572360207488), -(37.66294133862307, 55.895295577183965), (37.66874564418033, 55.89505457604897), (37.67375601586915, 55.89254677027454), (37.67744661901856, 55.8947775867987), (37.688347, 55.89450045676125), (37.69480554232789, 55.89422926332761), -(37.70107096560668, 55.89322256101114), (37.705962965606716, 55.891763491662616), (37.711885134918205, 55.889110234998974), (37.71682005026245, 55.886577568759876), (37.7199315476074, 55.88458159806678), (37.72234560316464, 55.882281005794134), -(37.72364385977171, 55.8809452036196), (37.725371142837474, 55.8809722706006), (37.727870902099546, 55.88037213862385), (37.73394330422971, 55.877941504088696), (37.745339592590376, 55.87208120378722), (37.75525267724611, 55.86703807949492), -(37.76919976190188, 55.859821640197474), (37.827835219574, 55.82962968399116), (37.83341438888553, 55.82575289922351), (37.83652584655761, 55.82188784027888), (37.83809213491821, 55.81612575504693), (37.83605359521481, 55.81460347077685), -(37.83632178569025, 55.81276696067908), (37.838623105812026, 55.811486181656385), (37.83912198147584, 55.807329380532785), (37.839079078033414, 55.80510270463816), (37.83965844708251, 55.79940712529036), (37.840581150787344, 55.79131399999368), -(37.84172564285271, 55.78000432402266)]); -``` - -3. モスクワにあるセルタワーの数をチェックします: - -```sql -SELECT count() FROM cell_towers -WHERE pointInPolygon((lon, lat), (SELECT * FROM moscow)) -``` -```response -┌─count()─┐ -│ 310463 │ -└─────────┘ - -1 rows in set. Elapsed: 0.067 sec. Processed 43.28 million rows, 692.42 MB (645.83 million rows/s., 10.33 GB/s.) 
-``` - -## スキーマのレビュー {#review-of-the-schema} - -Supersetで視覚化を構築する前に、使用するカラムを確認してください。このデータセットは、主に世界中の携帯電話のセルタワーの位置(経度および緯度)と無線タイプを提供します。カラムの説明は、[コミュニティフォーラム](https://community.opencellid.org/t/documenting-the-columns-in-the-downloadable-cells-database-csv/186)にあります。構築する視覚化で使用されるカラムは以下の通りです。 - -OpenCelliDフォーラムからのカラムの説明は次のとおりです: - -| カラム | 説明 | -|--------------|------------------------------------------------| -| radio | 技術世代: CDMA, GSM, UMTS, 5G NR | -| mcc | モバイル国コード: `204` はオランダ | -| lon | 経度: 緯度とともに、概算タワー位置 | -| lat | 緯度: 経度とともに、概算タワー位置 | - -:::tip mcc -あなたのMCCを見つけるには[モバイルネットワークコード](https://en.wikipedia.org/wiki/Mobile_country_code)を確認し、**モバイル国コード**カラムの三桁を使用します。 -::: - -このテーブルのスキーマは、ディスク上のコンパクトなストレージとクエリ速度のために設計されました。 -- `radio`データは、文字列の代わりに`Enum8`(`UInt8`)として保存されています。 -- `mcc`またはモバイル国コードは、範囲が1~999であるため、`UInt16`として保存されています。 -- `lon`と`lat`は`Float64`です。 - -他のフィールドはこのガイド内のクエリや視覚化で使用されていませんが、興味がある場合は、上記のフォーラムで説明されています。 - -## Apache Supersetで視覚化を構築する {#build-visualizations-with-apache-superset} - -SupersetはDockerから簡単に実行できます。すでにSupersetを実行している場合は、必要なことは `pip install clickhouse-connect`を実行してClickHouseを接続することだけです。Supersetをインストールする必要がある場合は、直接下の**Launch Apache Superset in Docker**を開いてください。 - - - -OpenCelliDデータセットを使用してSupersetダッシュボードを構築するには、次の手順を実行します: -- ClickHouseサービスをSupersetの**データベース**として追加する -- テーブル**cell_towers**をSupersetの**データセット**として追加する -- いくつかの**チャート**を作成する -- チャートを**ダッシュボード**に追加する - -### ClickHouseサービスをSupersetデータベースとして追加する {#add-your-clickhouse-service-as-a-superset-database} - - - -Supersetでは、データベースの種類を選択し、接続の詳細を提供することによってデータベースを追加できます。Supersetを開き、**+**を探してクリックすると、**Data**メニューが表示され、そこから**Connect database**オプションがあります。 - -データベースを追加する - -リストから**ClickHouse Connect**を選択します: - -データベースタイプとしてClickHouse Connectを選択 - -:::note -**ClickHouse Connect**がオプションの1つでない場合は、インストールする必要があります。そのコマンドは`pip install clickhouse-connect`です。詳細は[こちら](https://pypi.org/project/clickhouse-connect/)で確認できます。 -::: - -#### 接続の詳細を追加します: {#add-your-connection-details} - -:::tip -ClickHouse CloudまたはSSLの使用を強制する他のClickHouseシステムに接続する際は、**SSL**をオンにすることを確認してください。 -::: - -SupersetデータソースとしてClickHouseを追加 - -### テーブル**cell_towers**をSupersetの**データセット**として追加する {#add-the-table-cell_towers-as-a-superset-dataset} - -Supersetでは、**データセット**がデータベース内のテーブルにマップされます。データセットを追加をクリックし、ClickHouseサービス、テーブルを含むデータベース(`default`)、および`cell_towers`テーブルを選択します: - -cell_towersテーブルをデータセットとして追加 - -### いくつかの**チャート**を作成する {#create-some-charts} - -Supersetでチャートを追加することを選択すると、データセット(`cell_towers`)とチャートタイプを指定する必要があります。OpenCelliDデータセットはセルタワーの経度および緯度座標を提供しているので、**Map**チャートを作成します。**deck.gL Scatterplot**タイプは、このデータセットに適しており、マップ上の密なデータポイントとよく機能します。 - -Supersetでマップを作成する - -#### マップで使用されるクエリを指定する {#specify-the-query-used-for-the-map} - -deck.gl Scatterplotには経度と緯度が必要であり、一つ以上のフィルターをクエリに適用することもできます。この例では、UMTS無線を持つセルタワー用の1つと、オランダに割り当てられたモバイル国コード用の1つ、合計2つのフィルターが適用されます。 - -フィールド`lon`と`lat`には経度と緯度が含まれています: - -経度と緯度のフィールドを指定する - -フィルターに`mcc` = `204`(または他の任意の`mcc`値に置き換え)を追加します: - -MCC 204でフィルターする - -フィルターに`radio` = `'UMTS'`(または他の任意の`radio`値に置き換え、`DESCRIBE TABLE cell_towers`の出力で選択できます)を追加します: - -UMTSと等しい無線でフィルターする - -これは、`radio = 'UMTS'`および`mcc = 204`でフィルターするためのチャートの完全な構成です: - -MCC 204内のUMTS無線のチャート - -**UPDATE CHART**をクリックしてビジュアライゼーションをレンダリングします。 - -### チャートを**ダッシュボード**に追加する {#add-the-charts-to-a-dashboard} - -このスクリーンショットは、LTE、UMTS、およびGSM無線を持つセルタワーの位置を示しています。チャートはすべて同じように作成され、ダッシュボードに追加されます。 - -mcc 204の無線タイプによるセルタワーのダッシュボード - -:::tip -データは[プレイグラウンド](https://sql.clickhouse.com)で対話型クエリにも利用可能です。 - -この[例](https://sql.clickhouse.com?query_id=UV8M4MAGS2PWAUOAYAAARM)では、ユーザー名やクエリも自動的に入力されます。 - 
-プレイグラウンドではテーブルを作成することはできませんが、すべてのクエリを実行したり、Supersetを使用して(ホスト名やポート番号を調整して)利用したりすることができます。 -::: diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/cell-towers.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/cell-towers.md.hash deleted file mode 100644 index d38047c8e04..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/cell-towers.md.hash +++ /dev/null @@ -1 +0,0 @@ -8f6475c38f2319ab diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/covid19.md b/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/covid19.md deleted file mode 100644 index 2f3ee214f5d..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/covid19.md +++ /dev/null @@ -1,268 +0,0 @@ ---- -description: 'COVID-19 Open-Data is a large, open-source database of COVID-19 epidemiological - data and related factors like demographics, economics, and government responses' -sidebar_label: 'COVID-19 Open-Data' -slug: '/getting-started/example-datasets/covid19' -title: 'COVID-19 Open-Data' ---- - - - -COVID-19 Open-Dataは、Covid-19の疫学データベースを最大化することを目指しており、広範な共変量の強力なセットを提供します。人口統計、経済、疫学、地理、健康、入院、移動、政府の対応、天候などに関連するオープンで公共にソースされたライセンスデータが含まれています。 - -詳細はGitHubの [こちら](https://github.com/GoogleCloudPlatform/covid-19-open-data) にあります。 - -このデータをClickHouseに挿入するのは簡単です... - -:::note -以下のコマンドは、[ClickHouse Cloud](https://clickhouse.cloud) の**Production**インスタンスで実行されました。ローカルインストールでも簡単に実行できます。 -::: - -1. データがどのような形をしているか見てみましょう: - -```sql -DESCRIBE url( - 'https://storage.googleapis.com/covid19-open-data/v3/epidemiology.csv', - 'CSVWithNames' -); -``` - -CSVファイルには10列があります: - -```response -┌─name─────────────────┬─type─────────────┐ -│ date │ Nullable(Date) │ -│ location_key │ Nullable(String) │ -│ new_confirmed │ Nullable(Int64) │ -│ new_deceased │ Nullable(Int64) │ -│ new_recovered │ Nullable(Int64) │ -│ new_tested │ Nullable(Int64) │ -│ cumulative_confirmed │ Nullable(Int64) │ -│ cumulative_deceased │ Nullable(Int64) │ -│ cumulative_recovered │ Nullable(Int64) │ -│ cumulative_tested │ Nullable(Int64) │ -└──────────────────────┴──────────────────┘ - -10 rows in set. Elapsed: 0.745 sec. -``` - -2. では、いくつかの行を表示してみましょう: - -```sql -SELECT * -FROM url('https://storage.googleapis.com/covid19-open-data/v3/epidemiology.csv') -LIMIT 100; -``` - -`url` 関数はCSVファイルからデータを簡単に読み取ります: - -```response -┌─c1─────────┬─c2───────────┬─c3────────────┬─c4───────────┬─c5────────────┬─c6─────────┬─c7───────────────────┬─c8──────────────────┬─c9───────────────────┬─c10───────────────┐ -│ date │ location_key │ new_confirmed │ new_deceased │ new_recovered │ new_tested │ cumulative_confirmed │ cumulative_deceased │ cumulative_recovered │ cumulative_tested │ -│ 2020-04-03 │ AD │ 24 │ 1 │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ 466 │ 17 │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ -│ 2020-04-04 │ AD │ 57 │ 0 │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ 523 │ 17 │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ -│ 2020-04-05 │ AD │ 17 │ 4 │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ 540 │ 21 │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ -│ 2020-04-06 │ AD │ 11 │ 1 │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ 551 │ 22 │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ -│ 2020-04-07 │ AD │ 15 │ 2 │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ 566 │ 24 │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ -│ 2020-04-08 │ AD │ 23 │ 2 │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ 589 │ 26 │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ -└────────────┴──────────────┴───────────────┴──────────────┴───────────────┴────────────┴──────────────────────┴─────────────────────┴──────────────────────┴───────────────────┘ -``` - -3. 
データがどのようなものか分かったので、テーブルを作成しましょう: - -```sql -CREATE TABLE covid19 ( - date Date, - location_key LowCardinality(String), - new_confirmed Int32, - new_deceased Int32, - new_recovered Int32, - new_tested Int32, - cumulative_confirmed Int32, - cumulative_deceased Int32, - cumulative_recovered Int32, - cumulative_tested Int32 -) -ENGINE = MergeTree -ORDER BY (location_key, date); -``` - -4. 次のコマンドは、全データセットを`covid19`テーブルに挿入します: - -```sql -INSERT INTO covid19 - SELECT * - FROM - url( - 'https://storage.googleapis.com/covid19-open-data/v3/epidemiology.csv', - CSVWithNames, - 'date Date, - location_key LowCardinality(String), - new_confirmed Int32, - new_deceased Int32, - new_recovered Int32, - new_tested Int32, - cumulative_confirmed Int32, - cumulative_deceased Int32, - cumulative_recovered Int32, - cumulative_tested Int32' - ); -``` - -5. かなり早く進みます - 挿入された行数を見てみましょう: - -```sql -SELECT formatReadableQuantity(count()) -FROM covid19; -``` - -```response -┌─formatReadableQuantity(count())─┐ -│ 12.53 million │ -└─────────────────────────────────┘ -``` - -6. Covid-19の合計件数を確認しましょう: - -```sql -SELECT formatReadableQuantity(sum(new_confirmed)) -FROM covid19; -``` - -```response -┌─formatReadableQuantity(sum(new_confirmed))─┐ -│ 1.39 billion │ -└────────────────────────────────────────────┘ -``` - -7. データには日付に対して多くの0があることに気づくでしょう - 週末や数値が毎日報告されなかった日です。ウィンドウ関数を使用して、新しいケースの日次平均を平滑化します: - -```sql -SELECT - AVG(new_confirmed) OVER (PARTITION BY location_key ORDER BY date ROWS BETWEEN 2 PRECEDING AND 2 FOLLOWING) AS cases_smoothed, - new_confirmed, - location_key, - date -FROM covid19; -``` - -8. このクエリは各場所の最新の値を取得します。すべての国が毎日報告しているわけではないので、`max(date)`は使用できませんので、`ROW_NUMBER`を用いて最後の行を取得します: - -```sql -WITH latest_deaths_data AS - ( SELECT location_key, - date, - new_deceased, - new_confirmed, - ROW_NUMBER() OVER (PARTITION BY location_key ORDER BY date DESC) as rn - FROM covid19) -SELECT location_key, - date, - new_deceased, - new_confirmed, - rn -FROM latest_deaths_data -WHERE rn=1; -``` - -9. `lagInFrame`を使用して毎日の新規症例の`LAG`を決定します。このクエリでは`US_DC`のロケーションでフィルターします: - -```sql -SELECT - new_confirmed - lagInFrame(new_confirmed,1) OVER (PARTITION BY location_key ORDER BY date) AS confirmed_cases_delta, - new_confirmed, - location_key, - date -FROM covid19 -WHERE location_key = 'US_DC'; -``` - -レスポンスは次のようになります: - -```response -┌─confirmed_cases_delta─┬─new_confirmed─┬─location_key─┬───────date─┐ -│ 0 │ 0 │ US_DC │ 2020-03-08 │ -│ 2 │ 2 │ US_DC │ 2020-03-09 │ -│ -2 │ 0 │ US_DC │ 2020-03-10 │ -│ 6 │ 6 │ US_DC │ 2020-03-11 │ -│ -6 │ 0 │ US_DC │ 2020-03-12 │ -│ 0 │ 0 │ US_DC │ 2020-03-13 │ -│ 6 │ 6 │ US_DC │ 2020-03-14 │ -│ -5 │ 1 │ US_DC │ 2020-03-15 │ -│ 4 │ 5 │ US_DC │ 2020-03-16 │ -│ 4 │ 9 │ US_DC │ 2020-03-17 │ -│ -1 │ 8 │ US_DC │ 2020-03-18 │ -│ 24 │ 32 │ US_DC │ 2020-03-19 │ -│ -26 │ 6 │ US_DC │ 2020-03-20 │ -│ 15 │ 21 │ US_DC │ 2020-03-21 │ -│ -3 │ 18 │ US_DC │ 2020-03-22 │ -│ 3 │ 21 │ US_DC │ 2020-03-23 │ -``` - -10. 
このクエリは毎日の新規ケースの変化のパーセンテージを計算し、結果セットに簡単な`increase`または`decrease`の列を含めます: - -```sql -WITH confirmed_lag AS ( - SELECT - *, - lagInFrame(new_confirmed) OVER( - PARTITION BY location_key - ORDER BY date - ) AS confirmed_previous_day - FROM covid19 -), -confirmed_percent_change AS ( - SELECT - *, - COALESCE(ROUND((new_confirmed - confirmed_previous_day) / confirmed_previous_day * 100), 0) AS percent_change - FROM confirmed_lag -) -SELECT - date, - new_confirmed, - percent_change, - CASE - WHEN percent_change > 0 THEN 'increase' - WHEN percent_change = 0 THEN 'no change' - ELSE 'decrease' - END AS trend -FROM confirmed_percent_change -WHERE location_key = 'US_DC'; -``` - -結果は次のようになります: - -```response -┌───────date─┬─new_confirmed─┬─percent_change─┬─trend─────┐ -│ 2020-03-08 │ 0 │ nan │ decrease │ -│ 2020-03-09 │ 2 │ inf │ increase │ -│ 2020-03-10 │ 0 │ -100 │ decrease │ -│ 2020-03-11 │ 6 │ inf │ increase │ -│ 2020-03-12 │ 0 │ -100 │ decrease │ -│ 2020-03-13 │ 0 │ nan │ decrease │ -│ 2020-03-14 │ 6 │ inf │ increase │ -│ 2020-03-15 │ 1 │ -83 │ decrease │ -│ 2020-03-16 │ 5 │ 400 │ increase │ -│ 2020-03-17 │ 9 │ 80 │ increase │ -│ 2020-03-18 │ 8 │ -11 │ decrease │ -│ 2020-03-19 │ 32 │ 300 │ increase │ -│ 2020-03-20 │ 6 │ -81 │ decrease │ -│ 2020-03-21 │ 21 │ 250 │ increase │ -│ 2020-03-22 │ 18 │ -14 │ decrease │ -│ 2020-03-23 │ 21 │ 17 │ increase │ -│ 2020-03-24 │ 46 │ 119 │ increase │ -│ 2020-03-25 │ 48 │ 4 │ increase │ -│ 2020-03-26 │ 36 │ -25 │ decrease │ -│ 2020-03-27 │ 37 │ 3 │ increase │ -│ 2020-03-28 │ 38 │ 3 │ increase │ -│ 2020-03-29 │ 59 │ 55 │ increase │ -│ 2020-03-30 │ 94 │ 59 │ increase │ -│ 2020-03-31 │ 91 │ -3 │ decrease │ -│ 2020-04-01 │ 67 │ -26 │ decrease │ -│ 2020-04-02 │ 104 │ 55 │ increase │ -│ 2020-04-03 │ 145 │ 39 │ increase │ -``` - -:::note -[GitHubリポジトリ](https://github.com/GoogleCloudPlatform/covid-19-open-data) に記載されているように、このデータセットは2022年9月15日以降は更新されていません。 -::: diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/covid19.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/covid19.md.hash deleted file mode 100644 index caef2b440e8..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/covid19.md.hash +++ /dev/null @@ -1 +0,0 @@ -604ac96d5d8f0b2b diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/criteo.md b/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/criteo.md deleted file mode 100644 index 22e075721af..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/criteo.md +++ /dev/null @@ -1,168 +0,0 @@ ---- -description: 'A terabyte of Click Logs from Criteo' -sidebar_label: 'Terabyte Click Logs from Criteo' -slug: '/getting-started/example-datasets/criteo' -title: 'Terabyte Click Logs from Criteo' ---- - - - -Download the data from http://labs.criteo.com/downloads/download-terabyte-click-logs/ - -Create a table to import the log to: - -```sql -CREATE TABLE criteo_log ( - date Date, - clicked UInt8, - int1 Int32, - int2 Int32, - int3 Int32, - int4 Int32, - int5 Int32, - int6 Int32, - int7 Int32, - int8 Int32, - int9 Int32, - int10 Int32, - int11 Int32, - int12 Int32, - int13 Int32, - cat1 String, - cat2 String, - cat3 String, - cat4 String, - cat5 String, - cat6 String, - cat7 String, - cat8 String, - cat9 String, - cat10 String, - cat11 String, - cat12 String, - cat13 String, - cat14 String, - cat15 String, - cat16 String, - cat17 String, - 
cat18 String, - cat19 String, - cat20 String, - cat21 String, - cat22 String, - cat23 String, - cat24 String, - cat25 String, - cat26 String -) ENGINE = Log; -``` - -Insert the data: - -```bash -$ for i in {00..23}; do echo $i; zcat datasets/criteo/day_${i#0}.gz | sed -r 's/^/2000-01-'${i/00/24}'\t/' | clickhouse-client --host=example-perftest01j --query="INSERT INTO criteo_log FORMAT TabSeparated"; done -``` - -Create a table for the converted data: - -```sql -CREATE TABLE criteo -( - date Date, - clicked UInt8, - int1 Int32, - int2 Int32, - int3 Int32, - int4 Int32, - int5 Int32, - int6 Int32, - int7 Int32, - int8 Int32, - int9 Int32, - int10 Int32, - int11 Int32, - int12 Int32, - int13 Int32, - icat1 UInt32, - icat2 UInt32, - icat3 UInt32, - icat4 UInt32, - icat5 UInt32, - icat6 UInt32, - icat7 UInt32, - icat8 UInt32, - icat9 UInt32, - icat10 UInt32, - icat11 UInt32, - icat12 UInt32, - icat13 UInt32, - icat14 UInt32, - icat15 UInt32, - icat16 UInt32, - icat17 UInt32, - icat18 UInt32, - icat19 UInt32, - icat20 UInt32, - icat21 UInt32, - icat22 UInt32, - icat23 UInt32, - icat24 UInt32, - icat25 UInt32, - icat26 UInt32 -) ENGINE = MergeTree() -PARTITION BY toYYYYMM(date) -ORDER BY (date, icat1) -``` - -Transform data from the raw log and put it in the second table: - -```sql -INSERT INTO - criteo -SELECT - date, - clicked, - int1, - int2, - int3, - int4, - int5, - int6, - int7, - int8, - int9, - int10, - int11, - int12, - int13, - reinterpretAsUInt32(unhex(cat1)) AS icat1, - reinterpretAsUInt32(unhex(cat2)) AS icat2, - reinterpretAsUInt32(unhex(cat3)) AS icat3, - reinterpretAsUInt32(unhex(cat4)) AS icat4, - reinterpretAsUInt32(unhex(cat5)) AS icat5, - reinterpretAsUInt32(unhex(cat6)) AS icat6, - reinterpretAsUInt32(unhex(cat7)) AS icat7, - reinterpretAsUInt32(unhex(cat8)) AS icat8, - reinterpretAsUInt32(unhex(cat9)) AS icat9, - reinterpretAsUInt32(unhex(cat10)) AS icat10, - reinterpretAsUInt32(unhex(cat11)) AS icat11, - reinterpretAsUInt32(unhex(cat12)) AS icat12, - reinterpretAsUInt32(unhex(cat13)) AS icat13, - reinterpretAsUInt32(unhex(cat14)) AS icat14, - reinterpretAsUInt32(unhex(cat15)) AS icat15, - reinterpretAsUInt32(unhex(cat16)) AS icat16, - reinterpretAsUInt32(unhex(cat17)) AS icat17, - reinterpretAsUInt32(unhex(cat18)) AS icat18, - reinterpretAsUInt32(unhex(cat19)) AS icat19, - reinterpretAsUInt32(unhex(cat20)) AS icat20, - reinterpretAsUInt32(unhex(cat21)) AS icat21, - reinterpretAsUInt32(unhex(cat22)) AS icat22, - reinterpretAsUInt32(unhex(cat23)) AS icat23, - reinterpretAsUInt32(unhex(cat24)) AS icat24, - reinterpretAsUInt32(unhex(cat25)) AS icat25, - reinterpretAsUInt32(unhex(cat26)) AS icat26 -FROM - criteo_log; - -DROP TABLE criteo_log; -``` diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/criteo.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/criteo.md.hash deleted file mode 100644 index 526eca4c21e..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/criteo.md.hash +++ /dev/null @@ -1 +0,0 @@ -b094f8bc22734261 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/environmental-sensors.md b/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/environmental-sensors.md deleted file mode 100644 index 2322f3ce345..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/environmental-sensors.md +++ /dev/null @@ -1,176 +0,0 @@ ---- 
-description: 'Over 20 billion records of data from Sensor.Community, a contributors-driven - global sensor network that creates Open Environmental Data.' -sidebar_label: 'Environmental Sensors Data' -slug: '/getting-started/example-datasets/environmental-sensors' -title: 'Environmental Sensors Data' ---- - -import Image from '@theme/IdealImage'; -import no_events_per_day from '@site/static/images/getting-started/example-datasets/sensors_01.png'; -import sensors_02 from '@site/static/images/getting-started/example-datasets/sensors_02.png'; - -[Sensors.Community](https://sensor.community/en/)は、オープンな環境データを作成するために貢献者主導のグローバルセンサーネットワークです。データは世界中のセンサーから収集されます。誰でもセンサーを購入し、好きな場所に設置することができます。データをダウンロードするためのAPIは[GitHub](https://github.com/opendata-stuttgart/meta/wiki/APIs)で利用可能で、データは[Database Contents License (DbCL)](https://opendatacommons.org/licenses/dbcl/1-0/)の下で自由に利用可能です。 - -:::important -データセットには200億件以上のレコードが含まれているため、リソースがその量を処理できる限り、以下のコマンドをコピー&ペーストすることに注意してください。以下のコマンドは[ClickHouse Cloud](https://clickhouse.cloud)の**Production**インスタンスで実行されました。 -::: - -1. データはS3にあり、`s3`テーブル関数を使用してファイルからテーブルを作成できます。また、データをそのままクエリすることも可能です。ClickHouseに挿入する前に、いくつかの行を見てみましょう: - -```sql -SELECT * -FROM s3( - 'https://clickhouse-public-datasets.s3.eu-central-1.amazonaws.com/sensors/monthly/2019-06_bmp180.csv.zst', - 'CSVWithNames' - ) -LIMIT 10 -SETTINGS format_csv_delimiter = ';'; -``` - -データはCSVファイルですが、区切り文字としてセミコロンが使用されています。行は次のようになります: - -```response -┌─sensor_id─┬─sensor_type─┬─location─┬────lat─┬────lon─┬─timestamp───────────┬──pressure─┬─altitude─┬─pressure_sealevel─┬─temperature─┐ -│ 9119 │ BMP180 │ 4594 │ 50.994 │ 7.126 │ 2019-06-01T00:00:00 │ 101471 │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ 19.9 │ -│ 21210 │ BMP180 │ 10762 │ 42.206 │ 25.326 │ 2019-06-01T00:00:00 │ 99525 │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ 19.3 │ -│ 19660 │ BMP180 │ 9978 │ 52.434 │ 17.056 │ 2019-06-01T00:00:04 │ 101570 │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ 15.3 │ -│ 12126 │ BMP180 │ 6126 │ 57.908 │ 16.49 │ 2019-06-01T00:00:05 │ 101802.56 │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ 8.07 │ -│ 15845 │ BMP180 │ 8022 │ 52.498 │ 13.466 │ 2019-06-01T00:00:05 │ 101878 │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ 23 │ -│ 16415 │ BMP180 │ 8316 │ 49.312 │ 6.744 │ 2019-06-01T00:00:06 │ 100176 │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ 14.7 │ -│ 7389 │ BMP180 │ 3735 │ 50.136 │ 11.062 │ 2019-06-01T00:00:06 │ 98905 │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ 12.1 │ -│ 13199 │ BMP180 │ 6664 │ 52.514 │ 13.44 │ 2019-06-01T00:00:07 │ 101855.54 │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ 19.74 │ -│ 12753 │ BMP180 │ 6440 │ 44.616 │ 2.032 │ 2019-06-01T00:00:07 │ 99475 │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ 17 │ -│ 16956 │ BMP180 │ 8594 │ 52.052 │ 8.354 │ 2019-06-01T00:00:08 │ 101322 │ ᴺᵁᴾᴾ │ ᴺᵁᴸᴸ │ 17.2 │ -└───────────┴─────────────┴──────────┴────────┴────────┴─────────────────────┴───────────┴──────────┴───────────────────┴─────────────┘ -``` - -2. ClickHouseにデータを保存するために、次の`MergeTree`テーブルを使用します: - -```sql -CREATE TABLE sensors -( - sensor_id UInt16, - sensor_type Enum('BME280', 'BMP180', 'BMP280', 'DHT22', 'DS18B20', 'HPM', 'HTU21D', 'PMS1003', 'PMS3003', 'PMS5003', 'PMS6003', 'PMS7003', 'PPD42NS', 'SDS011'), - location UInt32, - lat Float32, - lon Float32, - timestamp DateTime, - P1 Float32, - P2 Float32, - P0 Float32, - durP1 Float32, - ratioP1 Float32, - durP2 Float32, - ratioP2 Float32, - pressure Float32, - altitude Float32, - pressure_sealevel Float32, - temperature Float32, - humidity Float32, - date Date MATERIALIZED toDate(timestamp) -) -ENGINE = MergeTree -ORDER BY (timestamp, sensor_id); -``` - -3. 
ClickHouse Cloudサービスには `default`という名前のクラスターがあります。`s3Cluster`テーブル関数を使用すると、クラスター内のノードからS3ファイルを並列で読み取ることができます。(クラスターがない場合は、`s3`関数を使用し、クラスター名を削除してください。) - -このクエリはしばらく時間がかかります。データは圧縮されずに約1.67Tです: - -```sql -INSERT INTO sensors - SELECT * - FROM s3Cluster( - 'default', - 'https://clickhouse-public-datasets.s3.amazonaws.com/sensors/monthly/*.csv.zst', - 'CSVWithNames', - $$ sensor_id UInt16, - sensor_type String, - location UInt32, - lat Float32, - lon Float32, - timestamp DateTime, - P1 Float32, - P2 Float32, - P0 Float32, - durP1 Float32, - ratioP1 Float32, - durP2 Float32, - ratioP2 Float32, - pressure Float32, - altitude Float32, - pressure_sealevel Float32, - temperature Float32, - humidity Float32 $$ - ) -SETTINGS - format_csv_delimiter = ';', - input_format_allow_errors_ratio = '0.5', - input_format_allow_errors_num = 10000, - input_format_parallel_parsing = 0, - date_time_input_format = 'best_effort', - max_insert_threads = 32, - parallel_distributed_insert_select = 1; -``` - -ここでの応答は、行数と処理速度を示しています。入力速度は1秒あたり6M行を超えています! - -```response -0 rows in set. Elapsed: 3419.330 sec. Processed 20.69 billion rows, 1.67 TB (6.05 million rows/s., 488.52 MB/s.) -``` - -4. `sensors`テーブルに必要なストレージディスクのサイズを確認しましょう: - -```sql -SELECT - disk_name, - formatReadableSize(sum(data_compressed_bytes) AS size) AS compressed, - formatReadableSize(sum(data_uncompressed_bytes) AS usize) AS uncompressed, - round(usize / size, 2) AS compr_rate, - sum(rows) AS rows, - count() AS part_count -FROM system.parts -WHERE (active = 1) AND (table = 'sensors') -GROUP BY - disk_name -ORDER BY size DESC; -``` - -1.67Tは310GiBに圧縮され、20.69億行があります: - -```response -┌─disk_name─┬─compressed─┬─uncompressed─┬─compr_rate─┬────────rows─┬─part_count─┐ -│ s3disk │ 310.21 GiB │ 1.30 TiB │ 4.29 │ 20693971809 │ 472 │ -└───────────┴────────────┴──────────────┴────────────┴─────────────┴────────────┘ -``` - -5. データがClickHouseに入ったので、分析を始めましょう。より多くのセンサーが展開されるにつれて、データの量が時間とともに増加していることに注意してください: - -```sql -SELECT - date, - count() -FROM sensors -GROUP BY date -ORDER BY date ASC; -``` - -これはSQLコンソールで結果を視覚化するためのチャートを作成できるものです: - -1日あたりのイベント数 - -6. 
このクエリでは、非常に暑く湿度の高い日の数をカウントします: - -```sql -WITH - toYYYYMMDD(timestamp) AS day -SELECT day, count() FROM sensors -WHERE temperature >= 40 AND temperature <= 50 AND humidity >= 90 -GROUP BY day -ORDER BY day asc; -``` - -結果の可視化は次の通りです: - -暑く湿度の高い日々 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/environmental-sensors.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/environmental-sensors.md.hash deleted file mode 100644 index 607130c6d1f..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/environmental-sensors.md.hash +++ /dev/null @@ -1 +0,0 @@ -5bccaeafb2cc0c9b diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/foursquare-os-places.md b/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/foursquare-os-places.md deleted file mode 100644 index a3185d53732..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/foursquare-os-places.md +++ /dev/null @@ -1,255 +0,0 @@ ---- -description: '地図上の店舗、レストラン、公園、遊び場、記念碑などの情報を含む1億件以上のレコードを持つデータセット。' -sidebar_label: 'Foursquare places' -slug: '/getting-started/example-datasets/foursquare-places' -title: 'Foursquare places' -keywords: -- 'visualizing' ---- - -import Image from '@theme/IdealImage'; -import visualization_1 from '@site/static/images/getting-started/example-datasets/visualization_1.png'; -import visualization_2 from '@site/static/images/getting-started/example-datasets/visualization_2.png'; -import visualization_3 from '@site/static/images/getting-started/example-datasets/visualization_3.png'; -import visualization_4 from '@site/static/images/getting-started/example-datasets/visualization_4.png'; - -## Dataset {#dataset} - -このデータセットは Foursquare によって提供されており、[ダウンロード](https://docs.foursquare.com/data-products/docs/access-fsq-os-places)が可能で、Apache 2.0 ライセンスの下で無料で使用できます。 - -このデータセットには、店舗、レストラン、公園、遊び場、記念碑など、商業的な観光地(POI)の1億件以上のレコードが含まれています。また、カテゴリやソーシャルメディア情報など、これらの場所に関する追加のメタデータも含まれています。 - -## Data exploration {#data-exploration} - -データを探索するために、[`clickhouse-local`](https://clickhouse.com/blog/extracting-converting-querying-local-files-with-sql-clickhouse-local)という小さなコマンドラインツールを使用します。このツールは、完全な ClickHouse エンジンを提供しますが、ClickHouse Cloud、`clickhouse-client`、または `chDB` を使用することもできます。 - -データが保存されている S3 バケットからデータを選択するには、次のクエリを実行します: - -```sql title="Query" -SELECT * FROM s3('s3://fsq-os-places-us-east-1/release/dt=2025-04-08/places/parquet/*') LIMIT 1 -``` - -```response title="Response" -Row 1: -────── -fsq_place_id: 4e1ef76cae60cd553dec233f -name: @VirginAmerica In-flight Via @Gogo -latitude: 37.62120111687914 -longitude: -122.39003793803701 -address: ᴺᵁᴸᴸ -locality: ᴺᵁᴸᴸ -region: ᴺᵁᴸᴸ -postcode: ᴺᵁᴸᴸ -admin_region: ᴺᵁᴸᴸ -post_town: ᴺᵁᴸᴸ -po_box: ᴺᵁᴸᴸ -country: US -date_created: 2011-07-14 -date_refreshed: 2018-07-05 -date_closed: 2018-07-05 -tel: ᴺᵁᴸᴸ -website: ᴺᵁᴸᴸ -email: ᴺᵁᴸᴸ -facebook_id: ᴺᵁᴸᴸ -instagram: ᴺᵁᴸᴸ -twitter: ᴺᵁᴸᴸ -fsq_category_ids: ['4bf58dd8d48988d1f7931735'] -fsq_category_labels: ['Travel and Transportation > Transport Hub > Airport > Plane'] -placemaker_url: https://foursquare.com/placemakers/review-place/4e1ef76cae60cd553dec233f -geom: �^��a�^@Bσ��� -bbox: (-122.39003793803701,37.62120111687914,-122.39003793803701,37.62120111687914) -``` - -多くのフィールドが `ᴺᵁᴸᴸ` になっているため、より使いやすいデータを取得するために、クエリに追加条件を追加できます: - -```sql title="Query" -SELECT * FROM 
s3('s3://fsq-os-places-us-east-1/release/dt=2025-04-08/places/parquet/*') - WHERE address IS NOT NULL AND postcode IS NOT NULL AND instagram IS NOT NULL LIMIT 1 -``` - -```response -Row 1: -────── -fsq_place_id: 59b2c754b54618784f259654 -name: Villa 722 -latitude: ᴺᵁᴸᴸ -longitude: ᴺᵁᴸᴸ -address: Gijzenveldstraat 75 -locality: Zutendaal -region: Limburg -postcode: 3690 -admin_region: ᴺᵁᴸᴸ -post_town: ᴺᵁᴸᴸ -po_box: ᴺᵁᴸᴸ -country: ᴺᵁᴸᴸ -date_created: 2017-09-08 -date_refreshed: 2020-01-25 -date_closed: ᴺᵁᴸᴸ -tel: ᴺᵁᴸᴸ -website: https://www.landal.be -email: ᴺᵁᴸᴸ -facebook_id: 522698844570949 -- 522.70 trillion -instagram: landalmooizutendaal -twitter: landalzdl -fsq_category_ids: ['56aa371be4b08b9a8d5734e1'] -fsq_category_labels: ['Travel and Transportation > Lodging > Vacation Rental'] -placemaker_url: https://foursquare.com/placemakers/review-place/59b2c754b54618784f259654 -geom: ᴺᵁᴸᴸ -bbox: (NULL,NULL,NULL,NULL) -``` - -データの自動推定スキーマを表示するには、次のクエリを使用して `DESCRIBE` を実行します: - -```sql title="Query" -DESCRIBE s3('s3://fsq-os-places-us-east-1/release/dt=2025-04-08/places/parquet/*') -``` - -```response title="Response" - ┌─name────────────────┬─type────────────────────────┬ - 1. │ fsq_place_id │ Nullable(String) │ - 2. │ name │ Nullable(String) │ - 3. │ latitude │ Nullable(Float64) │ - 4. │ longitude │ Nullable(Float64) │ - 5. │ address │ Nullable(String) │ - 6. │ locality │ Nullable(String) │ - 7. │ region │ Nullable(String) │ - 8. │ postcode │ Nullable(String) │ - 9. │ admin_region │ Nullable(String) │ -10. │ post_town │ Nullable(String) │ -11. │ po_box │ Nullable(String) │ -12. │ country │ Nullable(String) │ -13. │ date_created │ Nullable(String) │ -14. │ date_refreshed │ Nullable(String) │ -15. │ date_closed │ Nullable(String) │ -16. │ tel │ Nullable(String) │ -17. │ website │ Nullable(String) │ -18. │ email │ Nullable(String) │ -19. │ facebook_id │ Nullable(Int64) │ -20. │ instagram │ Nullable(String) │ -21. │ twitter │ Nullable(String) │ -22. │ fsq_category_ids │ Array(Nullable(String)) │ -23. │ fsq_category_labels │ Array(Nullable(String)) │ -24. │ placemaker_url │ Nullable(String) │ -25. │ geom │ Nullable(String) │ -26. 
│ bbox │ Tuple( ↴│ - │ │↳ xmin Nullable(Float64),↴│ - │ │↳ ymin Nullable(Float64),↴│ - │ │↳ xmax Nullable(Float64),↴│ - │ │↳ ymax Nullable(Float64)) │ - └─────────────────────┴─────────────────────────────┘ -``` - -## Loading the data into ClickHouse {#loading-the-data} - -ディスク上にデータを永続化したい場合は、`clickhouse-server` または ClickHouse Cloud を使用できます。 - -テーブルを作成するには、次のコマンドを実行してください: - -```sql title="Query" -CREATE TABLE foursquare_mercator -( - fsq_place_id String, - name String, - latitude Float64, - longitude Float64, - address String, - locality String, - region LowCardinality(String), - postcode LowCardinality(String), - admin_region LowCardinality(String), - post_town LowCardinality(String), - po_box LowCardinality(String), - country LowCardinality(String), - date_created Nullable(Date), - date_refreshed Nullable(Date), - date_closed Nullable(Date), - tel String, - website String, - email String, - facebook_id String, - instagram String, - twitter String, - fsq_category_ids Array(String), - fsq_category_labels Array(String), - placemaker_url String, - geom String, - bbox Tuple( - xmin Nullable(Float64), - ymin Nullable(Float64), - xmax Nullable(Float64), - ymax Nullable(Float64) - ), - category LowCardinality(String) ALIAS fsq_category_labels[1], - mercator_x UInt32 MATERIALIZED 0xFFFFFFFF * ((longitude + 180) / 360), - mercator_y UInt32 MATERIALIZED 0xFFFFFFFF * ((1 / 2) - ((log(tan(((latitude + 90) / 360) * pi())) / 2) / pi())), - INDEX idx_x mercator_x TYPE minmax, - INDEX idx_y mercator_y TYPE minmax -) -ORDER BY mortonEncode(mercator_x, mercator_y) -``` - -いくつかのカラムに対して [`LowCardinality`](/sql-reference/data-types/lowcardinality) データ型を使用していることに注意してください。このデータ型は、データ型の内部表現を辞書エンコードに変更します。辞書エンコードされたデータを操作することで、多くのアプリケーションにおいて `SELECT` クエリのパフォーマンスが大幅に向上します。 - -さらに、2つの `UInt32` の `MATERIALIZED` カラム、`mercator_x` および `mercator_y` が作成され、緯度/経度座標を[Web Mercator プロジェクション](https://en.wikipedia.org/wiki/Web_Mercator_projection)にマッピングすることで、地図をタイルに簡単にセグメント化します: - -```sql -mercator_x UInt32 MATERIALIZED 0xFFFFFFFF * ((longitude + 180) / 360), -mercator_y UInt32 MATERIALIZED 0xFFFFFFFF * ((1 / 2) - ((log(tan(((latitude + 90) / 360) * pi())) / 2) / pi())), -``` - -上記の各カラムで何が起こっているのかを分解してみましょう。 - -**mercator_x** - -このカラムは、経度の値を Mercator プロジェクションの X 座標に変換します: - -- `longitude + 180` は経度の範囲を [-180, 180] から [0, 360] にシフトします -- 360 で割ることにより、0 と 1 の間の値に正規化されます -- `0xFFFFFFFF`(最大32ビット符号なし整数の16進数)を掛けることで、この正規化された値を32ビット整数の全範囲にスケールします - -**mercator_y** - -このカラムは、緯度の値を Mercator プロジェクションの Y 座標に変換します: - -- `latitude + 90` は緯度を [-90, 90] から [0, 180] にシフトします -- 360 で割って pi() を掛けることで、三角関数のためにラジアンに変換します -- `log(tan(...))` 部分が Mercator プロジェクションの公式のコアです -- `0xFFFFFFFF` を掛けることで、32ビット整数の全範囲にスケールします - -`MATERIALIZED` を指定すると、ClickHouse はデータを `INSERT` する際にこれらのカラムの値を計算し、`INSERT` ステートメントではこれらのカラムを指定する必要がありません(これらは元のデータスキーマの一部ではありません)。 - -テーブルは `mortonEncode(mercator_x, mercator_y)` によってオーダーされており、これにより `mercator_x`, `mercator_y` の Z-オーダー空間充填曲線が生成され、地理空間クエリのパフォーマンスが大幅に向上します。この Z-オーダー曲線のオーダリングにより、データが物理的に空間的近接性に基づいて整理されます: - -```sql -ORDER BY mortonEncode(mercator_x, mercator_y) -``` - -より高速な検索のために、2つの `minmax` インデックスも作成されます: - -```sql -INDEX idx_x mercator_x TYPE minmax, -INDEX idx_y mercator_y TYPE minmax -``` - -ご覧の通り、ClickHouse はリアルタイムマッピングアプリケーションに必要なすべてを提供しています! 
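-
-なお、ここで定義した `mercator_x` / `mercator_y` と minmax インデックスの使い方を補足するために、境界ボックスで POI を絞り込むクエリのスケッチを示します。境界の経度・緯度(lon 4.7〜5.1、lat 52.2〜52.5)は説明用の仮の値ですので、用途に合わせて調整してください。実行は、下のクエリでデータをロードした後に行ってください:
-
-```sql
-WITH
-    0xFFFFFFFF * ((4.7 + 180) / 360) AS x_min,  -- 仮の西端(lon 4.7)をテーブル定義と同じ式で変換
-    0xFFFFFFFF * ((5.1 + 180) / 360) AS x_max,  -- 仮の東端(lon 5.1)
-    0xFFFFFFFF * ((1 / 2) - ((log(tan(((52.5 + 90) / 360) * pi())) / 2) / pi())) AS y_min,  -- 北端(lat 52.5)。緯度が大きいほど mercator_y は小さい
-    0xFFFFFFFF * ((1 / 2) - ((log(tan(((52.2 + 90) / 360) * pi())) / 2) / pi())) AS y_max   -- 南端(lat 52.2)
-SELECT
-    name,
-    category
-FROM foursquare_mercator
-WHERE (mercator_x BETWEEN x_min AND x_max)
-  AND (mercator_y BETWEEN y_min AND y_max)
-LIMIT 10;
-```
-
-テーブルは Z-オーダー(Morton エンコード)で物理的に並んでいるため、このような範囲フィルタでは空間的に近いデータがまとまって読み取られ、スキャン量を抑えやすくなります。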
- -データをロードするには、次のクエリを実行します: - -```sql -INSERT INTO foursquare_mercator -SELECT * FROM s3('s3://fsq-os-places-us-east-1/release/dt=2025-04-08/places/parquet/*') -``` - -## Visualizing the data {#data-visualization} - -このデータセットで可能なことを確認するには、[adsb.exposed](https://adsb.exposed/?dataset=Places&zoom=5&lat=52.3488&lng=4.9219)をチェックしてください。adsb.exposed は、共同創設者で CTO の Alexey Milovidov が ADS-B(自動依存監視 - ブロードキャスト)フライトデータを視覚化するために最初に構築したもので、これのデータは1000倍の大きさです。会社のハッカソンで、Alexey はこのツールに Foursquare データを追加しました。 - -いくつかのお気に入りの視覚化を以下に示しますので、お楽しみください。 - -Density map of points of interest in Europe - -Sake bars in Japan - -ATMs - -Map of Europe with points of interest categorised by country diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/foursquare-os-places.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/foursquare-os-places.md.hash deleted file mode 100644 index 66707445add..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/foursquare-os-places.md.hash +++ /dev/null @@ -1 +0,0 @@ -4a9a535fcab956c8 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/github-events.md b/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/github-events.md deleted file mode 100644 index 9bb19a2360e..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/github-events.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -description: 'Dataset containing all events on GitHub from 2011 to Dec 6 2020, with - a size of 3.1 billion records.' -sidebar_label: 'GitHub Events' -slug: '/getting-started/example-datasets/github-events' -title: 'GitHub Events Dataset' ---- - - - -データセットには、2011年から2020年12月6日までのGitHubのすべてのイベントが含まれており、サイズは31億レコードです。ダウンロードサイズは75 GBで、lz4圧縮を使用してテーブルに格納した場合、ディスク上に最大200 GBのスペースが必要です。 - -完全なデータセットの説明、洞察、ダウンロード手順、およびインタラクティブクエリは[こちら](https://ghe.clickhouse.tech/)に掲載されています。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/github-events.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/github-events.md.hash deleted file mode 100644 index 92c83529b02..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/github-events.md.hash +++ /dev/null @@ -1 +0,0 @@ -25a25bbd2a2ed6f1 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/github.md b/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/github.md deleted file mode 100644 index bc313d9c628..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/github.md +++ /dev/null @@ -1,2430 +0,0 @@ ---- -description: 'Dataset containing all of the commits and changes for the ClickHouse - repository' -sidebar_label: 'Github Repo' -sidebar_position: 1 -slug: '/getting-started/example-datasets/github' -title: 'Writing Queries in ClickHouse using GitHub Data' ---- - -import Image from '@theme/IdealImage'; -import superset_github_lines_added_deleted from '@site/static/images/getting-started/example-datasets/superset-github-lines-added-deleted.png' -import superset_commits_authors from '@site/static/images/getting-started/example-datasets/superset-commits-authors.png' -import superset_authors_matrix from '@site/static/images/getting-started/example-datasets/superset-authors-matrix.png' -import superset_authors_matrix_v2 from 
'@site/static/images/getting-started/example-datasets/superset-authors-matrix_v2.png' - -このデータセットには、ClickHouseリポジトリに関するすべてのコミットと変更が含まれています。これは、ClickHouseに付属しているネイティブな `git-import` ツールを使用して生成できます。 - -生成されたデータは、以下の各テーブルに対して `tsv` ファイルを提供します。 - -- `commits` - 統計付きのコミット。 -- `file_changes` - 各コミットで変更されたファイルと変更に関する情報および統計。 -- `line_changes` - 各コミットの各変更されたファイル内の変更行すべてと、その行の詳細情報、そしてこの行の以前の変更に関する情報。 - -2022年11月8日現在、各TSVは約以下のサイズと行数です: - -- `commits` - 7.8M - 266,051行 -- `file_changes` - 53M - 266,051行 -- `line_changes` - 2.7G - 7,535,157行 -## データの生成 {#generating-the-data} - -これは任意です。我々はデータを自由に配布しています - [データのダウンロードと挿入について](#downloading-and-inserting-the-data)を参照してください。 - -```bash -git clone git@github.com:ClickHouse/ClickHouse.git -cd ClickHouse -clickhouse git-import --skip-paths 'generated\.cpp|^(contrib|docs?|website|libs/(libcityhash|liblz4|libdivide|libvectorclass|libdouble-conversion|libcpuid|libzstd|libfarmhash|libmetrohash|libpoco|libwidechar_width))/' --skip-commits-with-messages '^Merge branch ' -``` - -この作業は、ClickHouseリポジトリのために、約3分かかります(2022年11月8日のMacBook Pro 2021の場合)。 - -利用可能なオプションの完全な一覧は、ツールのネイティブヘルプから取得できます。 - -```bash -clickhouse git-import -h -``` - -このヘルプでは、上記の各テーブルのDDLも提供されます。例えば: - -```sql -CREATE TABLE git.commits -( - hash String, - author LowCardinality(String), - time DateTime, - message String, - files_added UInt32, - files_deleted UInt32, - files_renamed UInt32, - files_modified UInt32, - lines_added UInt32, - lines_deleted UInt32, - hunks_added UInt32, - hunks_removed UInt32, - hunks_changed UInt32 -) ENGINE = MergeTree ORDER BY time; -``` - -**これらのクエリは任意のリポジトリで機能するはずです。調査してフィードバックをお寄せください。** 実行時間に関するいくつかのガイドライン(2022年11月現在): - -- Linux - `~/clickhouse git-import` - 160分 -## データのダウンロードと挿入 {#downloading-and-inserting-the-data} - -以下のデータは、動作環境を再現するために使用できます。あるいは、このデータセットはplay.clickhouse.comで入手可能です - 詳細については[クエリ](#queries)を参照してください。 - -以下のリポジトリに対する生成されたファイルは、以下で見つけることができます: - -- ClickHouse (2022年11月8日) - - https://datasets-documentation.s3.amazonaws.com/github/commits/clickhouse/commits.tsv.xz - 2.5 MB - - https://datasets-documentation.s3.amazonaws.com/github/commits/clickhouse/file_changes.tsv.xz - 4.5MB - - https://datasets-documentation.s3.amazonaws.com/github/commits/clickhouse/line_changes.tsv.xz - 127.4 MB -- Linux (2022年11月8日) - - https://datasets-documentation.s3.amazonaws.com/github/commits/linux/commits.tsv.xz - 44 MB - - https://datasets-documentation.s3.amazonaws.com/github/commits/linux/file_changes.tsv.xz - 467MB - - https://datasets-documentation.s3.amazonaws.com/github/commits/linux/line_changes.tsv.xz - 1.1G - -このデータを挿入するには、次のクエリを実行してデータベースを準備します。 - -```sql -DROP DATABASE IF EXISTS git; -CREATE DATABASE git; - -CREATE TABLE git.commits -( - hash String, - author LowCardinality(String), - time DateTime, - message String, - files_added UInt32, - files_deleted UInt32, - files_renamed UInt32, - files_modified UInt32, - lines_added UInt32, - lines_deleted UInt32, - hunks_added UInt32, - hunks_removed UInt32, - hunks_changed UInt32 -) ENGINE = MergeTree ORDER BY time; - -CREATE TABLE git.file_changes -( - change_type Enum('Add' = 1, 'Delete' = 2, 'Modify' = 3, 'Rename' = 4, 'Copy' = 5, 'Type' = 6), - path LowCardinality(String), - old_path LowCardinality(String), - file_extension LowCardinality(String), - lines_added UInt32, - lines_deleted UInt32, - hunks_added UInt32, - hunks_removed UInt32, - hunks_changed UInt32, - - commit_hash String, - author LowCardinality(String), - time DateTime, - commit_message String, - commit_files_added UInt32, - commit_files_deleted 
UInt32, - commit_files_renamed UInt32, - commit_files_modified UInt32, - commit_lines_added UInt32, - commit_lines_deleted UInt32, - commit_hunks_added UInt32, - commit_hunks_removed UInt32, - commit_hunks_changed UInt32 -) ENGINE = MergeTree ORDER BY time; - -CREATE TABLE git.line_changes -( - sign Int8, - line_number_old UInt32, - line_number_new UInt32, - hunk_num UInt32, - hunk_start_line_number_old UInt32, - hunk_start_line_number_new UInt32, - hunk_lines_added UInt32, - hunk_lines_deleted UInt32, - hunk_context LowCardinality(String), - line LowCardinality(String), - indent UInt8, - line_type Enum('Empty' = 0, 'Comment' = 1, 'Punct' = 2, 'Code' = 3), - - prev_commit_hash String, - prev_author LowCardinality(String), - prev_time DateTime, - - file_change_type Enum('Add' = 1, 'Delete' = 2, 'Modify' = 3, 'Rename' = 4, 'Copy' = 5, 'Type' = 6), - path LowCardinality(String), - old_path LowCardinality(String), - file_extension LowCardinality(String), - file_lines_added UInt32, - file_lines_deleted UInt32, - file_hunks_added UInt32, - file_hunks_removed UInt32, - file_hunks_changed UInt32, - - commit_hash String, - author LowCardinality(String), - time DateTime, - commit_message String, - commit_files_added UInt32, - commit_files_deleted UInt32, - commit_files_renamed UInt32, - commit_files_modified UInt32, - commit_lines_added UInt32, - commit_lines_deleted UInt32, - commit_hunks_added UInt32, - commit_hunks_removed UInt32, - commit_hunks_changed UInt32 -) ENGINE = MergeTree ORDER BY time; -``` - -データを挿入するには、`INSERT INTO SELECT` と [s3 function](/sql-reference/table-functions/s3)を使用します。例えば、以下ではClickHouseのファイルをそれぞれのテーブルに挿入します: - -*commits* - -```sql -INSERT INTO git.commits SELECT * -FROM s3('https://datasets-documentation.s3.amazonaws.com/github/commits/clickhouse/commits.tsv.xz', 'TSV', 'hash String,author LowCardinality(String), time DateTime, message String, files_added UInt32, files_deleted UInt32, files_renamed UInt32, files_modified UInt32, lines_added UInt32, lines_deleted UInt32, hunks_added UInt32, hunks_removed UInt32, hunks_changed UInt32') - -0 rows in set. Elapsed: 1.826 sec. Processed 62.78 thousand rows, 8.50 MB (34.39 thousand rows/s., 4.66 MB/s.) -``` - -*file_changes* - -```sql -INSERT INTO git.file_changes SELECT * -FROM s3('https://datasets-documentation.s3.amazonaws.com/github/commits/clickhouse/file_changes.tsv.xz', 'TSV', 'change_type Enum(\'Add\' = 1, \'Delete\' = 2, \'Modify\' = 3, \'Rename\' = 4, \'Copy\' = 5, \'Type\' = 6), path LowCardinality(String), old_path LowCardinality(String), file_extension LowCardinality(String), lines_added UInt32, lines_deleted UInt32, hunks_added UInt32, hunks_removed UInt32, hunks_changed UInt32, commit_hash String, author LowCardinality(String), time DateTime, commit_message String, commit_files_added UInt32, commit_files_deleted UInt32, commit_files_renamed UInt32, commit_files_modified UInt32, commit_lines_added UInt32, commit_lines_deleted UInt32, commit_hunks_added UInt32, commit_hunks_removed UInt32, commit_hunks_changed UInt32') - -0 rows in set. Elapsed: 2.688 sec. Processed 266.05 thousand rows, 48.30 MB (98.97 thousand rows/s., 17.97 MB/s.) 
-``` - -*line_changes* - -```sql -INSERT INTO git.line_changes SELECT * -FROM s3('https://datasets-documentation.s3.amazonaws.com/github/commits/clickhouse/line_changes.tsv.xz', 'TSV', ' sign Int8, line_number_old UInt32, line_number_new UInt32, hunk_num UInt32, hunk_start_line_number_old UInt32, hunk_start_line_number_new UInt32, hunk_lines_added UInt32,\n hunk_lines_deleted UInt32, hunk_context LowCardinality(String), line LowCardinality(String), indent UInt8, line_type Enum(\'Empty\' = 0, \'Comment\' = 1, \'Punct\' = 2, \'Code\' = 3), prev_commit_hash String, prev_author LowCardinality(String), prev_time DateTime, file_change_type Enum(\'Add\' = 1, \'Delete\' = 2, \'Modify\' = 3, \'Rename\' = 4, \'Copy\' = 5, \'Type\' = 6),\n path LowCardinality(String), old_path LowCardinality(String), file_extension LowCardinality(String), file_lines_added UInt32, file_lines_deleted UInt32, file_hunks_added UInt32, file_hunks_removed UInt32, file_hunks_changed UInt32, commit_hash String,\n author LowCardinality(String), time DateTime, commit_message String, commit_files_added UInt32, commit_files_deleted UInt32, commit_files_renamed UInt32, commit_files_modified UInt32, commit_lines_added UInt32, commit_lines_deleted UInt32, commit_hunks_added UInt32, commit_hunks_removed UInt32, commit_hunks_changed UInt32') - -0 rows in set. Elapsed: 50.535 sec. Processed 7.54 million rows, 2.09 GB (149.11 thousand rows/s., 41.40 MB/s.) -``` -## クエリ {#queries} - -ツールは、そのヘルプ出力を介していくつかのクエリを提案しています。我々は、これらに加え、いくつかの追加の補足的な質問に対しても回答しました。これらのクエリは、ツールの任意の順序に対して、約増加する複雑さで構成されています。 - -このデータセットは、`git_clickhouse` データベース内で [play.clickhouse.com](https://sql.clickhouse.com?query_id=DCQPNPAIMAQXRLHYURLKVJ) で利用可能です。すべてのクエリに対してこの環境へのリンクを提供し、必要に応じてデータベース名を適応しています。データ収集の時期の違いにより、プレイ結果はここに示されているものと異なる場合がありますのでご注意ください。 -### 単一ファイルの履歴 {#history-of-a-single-file} - -最もシンプルなクエリです。ここでは `StorageReplicatedMergeTree.cpp` のすべてのコミットメッセージを見ていきます。これらはおそらくもっと興味深いので、最近のメッセージから順に並べ替えます。 - -[play](https://sql.clickhouse.com?query_id=COAZRFX2YFULDBXRQTCQ1S) - -```sql -SELECT - time, - substring(commit_hash, 1, 11) AS commit, - change_type, - author, - path, - old_path, - lines_added, - lines_deleted, - commit_message -FROM git.file_changes -WHERE path = 'src/Storages/StorageReplicatedMergeTree.cpp' -ORDER BY time DESC -LIMIT 10 - -┌────────────────time─┬─commit──────┬─change_type─┬─author─────────────┬─path────────────────────────────────────────┬─old_path─┬─lines_added─┬─lines_deleted─┬─commit_message───────────────────────────────────┐ -│ 2022-10-30 16:30:51 │ c68ab231f91 │ Modify │ Alexander Tokmakov │ src/Storages/StorageReplicatedMergeTree.cpp │ │ 13 │ 10 │ fix accessing part in Deleting state │ -│ 2022-10-23 16:24:20 │ b40d9200d20 │ Modify │ Anton Popov │ src/Storages/StorageReplicatedMergeTree.cpp │ │ 28 │ 30 │ better semantic of constsness of DataPartStorage │ -│ 2022-10-23 01:23:15 │ 56e5daba0c9 │ Modify │ Anton Popov │ src/Storages/StorageReplicatedMergeTree.cpp │ │ 28 │ 44 │ remove DataPartStorageBuilder │ -│ 2022-10-21 13:35:37 │ 851f556d65a │ Modify │ Igor Nikonov │ src/Storages/StorageReplicatedMergeTree.cpp │ │ 3 │ 2 │ Remove unused parameter │ -│ 2022-10-21 13:02:52 │ 13d31eefbc3 │ Modify │ Igor Nikonov │ src/Storages/StorageReplicatedMergeTree.cpp │ │ 4 │ 4 │ Replicated merge tree polishing │ -│ 2022-10-21 12:25:19 │ 4e76629aafc │ Modify │ Azat Khuzhin │ src/Storages/StorageReplicatedMergeTree.cpp │ │ 3 │ 2 │ Fixes for -Wshorten-64-to-32 │ -│ 2022-10-19 13:59:28 │ 05e6b94b541 │ Modify │ Antonio Andelic │ 
src/Storages/StorageReplicatedMergeTree.cpp │ │ 4 │ 0 │ Polishing │ -│ 2022-10-19 13:34:20 │ e5408aac991 │ Modify │ Antonio Andelic │ src/Storages/StorageReplicatedMergeTree.cpp │ │ 3 │ 53 │ Simplify logic │ -│ 2022-10-18 15:36:11 │ 7befe2825c9 │ Modify │ Alexey Milovidov │ src/Storages/StorageReplicatedMergeTree.cpp │ │ 2 │ 2 │ Update StorageReplicatedMergeTree.cpp │ -│ 2022-10-18 15:35:44 │ 0623ad4e374 │ Modify │ Alexey Milovidov │ src/Storages/StorageReplicatedMergeTree.cpp │ │ 1 │ 1 │ Update StorageReplicatedMergeTree.cpp │ -└─────────────────────┴─────────────┴─────────────┴────────────────────┴─────────────────────────────────────────────┴──────────┴─────────────┴───────────────┴──────────────────────────────────────────────────┘ - -10 rows in set. Elapsed: 0.006 sec. Processed 12.10 thousand rows, 1.60 MB (1.93 million rows/s., 255.40 MB/s.) -``` - -リネームイベントの前に存在したファイルの変更を表示しないことにより、リネームを除外して行の変更を確認することもできます: - -[play](https://sql.clickhouse.com?query_id=AKS9SYLARFMZCHGAAQNEBN) - -```sql -SELECT - time, - substring(commit_hash, 1, 11) AS commit, - sign, - line_number_old, - line_number_new, - author, - line -FROM git.line_changes -WHERE path = 'src/Storages/StorageReplicatedMergeTree.cpp' -ORDER BY line_number_new ASC -LIMIT 10 - -┌────────────────time─┬─commit──────┬─sign─┬─line_number_old─┬─line_number_new─┬─author───────────┬─line──────────────────────────────────────────────────┐ -│ 2020-04-16 02:06:10 │ cdeda4ab915 │ -1 │ 1 │ 1 │ Alexey Milovidov │ #include │ -│ 2020-04-16 02:06:10 │ cdeda4ab915 │ 1 │ 2 │ 1 │ Alexey Milovidov │ #include │ -│ 2020-04-16 02:06:10 │ cdeda4ab915 │ 1 │ 2 │ 2 │ Alexey Milovidov │ │ -│ 2021-05-03 23:46:51 │ 02ce9cc7254 │ -1 │ 3 │ 2 │ Alexey Milovidov │ #include │ -│ 2021-05-27 22:21:02 │ e2f29b9df02 │ -1 │ 3 │ 2 │ s-kat │ #include │ -│ 2022-10-03 22:30:50 │ 210882b9c4d │ 1 │ 2 │ 3 │ alesapin │ #include │ -│ 2022-10-23 16:24:20 │ b40d9200d20 │ 1 │ 2 │ 3 │ Anton Popov │ #include │ -│ 2021-06-20 09:24:43 │ 4c391f8e994 │ 1 │ 2 │ 3 │ Mike Kot │ #include "Common/hex.h" │ -│ 2021-12-29 09:18:56 │ 8112a712336 │ -1 │ 6 │ 5 │ avogar │ #include │ -│ 2022-04-21 20:19:13 │ 9133e398b8c │ 1 │ 11 │ 12 │ Nikolai Kochetov │ #include │ -└─────────────────────┴─────────────┴──────┴─────────────────┴─────────────────┴──────────────────┴───────────────────────────────────────────────────────┘ - -このクエリには、[行ごとのコミット履歴](#line-by-line-commit-history-of-a-file)を考慮に入れたより複雑なバリアントが存在することに注意してください。 -### 現在のアクティブファイルの検索 {#find-the-current-active-files} - -これは、リポジトリ内の現在のファイルのみを考慮したい後の分析に重要です。このセットは、リネームまたは削除されず(その後再追加/リネームされない)ファイルと見なされるファイルとして推定します。 - -**ファイルのリネームに関しては、`dbms`、`libs`、`tests/testflows/` ディレクトリ内で壊れたコミット履歴があったように見受けられます。したがって、それらも除外します。** - -[play](https://sql.clickhouse.com?query_id=2HNFWPCFWEEY92WTAPMA7W) - -```sql -SELECT path -FROM -( - SELECT - old_path AS path, - max(time) AS last_time, - 2 AS change_type - FROM git.file_changes - GROUP BY old_path - UNION ALL - SELECT - path, - max(time) AS last_time, - argMax(change_type, time) AS change_type - FROM git.file_changes - GROUP BY path -) -GROUP BY path -HAVING (argMax(change_type, last_time) != 2) AND NOT match(path, '(^dbms/)|(^libs/)|(^tests/testflows/)|(^programs/server/store/)') ORDER BY path -LIMIT 10 - -┌─path────────────────────────────────────────────────────────────┐ -│ tests/queries/0_stateless/01054_random_printable_ascii_ubsan.sh │ -│ tests/queries/0_stateless/02247_read_bools_as_numbers_json.sh │ -│ tests/performance/file_table_function.xml │ -│ 
tests/queries/0_stateless/01902_self_aliases_in_columns.sql │ -│ tests/queries/0_stateless/01070_h3_get_base_cell.reference │ -│ src/Functions/ztest.cpp │ -│ src/Interpreters/InterpreterShowTablesQuery.h │ -│ src/Parsers/Kusto/ParserKQLStatement.h │ -│ tests/queries/0_stateless/00938_dataset_test.sql │ -│ src/Dictionaries/Embedded/GeodataProviders/Types.h │ -└─────────────────────────────────────────────────────────────────┘ - -10 rows in set. Elapsed: 0.085 sec. Processed 532.10 thousand rows, 8.68 MB (6.30 million rows/s., 102.64 MB/s.) -``` - -これは、ファイルがリネームされ、その後元の値に再リネームされることも許可します。まず、リネームの結果として削除されたファイルのリストのために `old_path` を集約します。このリストを最後の操作を持つすべての `path` で UNION します。最後に、最終イベントが `Delete` でないリストをフィルタリングします。 - -[play](https://sql.clickhouse.com?query_id=1OXCKMOH2JVMSHD3NS2WW6) - -```sql -SELECT uniq(path) -FROM -( - SELECT path - FROM - ( - SELECT - old_path AS path, - max(time) AS last_time, - 2 AS change_type - FROM git.file_changes - GROUP BY old_path - UNION ALL - SELECT - path, - max(time) AS last_time, - argMax(change_type, time) AS change_type - FROM git.file_changes - GROUP BY path - ) - GROUP BY path - HAVING (argMax(change_type, last_time) != 2) AND NOT match(path, '(^dbms/)|(^libs/)|(^tests/testflows/)|(^programs/server/store/)') ORDER BY path -) - -┌─uniq(path)─┐ -│ 18559 │ -└────────────┘ -1 row in set. Elapsed: 0.089 sec. Processed 532.10 thousand rows, 8.68 MB (6.01 million rows/s., 97.99 MB/s.) -``` - -ここでは、インポート中にいくつかのディレクトリをスキップしました。つまり、 - -`--skip-paths 'generated\.cpp|^(contrib|docs?|website|libs/(libcityhash|liblz4|libdivide|libvectorclass|libdouble-conversion|libcpuid|libzstd|libfarmhash|libmetrohash|libpoco|libwidechar_width))/'` - -このパターンを `git list-files` に適用すると、18155が報告されます。 - -```bash -git ls-files | grep -v -E 'generated\.cpp|^(contrib|docs?|website|libs/(libcityhash|liblz4|libdivide|libvectorclass|libdouble-conversion|libcpuid|libzstd|libfarmhash|libmetrohash|libpoco|libwidechar_width))/' | wc -l - 18155 -``` - -**我々の現在の解決策は、従って現在のファイルの推定値です。** - -ここでの違いは、いくつかの要因によって引き起こされます: - -- リネームは、ファイルへの他の変更と同時に発生する可能性があります。これらはファイル変更の中で別々のイベントとしてリストされていますが、同じ時間で行われます。`argMax`関数はそれらを区別する方法を持っていないため、最初の値を選択します。挿入の自然順序(正しい順序を知る唯一の手段)は、union全体で維持されないため、修正イベントが選択される可能性があります。例えば、以下の `src/Functions/geometryFromColumn.h`ファイルは、`src/Functions/geometryConverters.h`にリネームされる前に複数の修正が行われています。我々の現在の解決策はModifyイベントを選択し、`src/Functions/geometryFromColumn.h`を保持することになります。 - -[play](https://sql.clickhouse.com?query_id=SCXWMR9GBMJ9UNZYQXQBFA) - -```sql - SELECT - change_type, - path, - old_path, - time, - commit_hash - FROM git.file_changes - WHERE (path = 'src/Functions/geometryFromColumn.h') OR (old_path = 'src/Functions/geometryFromColumn.h') - - ┌─change_type─┬─path───────────────────────────────┬─old_path───────────────────────────┬────────────────time─┬─commit_hash──────────────────────────────┐ - │ Add │ src/Functions/geometryFromColumn.h │ │ 2021-03-11 12:08:16 │ 9376b676e9a9bb8911b872e1887da85a45f7479d │ - │ Modify │ src/Functions/geometryFromColumn.h │ │ 2021-03-11 12:08:16 │ 6d59be5ea4768034f6526f7f9813062e0c369f7b │ - │ Modify │ src/Functions/geometryFromColumn.h │ │ 2021-03-11 12:08:16 │ 33acc2aa5dc091a7cb948f78c558529789b2bad8 │ - │ Modify │ src/Functions/geometryFromColumn.h │ │ 2021-03-11 12:08:16 │ 78e0db268ceadc42f82bc63a77ee1a4da6002463 │ - │ Modify │ src/Functions/geometryFromColumn.h │ │ 2021-03-11 12:08:16 │ 14a891057d292a164c4179bfddaef45a74eaf83a │ - │ Modify │ src/Functions/geometryFromColumn.h │ │ 2021-03-11 12:08:16 │ d0d6e6953c2a2af9fb2300921ff96b9362f22edb │ - │ 
Modify │ src/Functions/geometryFromColumn.h │ │ 2021-03-11 12:08:16 │ fe8382521139a58c0ba277eb848e88894658db66 │ - │ Modify │ src/Functions/geometryFromColumn.h │ │ 2021-03-11 12:08:16 │ 3be3d5cde8788165bc0558f1e2a22568311c3103 │ - │ Modify │ src/Functions/geometryFromColumn.h │ │ 2021-03-11 12:08:16 │ afad9bf4d0a55ed52a3f55483bc0973456e10a56 │ - │ Modify │ src/Functions/geometryFromColumn.h │ │ 2021-03-11 12:08:16 │ e3290ecc78ca3ea82b49ebcda22b5d3a4df154e6 │ - │ Rename │ src/Functions/geometryConverters.h │ src/Functions/geometryFromColumn.h │ 2021-03-11 12:08:16 │ 125945769586baf6ffd15919b29565b1b2a63218 │ - └─────────────┴────────────────────────────────────┴────────────────────────────────────┴─────────────────────┴──────────────────────────────────────────┘ - 11 rows in set. Elapsed: 0.030 sec. Processed 266.05 thousand rows, 6.61 MB (8.89 million rows/s., 220.82 MB/s.) -``` -- 壊れたコミット履歴 - 削除イベントが欠落しています。ソースと原因は未定です。 - -これらの違いは、当社の分析に有意義な影響を与えるべきではありません。**このクエリの改善版を歓迎します。** -### 変更回数が最も多いファイルのリスト {#list-files-with-most-modifications} - -現在のファイルに限定し、削除と追加の合計として変更回数を考慮します。 - -[play](https://sql.clickhouse.com?query_id=MHXPSBNPTDMJYR3OYSXVR7) - -```sql -WITH current_files AS - ( - SELECT path - FROM - ( - SELECT - old_path AS path, - max(time) AS last_time, - 2 AS change_type - FROM git.file_changes - GROUP BY old_path - UNION ALL - SELECT - path, - max(time) AS last_time, - argMax(change_type, time) AS change_type - FROM git.file_changes - GROUP BY path - ) - GROUP BY path - HAVING (argMax(change_type, last_time) != 2) AND (NOT match(path, '(^dbms/)|(^libs/)|(^tests/testflows/)|(^programs/server/store/)')) - ORDER BY path ASC - ) -SELECT - path, - sum(lines_added) + sum(lines_deleted) AS modifications -FROM git.file_changes -WHERE (path IN (current_files)) AND (file_extension IN ('h', 'cpp', 'sql')) -GROUP BY path -ORDER BY modifications DESC -LIMIT 10 - -┌─path───────────────────────────────────────────────────┬─modifications─┐ -│ src/Storages/StorageReplicatedMergeTree.cpp │ 21871 │ -│ src/Storages/MergeTree/MergeTreeData.cpp │ 17709 │ -│ programs/client/Client.cpp │ 15882 │ -│ src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp │ 14249 │ -│ src/Interpreters/InterpreterSelectQuery.cpp │ 12636 │ -│ src/Parsers/ExpressionListParsers.cpp │ 11794 │ -│ src/Analyzer/QueryAnalysisPass.cpp │ 11760 │ -│ src/Coordination/KeeperStorage.cpp │ 10225 │ -│ src/Functions/FunctionsConversion.h │ 9247 │ -│ src/Parsers/ExpressionElementParsers.cpp │ 8197 │ -└────────────────────────────────────────────────────────┴───────────────┘ - -10 rows in set. Elapsed: 0.134 sec. Processed 798.15 thousand rows, 16.46 MB (5.95 million rows/s., 122.62 MB/s.) -``` -### 通常コミットが行われる曜日はいつですか? {#what-day-of-the-week-do-commits-usually-occur} - -[play](https://sql.clickhouse.com?query_id=GED2STFSYJDRAA59H8RLIV) - -```sql -SELECT - day_of_week, - count() AS c -FROM git.commits -GROUP BY dayOfWeek(time) AS day_of_week - -┌─day_of_week─┬─────c─┐ -│ 1 │ 10575 │ -│ 2 │ 10645 │ -│ 3 │ 10748 │ -│ 4 │ 10944 │ -│ 5 │ 10090 │ -│ 6 │ 4617 │ -│ 7 │ 5166 │ -└─────────────┴───────┘ -7 rows in set. Elapsed: 0.262 sec. Processed 62.78 thousand rows, 251.14 KB (239.73 thousand rows/s., 958.93 KB/s.) -``` - -これは、金曜日に生産性が低下していることに納得がいきます。週末にコードをコミットする人々を見るのは素晴らしいことです!多大な感謝を当社の貢献者に送ります! 
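-
-同じ集計は他の時間粒度にも応用できます。参考として、`toHour` を使って時間帯ごとのコミット数を集計するスケッチを示します(`bar` の上限 5000 は表示用の仮の値で、結果は手元のデータによって変わります):
-
-```sql
-SELECT
-    hour_of_day,
-    count() AS c,
-    bar(c, 0, 5000, 30) AS commits_bar  -- 上限 5000 は目安の仮定
-FROM git.commits
-GROUP BY toHour(time) AS hour_of_day
-ORDER BY hour_of_day ASC
-```
-
-曜日だけでなく、深夜・早朝にどの程度コミットされているかといった傾向も同じ要領で確認できます。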
-### サブディレクトリ/ファイルの履歴 - 行数、コミット数、貢献者数の推移 {#history-of-subdirectoryfile---number-of-lines-commits-and-contributors-over-time} - -フィルタリングされていない場合、大きなクエリ結果が生成され、表示や視覚化が現実的ではない可能性があります。したがって、以下の例では、ファイルまたはサブディレクトリをフィルタリングできるようにします。ここでは、`toStartOfWeek`関数を使用して週ごとにグループ化しています - 必要に応じて調整してください。 - -[play](https://sql.clickhouse.com?query_id=REZRXDVU7CAWT5WKNJSTNY) - -```sql -SELECT - week, - sum(lines_added) AS lines_added, - sum(lines_deleted) AS lines_deleted, - uniq(commit_hash) AS num_commits, - uniq(author) AS authors -FROM git.file_changes -WHERE path LIKE 'src/Storages%' -GROUP BY toStartOfWeek(time) AS week -ORDER BY week ASC -LIMIT 10 - -┌───────week─┬─lines_added─┬─lines_deleted─┬─num_commits─┬─authors─┐ -│ 2020-03-29 │ 49 │ 35 │ 4 │ 3 │ -│ 2020-04-05 │ 940 │ 601 │ 55 │ 14 │ -│ 2020-04-12 │ 1472 │ 607 │ 32 │ 11 │ -│ 2020-04-19 │ 917 │ 841 │ 39 │ 12 │ -│ 2020-04-26 │ 1067 │ 626 │ 36 │ 10 │ -│ 2020-05-03 │ 514 │ 435 │ 27 │ 10 │ -│ 2020-05-10 │ 2552 │ 537 │ 48 │ 12 │ -│ 2020-05-17 │ 3585 │ 1913 │ 83 │ 9 │ -│ 2020-05-24 │ 2851 │ 1812 │ 74 │ 18 │ -│ 2020-05-31 │ 2771 │ 2077 │ 77 │ 16 │ -└────────────┴─────────────┴───────────────┴─────────────┴─────────┘ -10 rows in set. Elapsed: 0.043 sec. Processed 266.05 thousand rows, 15.85 MB (6.12 million rows/s., 364.61 MB/s.) -``` - -このデータは視覚化に適しています。以下ではSupersetを使用します。 - -**追加および削除された行について:** - -追加および削除された行について - -**コミットと作者について:** - -コミットと作者について -### 最大の著者数を持つファイルのリスト {#list-files-with-maximum-number-of-authors} - -現在のファイルのみに制限しています。 - -[play](https://sql.clickhouse.com?query_id=CYQFNQNK9TAMPU2OZ8KG5Y) - -```sql -WITH current_files AS - ( - SELECT path - FROM - ( - SELECT - old_path AS path, - max(time) AS last_time, - 2 AS change_type - FROM git.file_changes - GROUP BY old_path - UNION ALL - SELECT - path, - max(time) AS last_time, - argMax(change_type, time) AS change_type - FROM git.file_changes - GROUP BY path - ) - GROUP BY path - HAVING (argMax(change_type, last_time) != 2) AND (NOT match(path, '(^dbms/)|(^libs/)|(^tests/testflows/)|(^programs/server/store/)')) - ORDER BY path ASC - ) -SELECT - path, - uniq(author) AS num_authors -FROM git.file_changes -WHERE path IN (current_files) -GROUP BY path -ORDER BY num_authors DESC -LIMIT 10 - -┌─path────────────────────────────────────────┬─num_authors─┐ -│ src/Core/Settings.h │ 127 │ -│ CMakeLists.txt │ 96 │ -│ .gitmodules │ 85 │ -│ src/Storages/MergeTree/MergeTreeData.cpp │ 72 │ -│ src/CMakeLists.txt │ 71 │ -│ programs/server/Server.cpp │ 70 │ -│ src/Interpreters/Context.cpp │ 64 │ -│ src/Storages/StorageReplicatedMergeTree.cpp │ 63 │ -│ src/Common/ErrorCodes.cpp │ 61 │ -│ src/Interpreters/InterpreterSelectQuery.cpp │ 59 │ -└─────────────────────────────────────────────┴─────────────┘ - -10 rows in set. Elapsed: 0.239 sec. Processed 798.15 thousand rows, 14.13 MB (3.35 million rows/s., 59.22 MB/s.) 
-``` -### リポジトリ内の最古のコード行 {#oldest-lines-of-code-in-the-repository} - -現在のファイルのみに制限されています。 - -[play](https://sql.clickhouse.com?query_id=VWPBPGRZVGTHOCQYWNQZNT) - -```sql -WITH current_files AS - ( - SELECT path - FROM - ( - SELECT - old_path AS path, - max(time) AS last_time, - 2 AS change_type - FROM git.file_changes - GROUP BY old_path - UNION ALL - SELECT - path, - max(time) AS last_time, - argMax(change_type, time) AS change_type - FROM git.file_changes - GROUP BY path - ) - GROUP BY path - HAVING (argMax(change_type, last_time) != 2) AND (NOT match(path, '(^dbms/)|(^libs/)|(^tests/testflows/)|(^programs/server/store/)')) - ORDER BY path ASC - ) -SELECT - any(path) AS file_path, - line, - max(time) AS latest_change, - any(file_change_type) -FROM git.line_changes -WHERE path IN (current_files) -GROUP BY line -ORDER BY latest_change ASC -LIMIT 10 - -┌─file_path───────────────────────────────────┬─line────────────────────────────────────────────────────────┬───────latest_change─┬─any(file_change_type)─┐ -│ utils/compressor/test.sh │ ./compressor -d < compressor.snp > compressor2 │ 2011-06-17 22:19:39 │ Modify │ -│ utils/compressor/test.sh │ ./compressor < compressor > compressor.snp │ 2011-06-17 22:19:39 │ Modify │ -│ utils/compressor/test.sh │ ./compressor -d < compressor.qlz > compressor2 │ 2014-02-24 03:14:30 │ Add │ -│ utils/compressor/test.sh │ ./compressor < compressor > compressor.qlz │ 2014-02-24 03:14:30 │ Add │ -│ utils/config-processor/config-processor.cpp │ if (argc != 2) │ 2014-02-26 19:10:00 │ Add │ -│ utils/config-processor/config-processor.cpp │ std::cerr << "std::exception: " << e.what() << std::endl; │ 2014-02-26 19:10:00 │ Add │ -│ utils/config-processor/config-processor.cpp │ std::cerr << "Exception: " << e.displayText() << std::endl; │ 2014-02-26 19:10:00 │ Add │ -│ utils/config-processor/config-processor.cpp │ Poco::XML::DOMWriter().writeNode(std::cout, document); │ 2014-02-26 19:10:00 │ Add │ -│ utils/config-processor/config-processor.cpp │ std::cerr << "Some exception" << std::endl; │ 2014-02-26 19:10:00 │ Add │ -│ utils/config-processor/config-processor.cpp │ std::cerr << "usage: " << argv[0] << " path" << std::endl; │ 2014-02-26 19:10:00 │ Add │ -└─────────────────────────────────────────────┴─────────────────────────────────────────────────────────────┴─────────────────────┴───────────────────────┘ - -10 rows in set. Elapsed: 1.101 sec. Processed 8.07 million rows, 905.86 MB (7.33 million rows/s., 823.13 MB/s.) 
-```
-### 最も長い履歴を持つファイル {#files-with-longest-history}
-
-現在のファイルのみに制限されています。
-
-[play](https://sql.clickhouse.com?query_id=VWPBPGRZVGTHOCQYWNQZNT)
-
-```sql
-WITH current_files AS
- (
- SELECT path
- FROM
- (
- SELECT
- old_path AS path,
- max(time) AS last_time,
- 2 AS change_type
- FROM git.file_changes
- GROUP BY old_path
- UNION ALL
- SELECT
- path,
- max(time) AS last_time,
- argMax(change_type, time) AS change_type
- FROM git.file_changes
- GROUP BY path
- )
- GROUP BY path
- HAVING (argMax(change_type, last_time) != 2) AND (NOT match(path, '(^dbms/)|(^libs/)|(^tests/testflows/)|(^programs/server/store/)'))
- ORDER BY path ASC
- )
-SELECT
- count() AS c,
- path,
- max(time) AS latest_change
-FROM git.file_changes
-WHERE path IN (current_files)
-GROUP BY path
-ORDER BY c DESC
-LIMIT 10
-
-┌───c─┬─path────────────────────────────────────────┬───────latest_change─┐
-│ 790 │ src/Storages/StorageReplicatedMergeTree.cpp │ 2022-10-30 16:30:51 │
-│ 788 │ src/Storages/MergeTree/MergeTreeData.cpp │ 2022-11-04 09:26:44 │
-│ 752 │ src/Core/Settings.h │ 2022-10-25 11:35:25 │
-│ 749 │ CMakeLists.txt │ 2022-10-05 21:00:49 │
-│ 575 │ src/Interpreters/InterpreterSelectQuery.cpp │ 2022-11-01 10:20:10 │
-│ 563 │ CHANGELOG.md │ 2022-10-27 08:19:50 │
-│ 491 │ src/Interpreters/Context.cpp │ 2022-10-25 12:26:29 │
-│ 437 │ programs/server/Server.cpp │ 2022-10-21 12:25:19 │
-│ 375 │ programs/client/Client.cpp │ 2022-11-03 03:16:55 │
-│ 350 │ src/CMakeLists.txt │ 2022-10-24 09:22:37 │
-└─────┴─────────────────────────────────────────────┴─────────────────────┘
-
-10 rows in set. Elapsed: 0.124 sec. Processed 798.15 thousand rows, 14.71 MB (6.44 million rows/s., 118.61 MB/s.)
-```
-
-私たちのコアデータ構造である Merge Tree は、常に進化し続けており、長い編集の歴史を持っています!
-### ドキュメントとコードに対する寄稿者の月内分布 {#distribution-of-contributors-with-respect-to-docs-and-code-over-the-month}
-
-**データ取得時に、`docs/` フォルダへの変更は、コミット履歴が非常に汚れているためフィルタリングされています。そのため、このクエリの結果は正確ではありません。**
-
-私たちはリリース日周辺など、特定の時期にドキュメントを書くことが多いのでしょうか? 
`countIf` 関数を利用して簡単な比率を算出し、`bar` 関数を使って結果を視覚化できます。 - -[play](https://sql.clickhouse.com?query_id=BA4RZUXUHNQBH9YK7F2T9J) - -```sql -SELECT - day, - bar(docs_ratio * 1000, 0, 100, 100) AS bar -FROM -( - SELECT - day, - countIf(file_extension IN ('h', 'cpp', 'sql')) AS code, - countIf(file_extension = 'md') AS docs, - docs / (code + docs) AS docs_ratio - FROM git.line_changes - WHERE (sign = 1) AND (file_extension IN ('h', 'cpp', 'sql', 'md')) - GROUP BY dayOfMonth(time) AS day -) - -┌─day─┬─bar─────────────────────────────────────────────────────────────┐ -│ 1 │ ███████████████████████████████████▍ │ -│ 2 │ ███████████████████████▋ │ -│ 3 │ ████████████████████████████████▋ │ -│ 4 │ █████████████ │ -│ 5 │ █████████████████████▎ │ -│ 6 │ ████████ │ -│ 7 │ ███▋ │ -│ 8 │ ████████▌ │ -│ 9 │ ██████████████▎ │ -│ 10 │ █████████████████▏ │ -│ 11 │ █████████████▎ │ -│ 12 │ ███████████████████████████████████▋ │ -│ 13 │ █████████████████████████████▎ │ -│ 14 │ ██████▋ │ -│ 15 │ █████████████████████████████████████████▊ │ -│ 16 │ ██████████▎ │ -│ 17 │ ██████████████████████████████████████▋ │ -│ 18 │ █████████████████████████████████▌ │ -│ 19 │ ███████████ │ -│ 20 │ █████████████████████████████████▊ │ -│ 21 │ █████ │ -│ 22 │ ███████████████████████▋ │ -│ 23 │ ███████████████████████████▌ │ -│ 24 │ ███████▌ │ -│ 25 │ ██████████████████████████████████▎ │ -│ 26 │ ███████████▏ │ -│ 27 │ ███████████████████████████████████████████████████████████████ │ -│ 28 │ ████████████████████████████████████████████████████▏ │ -│ 29 │ ███▌ │ -│ 30 │ ████████████████████████████████████████▎ │ -│ 31 │ █████████████████████████████████▏ │ -└─────┴─────────────────────────────────────────────────────────────────┘ - -31 rows in set. Elapsed: 0.043 sec. Processed 7.54 million rows, 40.53 MB (176.71 million rows/s., 950.40 MB/s.) -``` - -月の終わりに近づくにつれて少し多くなるかもしれませんが、全体として良好な分配を維持しています。再度、これはデータ挿入中のドキュメントフィルタのフィルタリングによるため不正確です。 -### 最も多様な影響を与える著者 {#authors-with-the-most-diverse-impact} - -ここでの多様性は、著者が寄稿したユニークなファイルの数を示します。 - -[play](https://sql.clickhouse.com?query_id=MT8WBABUKYBYSBA78W5TML) - -```sql -SELECT - author, - uniq(path) AS num_files -FROM git.file_changes -WHERE (change_type IN ('Add', 'Modify')) AND (file_extension IN ('h', 'cpp', 'sql')) -GROUP BY author -ORDER BY num_files DESC -LIMIT 10 - -┌─author─────────────┬─num_files─┐ -│ Alexey Milovidov │ 8433 │ -│ Nikolai Kochetov │ 3257 │ -│ Vitaly Baranov │ 2316 │ -│ Maksim Kita │ 2172 │ -│ Azat Khuzhin │ 1988 │ -│ alesapin │ 1818 │ -│ Alexander Tokmakov │ 1751 │ -│ Amos Bird │ 1641 │ -│ Ivan │ 1629 │ -│ alexey-milovidov │ 1581 │ -└────────────────────┴───────────┘ - -10 rows in set. Elapsed: 0.041 sec. Processed 266.05 thousand rows, 4.92 MB (6.56 million rows/s., 121.21 MB/s.) 
-``` - -最近の作業において最も多様なコミットを持つ人を見てみましょう。日付で制限するのではなく、著者の最後の N 回のコミットに制限します(この場合、3 を使用しましたが、変更も自由です)。 - -[play](https://sql.clickhouse.com?query_id=4Q3D67FWRIVWTY8EIDDE5U) - -```sql -SELECT - author, - sum(num_files_commit) AS num_files -FROM -( - SELECT - author, - commit_hash, - uniq(path) AS num_files_commit, - max(time) AS commit_time - FROM git.file_changes - WHERE (change_type IN ('Add', 'Modify')) AND (file_extension IN ('h', 'cpp', 'sql')) - GROUP BY - author, - commit_hash - ORDER BY - author ASC, - commit_time DESC - LIMIT 3 BY author -) -GROUP BY author -ORDER BY num_files DESC -LIMIT 10 - -┌─author───────────────┬─num_files─┐ -│ Mikhail │ 782 │ -│ Li Yin │ 553 │ -│ Roman Peshkurov │ 119 │ -│ Vladimir Smirnov │ 88 │ -│ f1yegor │ 65 │ -│ maiha │ 54 │ -│ Vitaliy Lyudvichenko │ 53 │ -│ Pradeep Chhetri │ 40 │ -│ Orivej Desh │ 38 │ -│ liyang │ 36 │ -└──────────────────────┴───────────┘ - -10 rows in set. Elapsed: 0.106 sec. Processed 266.05 thousand rows, 21.04 MB (2.52 million rows/s., 198.93 MB/s.) -``` -### 著者のお気に入りのファイル {#favorite-files-for-an-author} - -ここでは、私たちの創設者である [Alexey Milovidov](https://github.com/alexey-milovidov) を選択し、分析を現在のファイルに制限します。 - -[play](https://sql.clickhouse.com?query_id=OKGZBACRHVGCRAGCZAJKMF) - -```sql -WITH current_files AS - ( - SELECT path - FROM - ( - SELECT - old_path AS path, - max(time) AS last_time, - 2 AS change_type - FROM git.file_changes - GROUP BY old_path - UNION ALL - SELECT - path, - max(time) AS last_time, - argMax(change_type, time) AS change_type - FROM git.file_changes - GROUP BY path - ) - GROUP BY path - HAVING (argMax(change_type, last_time) != 2) AND (NOT match(path, '(^dbms/)|(^libs/)|(^tests/testflows/)|(^programs/server/store/)')) - ORDER BY path ASC - ) -SELECT - path, - count() AS c -FROM git.file_changes -WHERE (author = 'Alexey Milovidov') AND (path IN (current_files)) -GROUP BY path -ORDER BY c DESC -LIMIT 10 - -┌─path────────────────────────────────────────┬───c─┐ -│ CMakeLists.txt │ 165 │ -│ CHANGELOG.md │ 126 │ -│ programs/server/Server.cpp │ 73 │ -│ src/Storages/MergeTree/MergeTreeData.cpp │ 71 │ -│ src/Storages/StorageReplicatedMergeTree.cpp │ 68 │ -│ src/Core/Settings.h │ 65 │ -│ programs/client/Client.cpp │ 57 │ -│ programs/server/play.html │ 48 │ -│ .gitmodules │ 47 │ -│ programs/install/Install.cpp │ 37 │ -└─────────────────────────────────────────────┴─────┘ - -10 rows in set. Elapsed: 0.106 sec. Processed 798.15 thousand rows, 13.97 MB (7.51 million rows/s., 131.41 MB/s.) -``` - -これは、Alexeyが変更履歴を維持する責任を持っているため、理にかなっています。しかし、ファイルの基本名を使用して人気のファイルを識別する場合はどうでしょうか - これにより、名前変更を許可し、コードの貢献に焦点を当てることができます。 - -[play](https://sql.clickhouse.com?query_id=P9PBDZGOSVTKXEXU73ZNAJ) - -```sql -SELECT - base, - count() AS c -FROM git.file_changes -WHERE (author = 'Alexey Milovidov') AND (file_extension IN ('h', 'cpp', 'sql')) -GROUP BY basename(path) AS base -ORDER BY c DESC -LIMIT 10 - -┌─base───────────────────────────┬───c─┐ -│ StorageReplicatedMergeTree.cpp │ 393 │ -│ InterpreterSelectQuery.cpp │ 299 │ -│ Aggregator.cpp │ 297 │ -│ Client.cpp │ 280 │ -│ MergeTreeData.cpp │ 274 │ -│ Server.cpp │ 264 │ -│ ExpressionAnalyzer.cpp │ 259 │ -│ StorageMergeTree.cpp │ 239 │ -│ Settings.h │ 225 │ -│ TCPHandler.cpp │ 205 │ -└────────────────────────────────┴─────┘ -10 rows in set. Elapsed: 0.032 sec. Processed 266.05 thousand rows, 5.68 MB (8.22 million rows/s., 175.50 MB/s.) 
-``` - -これは、彼の関心のある分野をより反映しているかもしれません。 -### 著者数が最も少ない最大のファイル {#largest-files-with-lowest-number-of-authors} - -これには、まず最大のファイルを特定する必要があります。すべてのファイルの完全なファイル再構築による推定は非常に高価です! - -現在のファイルに制限すると仮定して、行の追加を合計し、削除を引き算して推定できます。それから、長さと著者数の比率を計算できます。 - -[play](https://sql.clickhouse.com?query_id=PVSDOHZYUMRDDUZFEYJC7J) - -```sql -WITH current_files AS - ( - SELECT path - FROM - ( - SELECT - old_path AS path, - max(time) AS last_time, - 2 AS change_type - FROM git.file_changes - GROUP BY old_path - UNION ALL - SELECT - path, - max(time) AS last_time, - argMax(change_type, time) AS change_type - FROM git.file_changes - GROUP BY path - ) - GROUP BY path - HAVING (argMax(change_type, last_time) != 2) AND (NOT match(path, '(^dbms/)|(^libs/)|(^tests/testflows/)|(^programs/server/store/)')) - ORDER BY path ASC - ) -SELECT - path, - sum(lines_added) - sum(lines_deleted) AS num_lines, - uniqExact(author) AS num_authors, - num_lines / num_authors AS lines_author_ratio -FROM git.file_changes -WHERE path IN (current_files) -GROUP BY path -ORDER BY lines_author_ratio DESC -LIMIT 10 - -┌─path──────────────────────────────────────────────────────────────────┬─num_lines─┬─num_authors─┬─lines_author_ratio─┐ -│ src/Common/ClassificationDictionaries/emotional_dictionary_rus.txt │ 148590 │ 1 │ 148590 │ -│ src/Functions/ClassificationDictionaries/emotional_dictionary_rus.txt │ 55533 │ 1 │ 55533 │ -│ src/Functions/ClassificationDictionaries/charset_freq.txt │ 35722 │ 1 │ 35722 │ -│ src/Common/ClassificationDictionaries/charset_freq.txt │ 35722 │ 1 │ 35722 │ -│ tests/integration/test_storage_meilisearch/movies.json │ 19549 │ 1 │ 19549 │ -│ tests/queries/0_stateless/02364_multiSearch_function_family.reference │ 12874 │ 1 │ 12874 │ -│ src/Functions/ClassificationDictionaries/programming_freq.txt │ 9434 │ 1 │ 9434 │ -│ src/Common/ClassificationDictionaries/programming_freq.txt │ 9434 │ 1 │ 9434 │ -│ tests/performance/explain_ast.xml │ 5911 │ 1 │ 5911 │ -│ src/Analyzer/QueryAnalysisPass.cpp │ 5686 │ 1 │ 5686 │ -└───────────────────────────────────────────────────────────────────────┴───────────┴─────────────┴────────────────────┘ - -10 rows in set. Elapsed: 0.138 sec. Processed 798.15 thousand rows, 16.57 MB (5.79 million rows/s., 120.11 MB/s.) -``` - -テキスト辞書は現実的でないかもしれないので、ファイル拡張子フィルターを使用してコードのみに制限しましょう! 
- -[play](https://sql.clickhouse.com?query_id=BZHGWUIZMPZZUHS5XRBK2M) - -```sql -WITH current_files AS - ( - SELECT path - FROM - ( - SELECT - old_path AS path, - max(time) AS last_time, - 2 AS change_type - FROM git.file_changes - GROUP BY old_path - UNION ALL - SELECT - path, - max(time) AS last_time, - argMax(change_type, time) AS change_type - FROM git.file_changes - GROUP BY path - ) - GROUP BY path - HAVING (argMax(change_type, last_time) != 2) AND (NOT match(path, '(^dbms/)|(^libs/)|(^tests/testflows/)|(^programs/server/store/)')) - ORDER BY path ASC - ) -SELECT - path, - sum(lines_added) - sum(lines_deleted) AS num_lines, - uniqExact(author) AS num_authors, - num_lines / num_authors AS lines_author_ratio -FROM git.file_changes -WHERE (path IN (current_files)) AND (file_extension IN ('h', 'cpp', 'sql')) -GROUP BY path -ORDER BY lines_author_ratio DESC -LIMIT 10 - -┌─path──────────────────────────────────┬─num_lines─┬─num_authors─┬─lines_author_ratio─┐ -│ src/Analyzer/QueryAnalysisPass.cpp │ 5686 │ 1 │ 5686 │ -│ src/Analyzer/QueryTreeBuilder.cpp │ 880 │ 1 │ 880 │ -│ src/Planner/Planner.cpp │ 873 │ 1 │ 873 │ -│ src/Backups/RestorerFromBackup.cpp │ 869 │ 1 │ 869 │ -│ utils/memcpy-bench/FastMemcpy.h │ 770 │ 1 │ 770 │ -│ src/Planner/PlannerActionsVisitor.cpp │ 765 │ 1 │ 765 │ -│ src/Functions/sphinxstemen.cpp │ 728 │ 1 │ 728 │ -│ src/Planner/PlannerJoinTree.cpp │ 708 │ 1 │ 708 │ -│ src/Planner/PlannerJoins.cpp │ 695 │ 1 │ 695 │ -│ src/Analyzer/QueryNode.h │ 607 │ 1 │ 607 │ -└───────────────────────────────────────┴───────────┴─────────────┴────────────────────┘ -10 rows in set. Elapsed: 0.140 sec. Processed 798.15 thousand rows, 16.84 MB (5.70 million rows/s., 120.32 MB/s.) -``` - -これは最近の偏りを持っている - 新しいファイルはコミットの機会が少なくなります。少なくとも1年前のファイルに制限するとどうなるでしょうか? 
- -[play](https://sql.clickhouse.com?query_id=RMHHZEDHFUCBGRQVQA2732) - -```sql -WITH current_files AS - ( - SELECT path - FROM - ( - SELECT - old_path AS path, - max(time) AS last_time, - 2 AS change_type - FROM git.file_changes - GROUP BY old_path - UNION ALL - SELECT - path, - max(time) AS last_time, - argMax(change_type, time) AS change_type - FROM git.file_changes - GROUP BY path - ) - GROUP BY path - HAVING (argMax(change_type, last_time) != 2) AND (NOT match(path, '(^dbms/)|(^libs/)|(^tests/testflows/)|(^programs/server/store/)')) - ORDER BY path ASC - ) -SELECT - min(time) AS min_date, - path, - sum(lines_added) - sum(lines_deleted) AS num_lines, - uniqExact(author) AS num_authors, - num_lines / num_authors AS lines_author_ratio -FROM git.file_changes -WHERE (path IN (current_files)) AND (file_extension IN ('h', 'cpp', 'sql')) -GROUP BY path -HAVING min_date <= (now() - toIntervalYear(1)) -ORDER BY lines_author_ratio DESC -LIMIT 10 - -┌────────────min_date─┬─path───────────────────────────────────────────────────────────┬─num_lines─┬─num_authors─┬─lines_author_ratio─┐ -│ 2021-03-08 07:00:54 │ utils/memcpy-bench/FastMemcpy.h │ 770 │ 1 │ 770 │ -│ 2021-05-04 13:47:34 │ src/Functions/sphinxstemen.cpp │ 728 │ 1 │ 728 │ -│ 2021-03-14 16:52:51 │ utils/memcpy-bench/glibc/dwarf2.h │ 592 │ 1 │ 592 │ -│ 2021-03-08 09:04:52 │ utils/memcpy-bench/FastMemcpy_Avx.h │ 496 │ 1 │ 496 │ -│ 2020-10-19 01:10:50 │ tests/queries/0_stateless/01518_nullable_aggregate_states2.sql │ 411 │ 1 │ 411 │ -│ 2020-11-24 14:53:34 │ programs/server/GRPCHandler.cpp │ 399 │ 1 │ 399 │ -│ 2021-03-09 14:10:28 │ src/DataTypes/Serializations/SerializationSparse.cpp │ 363 │ 1 │ 363 │ -│ 2021-08-20 15:06:57 │ src/Functions/vectorFunctions.cpp │ 1327 │ 4 │ 331.75 │ -│ 2020-08-04 03:26:23 │ src/Interpreters/MySQL/CreateQueryConvertVisitor.cpp │ 311 │ 1 │ 311 │ -│ 2020-11-06 15:45:13 │ src/Storages/Rocksdb/StorageEmbeddedRocksdb.cpp │ 611 │ 2 │ 305.5 │ -└─────────────────────┴────────────────────────────────────────────────────────────────┴───────────┴─────────────┴────────────────────┘ - -10 rows in set. Elapsed: 0.143 sec. Processed 798.15 thousand rows, 18.00 MB (5.58 million rows/s., 125.87 MB/s.) -``` -### Commits and lines of code distribution by time; by weekday, by author; for specific subdirectories {#commits-and-lines-of-code-distribution-by-time-by-weekday-by-author-for-specific-subdirectories} - -これを曜日ごとの追加および削除された行数として解釈します。この場合、[Functionsディレクトリ](https://github.com/ClickHouse/ClickHouse/tree/master/src/Functions) に焦点を当てます。 - -[play](https://sql.clickhouse.com?query_id=PF3KEMYG5CVLJGCFYQEGB1) - -```sql -SELECT - dayOfWeek, - uniq(commit_hash) AS commits, - sum(lines_added) AS lines_added, - sum(lines_deleted) AS lines_deleted -FROM git.file_changes -WHERE path LIKE 'src/Functions%' -GROUP BY toDayOfWeek(time) AS dayOfWeek - -┌─dayOfWeek─┬─commits─┬─lines_added─┬─lines_deleted─┐ -│ 1 │ 476 │ 24619 │ 15782 │ -│ 2 │ 434 │ 18098 │ 9938 │ -│ 3 │ 496 │ 26562 │ 20883 │ -│ 4 │ 587 │ 65674 │ 18862 │ -│ 5 │ 504 │ 85917 │ 14518 │ -│ 6 │ 314 │ 13604 │ 10144 │ -│ 7 │ 294 │ 11938 │ 6451 │ -└───────────┴─────────┴─────────────┴───────────────┘ - -7 rows in set. Elapsed: 0.034 sec. Processed 266.05 thousand rows, 14.66 MB (7.73 million rows/s., 425.56 MB/s.) 
-``` - -そして、時刻別に、 - -[play](https://sql.clickhouse.com?query_id=Q4VDVKEGHHRBCUJHNCVTF1) - -```sql -SELECT - hourOfDay, - uniq(commit_hash) AS commits, - sum(lines_added) AS lines_added, - sum(lines_deleted) AS lines_deleted -FROM git.file_changes -WHERE path LIKE 'src/Functions%' -GROUP BY toHour(time) AS hourOfDay - -┌─hourOfDay─┬─commits─┬─lines_added─┬─lines_deleted─┐ -│ 0 │ 71 │ 4169 │ 3404 │ -│ 1 │ 90 │ 2174 │ 1927 │ -│ 2 │ 65 │ 2343 │ 1515 │ -│ 3 │ 76 │ 2552 │ 493 │ -│ 4 │ 62 │ 1480 │ 1304 │ -│ 5 │ 38 │ 1644 │ 253 │ -│ 6 │ 104 │ 4434 │ 2979 │ -│ 7 │ 117 │ 4171 │ 1678 │ -│ 8 │ 106 │ 4604 │ 4673 │ -│ 9 │ 135 │ 60550 │ 2678 │ -│ 10 │ 149 │ 6133 │ 3482 │ -│ 11 │ 182 │ 8040 │ 3833 │ -│ 12 │ 209 │ 29428 │ 15040 │ -│ 13 │ 187 │ 10204 │ 5491 │ -│ 14 │ 204 │ 9028 │ 6060 │ -│ 15 │ 231 │ 15179 │ 10077 │ -│ 16 │ 196 │ 9568 │ 5925 │ -│ 17 │ 138 │ 4941 │ 3849 │ -│ 18 │ 123 │ 4193 │ 3036 │ -│ 19 │ 165 │ 8817 │ 6646 │ -│ 20 │ 140 │ 3749 │ 2379 │ -│ 21 │ 132 │ 41585 │ 4182 │ -│ 22 │ 85 │ 4094 │ 3955 │ -│ 23 │ 100 │ 3332 │ 1719 │ -└───────────┴─────────┴─────────────┴───────────────┘ - -24 rows in set. Elapsed: 0.039 sec. Processed 266.05 thousand rows, 14.66 MB (6.77 million rows/s., 372.89 MB/s.) -``` - -この分布は、私たちの開発チームのほとんどがアムステルダムにいることを考慮すると納得がいきます。 `bar` 関数がこれらの分布を視覚化するのに役立ちます: - -[play](https://sql.clickhouse.com?query_id=9AZ8CENV8N91YGW7T6IB68) - -```sql -SELECT - hourOfDay, - bar(commits, 0, 400, 50) AS commits, - bar(lines_added, 0, 30000, 50) AS lines_added, - bar(lines_deleted, 0, 15000, 50) AS lines_deleted -FROM -( - SELECT - hourOfDay, - uniq(commit_hash) AS commits, - sum(lines_added) AS lines_added, - sum(lines_deleted) AS lines_deleted - FROM git.file_changes - WHERE path LIKE 'src/Functions%' - GROUP BY toHour(time) AS hourOfDay -) - -┌─hourOfDay─┬─commits───────────────────────┬─lines_added────────────────────────────────────────┬─lines_deleted──────────────────────────────────────┐ -│ 0 │ ████████▊ │ ██████▊ │ ███████████▎ │ -│ 1 │ ███████████▎ │ ███▌ │ ██████▍ │ -│ 2 │ ████████ │ ███▊ │ █████ │ -│ 3 │ █████████▌ │ ████▎ │ █▋ │ -│ 4 │ ███████▋ │ ██▍ │ ████▎ │ -│ 5 │ ████▋ │ ██▋ │ ▋ │ -│ 6 │ █████████████ │ ███████▍ │ █████████▊ │ -│ 7 │ ██████████████▋ │ ██████▊ │ █████▌ │ -│ 8 │ █████████████▎ │ ███████▋ │ ███████████████▌ │ -│ 9 │ ████████████████▊ │ ██████████████████████████████████████████████████ │ ████████▊ │ -│ 10 │ ██████████████████▋ │ ██████████▏ │ ███████████▌ │ -│ 11 │ ██████████████████████▋ │ █████████████▍ │ ████████████▋ │ -│ 12 │ ██████████████████████████ │ █████████████████████████████████████████████████ │ ██████████████████████████████████████████████████ │ -│ 13 │ ███████████████████████▍ │ █████████████████ │ ██████████████████▎ │ -│ 14 │ █████████████████████████▌ │ ███████████████ │ ████████████████████▏ │ -│ 15 │ ████████████████████████████▊ │ █████████████████████████▎ │ █████████████████████████████████▌ │ -│ 16 │ ████████████████████████▌ │ ███████████████▊ │ ███████████████████▋ │ -│ 17 │ █████████████████▎ │ ████████▏ │ ████████████▋ │ -│ 18 │ ███████████████▍ │ ██████▊ │ ██████████ │ -│ 19 │ ████████████████████▋ │ ██████████████▋ │ ██████████████████████▏ │ -│ 20 │ █████████████████▌ │ ██████▏ │ ███████▊ │ -│ 21 │ ████████████████▌ │ ██████████████████████████████████████████████████ │ █████████████▊ │ -│ 22 │ ██████████▋ │ ██████▋ │ █████████████▏ │ -│ 23 │ ████████████▌ │ █████▌ │ █████▋ │ -└───────────┴───────────────────────────────┴────────────────────────────────────────────────────┴────────────────────────────────────────────────────┘ - -24 
rows in set. Elapsed: 0.038 sec. Processed 266.05 thousand rows, 14.66 MB (7.09 million rows/s., 390.69 MB/s.) -``` -### Matrix of authors that shows what authors tends to rewrite another authors code {#matrix-of-authors-that-shows-what-authors-tends-to-rewrite-another-authors-code} - -`sign = -1` はコード削除を示します。句読点と空行の追加は除外します。 - -[play](https://sql.clickhouse.com?query_id=448O8GWAHY3EM6ZZ7AGLAM) - -```sql -SELECT - prev_author || '(a)' as add_author, - author || '(d)' as delete_author, - count() AS c -FROM git.line_changes -WHERE (sign = -1) AND (file_extension IN ('h', 'cpp')) AND (line_type NOT IN ('Punct', 'Empty')) AND (author != prev_author) AND (prev_author != '') -GROUP BY - prev_author, - author -ORDER BY c DESC -LIMIT 1 BY prev_author -LIMIT 100 - -┌─prev_author──────────┬─author───────────┬─────c─┐ -│ Ivan │ Alexey Milovidov │ 18554 │ -│ Alexey Arno │ Alexey Milovidov │ 18475 │ -│ Michael Kolupaev │ Alexey Milovidov │ 14135 │ -│ Alexey Milovidov │ Nikolai Kochetov │ 13435 │ -│ Andrey Mironov │ Alexey Milovidov │ 10418 │ -│ proller │ Alexey Milovidov │ 7280 │ -│ Nikolai Kochetov │ Alexey Milovidov │ 6806 │ -│ alexey-milovidov │ Alexey Milovidov │ 5027 │ -│ Vitaliy Lyudvichenko │ Alexey Milovidov │ 4390 │ -│ Amos Bird │ Ivan Lezhankin │ 3125 │ -│ f1yegor │ Alexey Milovidov │ 3119 │ -│ Pavel Kartavyy │ Alexey Milovidov │ 3087 │ -│ Alexey Zatelepin │ Alexey Milovidov │ 2978 │ -│ alesapin │ Alexey Milovidov │ 2949 │ -│ Sergey Fedorov │ Alexey Milovidov │ 2727 │ -│ Ivan Lezhankin │ Alexey Milovidov │ 2618 │ -│ Vasily Nemkov │ Alexey Milovidov │ 2547 │ -│ Alexander Tokmakov │ Alexey Milovidov │ 2493 │ -│ Nikita Vasilev │ Maksim Kita │ 2420 │ -│ Anton Popov │ Amos Bird │ 2127 │ -└──────────────────────┴──────────────────┴───────┘ - -20 rows in set. Elapsed: 0.098 sec. Processed 7.54 million rows, 42.16 MB (76.67 million rows/s., 428.99 MB/s.) -``` - -Sankeyチャート(SuperSet)を使用すると、これをうまく視覚化できます。ここでは、各著者に対して上位3件のコード削除者を取得するために、`LIMIT BY` を3に増やして可視性を向上させます。 - -Superset authors matrix - -Alexeyは他の人のコードを削除するのが明らかに好きです。彼を除外してコード削除のバランスを見ることにしましょう。 - -Superset authors matrix v2 -### Who is the highest percentage contributor per day of week? {#who-is-the-highest-percentage-contributor-per-day-of-week} - -コミットの数で考慮する場合: - -[play](https://sql.clickhouse.com?query_id=WXPKFJCAHOKYKEVTWNFVCY) - -```sql -SELECT - day_of_week, - author, - count() AS c -FROM git.commits -GROUP BY - dayOfWeek(time) AS day_of_week, - author -ORDER BY - day_of_week ASC, - c DESC -LIMIT 1 BY day_of_week - -┌─day_of_week─┬─author───────────┬────c─┐ -│ 1 │ Alexey Milovidov │ 2204 │ -│ 2 │ Alexey Milovidov │ 1588 │ -│ 3 │ Alexey Milovidov │ 1725 │ -│ 4 │ Alexey Milovidov │ 1915 │ -│ 5 │ Alexey Milovidov │ 1940 │ -│ 6 │ Alexey Milovidov │ 1851 │ -│ 7 │ Alexey Milovidov │ 2400 │ -└─────────────┴──────────────────┴──────┘ - -7 rows in set. Elapsed: 0.012 sec. Processed 62.78 thousand rows, 395.47 KB (5.44 million rows/s., 34.27 MB/s.) 
-```
-
-なるほど、ここでは最も長く貢献してきた創設者の Alexey が有利になっています。分析を過去1年間に制限しましょう。
-
-[play](https://sql.clickhouse.com?query_id=8YRJGHFTNJAWJ96XCJKKEH)
-
-```sql
-SELECT
- day_of_week,
- author,
- count() AS c
-FROM git.commits
-WHERE time > (now() - toIntervalYear(1))
-GROUP BY
- dayOfWeek(time) AS day_of_week,
- author
-ORDER BY
- day_of_week ASC,
- c DESC
-LIMIT 1 BY day_of_week
-
-┌─day_of_week─┬─author───────────┬───c─┐
-│ 1 │ Alexey Milovidov │ 198 │
-│ 2 │ alesapin │ 162 │
-│ 3 │ alesapin │ 163 │
-│ 4 │ Azat Khuzhin │ 166 │
-│ 5 │ alesapin │ 191 │
-│ 6 │ Alexey Milovidov │ 179 │
-│ 7 │ Alexey Milovidov │ 243 │
-└─────────────┴──────────────────┴─────┘
-
-7 rows in set. Elapsed: 0.004 sec. Processed 21.82 thousand rows, 140.02 KB (4.88 million rows/s., 31.29 MB/s.)
-```
-
-ただし、これはまだ単純すぎて、実際の作業量を反映していません。
-
-より良い指標は、過去1年間に行われた全作業に対する割合として、曜日ごとの最大の貢献者を特定することかもしれません。なお、コードの削除と追加は同等に扱います。
-
-[play](https://sql.clickhouse.com?query_id=VQF4KMRDSUEXGS1JFVDJHV)
-
-```sql
-SELECT
- top_author.day_of_week,
- top_author.author,
- top_author.author_work / all_work.total_work AS top_author_percent
-FROM
-(
- SELECT
- day_of_week,
- author,
- sum(lines_added) + sum(lines_deleted) AS author_work
- FROM git.file_changes
- WHERE time > (now() - toIntervalYear(1))
- GROUP BY
- author,
- dayOfWeek(time) AS day_of_week
- ORDER BY
- day_of_week ASC,
- author_work DESC
- LIMIT 1 BY day_of_week
-) AS top_author
-INNER JOIN
-(
- SELECT
- day_of_week,
- sum(lines_added) + sum(lines_deleted) AS total_work
- FROM git.file_changes
- WHERE time > (now() - toIntervalYear(1))
- GROUP BY dayOfWeek(time) AS day_of_week
-) AS all_work USING (day_of_week)
-
-┌─day_of_week─┬─author──────────────┬──top_author_percent─┐
-│ 1 │ Alexey Milovidov │ 0.3168282877768332 │
-│ 2 │ Mikhail f. Shiryaev │ 0.3523434231193969 │
-│ 3 │ vdimir │ 0.11859742484577324 │
-│ 4 │ Nikolay Degterinsky │ 0.34577318920318467 │
-│ 5 │ Alexey Milovidov │ 0.13208704423684223 │
-│ 6 │ Alexey Milovidov │ 0.18895257783624633 │
-│ 7 │ Robert Schulze │ 0.3617405888930302 │
-└─────────────┴─────────────────────┴─────────────────────┘
-
-7 rows in set. Elapsed: 0.014 sec. Processed 106.12 thousand rows, 1.38 MB (7.61 million rows/s., 98.65 MB/s.)
-``` -### Distribution of code age across repository {#distribution-of-code-age-across-repository} - -現在のファイルに分析を制限します。簡潔さのために、結果を深さ2に制限し、ルートフォルダごとに5ファイルを制限します。必要に応じて調整してください。 - -[play](https://sql.clickhouse.com?query_id=6YWAUQYPZINZDJGBEZBNWG) - -```sql -WITH current_files AS - ( - SELECT path - FROM - ( - SELECT - old_path AS path, - max(time) AS last_time, - 2 AS change_type - FROM git.file_changes - GROUP BY old_path - UNION ALL - SELECT - path, - max(time) AS last_time, - argMax(change_type, time) AS change_type - FROM git.file_changes - GROUP BY path - ) - GROUP BY path - HAVING (argMax(change_type, last_time) != 2) AND (NOT match(path, '(^dbms/)|(^libs/)|(^tests/testflows/)|(^programs/server/store/)')) - ORDER BY path ASC - ) -SELECT - concat(root, '/', sub_folder) AS folder, - round(avg(days_present)) AS avg_age_of_files, - min(days_present) AS min_age_files, - max(days_present) AS max_age_files, - count() AS c -FROM -( - SELECT - path, - dateDiff('day', min(time), toDate('2022-11-03')) AS days_present - FROM git.file_changes - WHERE (path IN (current_files)) AND (file_extension IN ('h', 'cpp', 'sql')) - GROUP BY path -) -GROUP BY - splitByChar('/', path)[1] AS root, - splitByChar('/', path)[2] AS sub_folder -ORDER BY - root ASC, - c DESC -LIMIT 5 BY root - -┌─folder───────────────────────────┬─avg_age_of_files─┬─min_age_files─┬─max_age_files─┬────c─┐ -│ base/base │ 387 │ 201 │ 397 │ 84 │ -│ base/glibc-compatibility │ 887 │ 59 │ 993 │ 19 │ -│ base/consistent-hashing │ 993 │ 993 │ 993 │ 5 │ -│ base/widechar_width │ 993 │ 993 │ 993 │ 2 │ -│ base/consistent-hashing-sumbur │ 993 │ 993 │ 993 │ 2 │ -│ docker/test │ 1043 │ 1043 │ 1043 │ 1 │ -│ programs/odbc-bridge │ 835 │ 91 │ 945 │ 25 │ -│ programs/copier │ 587 │ 14 │ 945 │ 22 │ -│ programs/library-bridge │ 155 │ 47 │ 608 │ 21 │ -│ programs/disks │ 144 │ 62 │ 150 │ 14 │ -│ programs/server │ 874 │ 709 │ 945 │ 10 │ -│ rust/BLAKE3 │ 52 │ 52 │ 52 │ 1 │ -│ src/Functions │ 752 │ 0 │ 944 │ 809 │ -│ src/Storages │ 700 │ 8 │ 944 │ 736 │ -│ src/Interpreters │ 684 │ 3 │ 944 │ 490 │ -│ src/Processors │ 703 │ 44 │ 944 │ 482 │ -│ src/Common │ 673 │ 7 │ 944 │ 473 │ -│ tests/queries │ 674 │ -5 │ 945 │ 3777 │ -│ tests/integration │ 656 │ 132 │ 945 │ 4 │ -│ utils/memcpy-bench │ 601 │ 599 │ 605 │ 10 │ -│ utils/keeper-bench │ 570 │ 569 │ 570 │ 7 │ -│ utils/durability-test │ 793 │ 793 │ 793 │ 4 │ -│ utils/self-extracting-executable │ 143 │ 143 │ 143 │ 3 │ -│ utils/self-extr-exec │ 224 │ 224 │ 224 │ 2 │ -└──────────────────────────────────┴──────────────────┴───────────────┴───────────────┴──────┘ - -24 rows in set. Elapsed: 0.129 sec. Processed 798.15 thousand rows, 15.11 MB (6.19 million rows/s., 117.08 MB/s.) -``` -### What percentage of code for an author has been removed by other authors? 
{#what-percentage-of-code-for-an-author-has-been-removed-by-other-authors} - -この質問については、著者によって書かれた行数を、他の貢献者によって削除された行数の合計で割ります。 - -[play](https://sql.clickhouse.com?query_id=T4DTWTB36WFSEYAZLMGRNF) - -```sql -SELECT - k, - written_code.c, - removed_code.c, - removed_code.c / written_code.c AS remove_ratio -FROM -( - SELECT - author AS k, - count() AS c - FROM git.line_changes - WHERE (sign = 1) AND (file_extension IN ('h', 'cpp')) AND (line_type NOT IN ('Punct', 'Empty')) - GROUP BY k -) AS written_code -INNER JOIN -( - SELECT - prev_author AS k, - count() AS c - FROM git.line_changes - WHERE (sign = -1) AND (file_extension IN ('h', 'cpp')) AND (line_type NOT IN ('Punct', 'Empty')) AND (author != prev_author) - GROUP BY k -) AS removed_code USING (k) -WHERE written_code.c > 1000 -ORDER BY remove_ratio DESC -LIMIT 10 - -┌─k──────────────────┬─────c─┬─removed_code.c─┬───────remove_ratio─┐ -│ Marek Vavruša │ 1458 │ 1318 │ 0.9039780521262003 │ -│ Ivan │ 32715 │ 27500 │ 0.8405930001528351 │ -│ artpaul │ 3450 │ 2840 │ 0.8231884057971014 │ -│ Silviu Caragea │ 1542 │ 1209 │ 0.7840466926070039 │ -│ Ruslan │ 1027 │ 802 │ 0.7809152872444012 │ -│ Tsarkova Anastasia │ 1755 │ 1364 │ 0.7772079772079772 │ -│ Vyacheslav Alipov │ 3526 │ 2727 │ 0.7733976176971072 │ -│ Marek Vavruša │ 1467 │ 1124 │ 0.7661895023858214 │ -│ f1yegor │ 7194 │ 5213 │ 0.7246316374756742 │ -│ kreuzerkrieg │ 3406 │ 2468 │ 0.724603640634175 │ -└────────────────────┴───────┴────────────────┴────────────────────┘ - -10 rows in set. Elapsed: 0.126 sec. Processed 15.07 million rows, 73.51 MB (119.97 million rows/s., 585.16 MB/s.) -``` -### List files that were rewritten most number of times? {#list-files-that-were-rewritten-most-number-of-times} - -この質問の最も単純なアプローチは、パスごとの行の変更回数を単純にカウントすることかもしれません(現在のファイルに制限されています)。例えば: - -```sql -WITH current_files AS - ( - SELECT path - FROM - ( - SELECT - old_path AS path, - max(time) AS last_time, - 2 AS change_type - FROM git.file_changes - GROUP BY old_path - UNION ALL - SELECT - path, - max(time) AS last_time, - argMax(change_type, time) AS change_type - FROM git.file_changes - GROUP BY path - ) - GROUP BY path - HAVING (argMax(change_type, last_time) != 2) AND (NOT match(path, '(^dbms/)|(^libs/)|(^tests/testflows/)|(^programs/server/store/)')) - ORDER BY path ASC - ) -SELECT - path, - count() AS c -FROM git.line_changes -WHERE (file_extension IN ('h', 'cpp', 'sql')) AND (path IN (current_files)) -GROUP BY path -ORDER BY c DESC -LIMIT 10 - -┌─path───────────────────────────────────────────────────┬─────c─┐ -│ src/Storages/StorageReplicatedMergeTree.cpp │ 21871 │ -│ src/Storages/MergeTree/MergeTreeData.cpp │ 17709 │ -│ programs/client/Client.cpp │ 15882 │ -│ src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp │ 14249 │ -│ src/Interpreters/InterpreterSelectQuery.cpp │ 12636 │ -│ src/Parsers/ExpressionListParsers.cpp │ 11794 │ -│ src/Analyzer/QueryAnalysisPass.cpp │ 11760 │ -│ src/Coordination/KeeperStorage.cpp │ 10225 │ -│ src/Functions/FunctionsConversion.h │ 9247 │ -│ src/Parsers/ExpressionElementParsers.cpp │ 8197 │ -└────────────────────────────────────────────────────────┴───────┘ - -10 rows in set. Elapsed: 0.160 sec. Processed 8.07 million rows, 98.99 MB (50.49 million rows/s., 619.49 MB/s.) 
-``` - -これは「書き換え」の概念を捉えていないことに注意してください。ただし、コミットの中でファイルの大部分が変更される場合です。このためには、より複雑なクエリが必要です。書き換えを、ファイルの50%以上が削除され、50%以上が追加された場合と考えます。このクエリは現在のファイルのみに制限されます。`path`と`commit_hash`でグループ化し、追加された行数と削除された行数を返すことで、ファイル変更をリストします。ウィンドウ関数を用いて、任意の時点でファイルの合計サイズを累積合計で推定し、ファイルサイズへの影響を`行の追加 - 行の削除`として評価します。この統計を使用して、各変更に対して追加されたまたは削除されたファイルの割合を計算できます。最後に、書き換えを構成するファイル変更の回数をカウントします。すなわち`(percent_add >= 0.5) AND (percent_delete >= 0.5) AND current_size > 50`です。ファイルの初期の貢献をカウントを回避するために50行以上である必要があります。これにより、小さなファイルが書き換えられる可能性が高くなるというバイアスも避けます。 - -[play](https://sql.clickhouse.com?query_id=5PL1QLNSH6QQTR8H9HINNP) - -```sql -WITH - current_files AS - ( - SELECT path - FROM - ( - SELECT - old_path AS path, - max(time) AS last_time, - 2 AS change_type - FROM git.file_changes - GROUP BY old_path - UNION ALL - SELECT - path, - max(time) AS last_time, - argMax(change_type, time) AS change_type - FROM git.file_changes - GROUP BY path - ) - GROUP BY path - HAVING (argMax(change_type, last_time) != 2) AND (NOT match(path, '(^dbms/)|(^libs/)|(^tests/testflows/)|(^programs/server/store/)')) - ORDER BY path ASC - ), - changes AS - ( - SELECT - path, - max(time) AS max_time, - commit_hash, - any(lines_added) AS num_added, - any(lines_deleted) AS num_deleted, - any(change_type) AS type - FROM git.file_changes - WHERE (change_type IN ('Add', 'Modify')) AND (path IN (current_files)) AND (file_extension IN ('h', 'cpp', 'sql')) - GROUP BY - path, - commit_hash - ORDER BY - path ASC, - max_time ASC - ), - rewrites AS - ( - SELECT - path, - commit_hash, - max_time, - type, - num_added, - num_deleted, - sum(num_added - num_deleted) OVER (PARTITION BY path ORDER BY max_time ASC) AS current_size, - if(current_size > 0, num_added / current_size, 0) AS percent_add, - if(current_size > 0, num_deleted / current_size, 0) AS percent_delete - FROM changes - ) -SELECT - path, - count() AS num_rewrites -FROM rewrites -WHERE (type = 'Modify') AND (percent_add >= 0.5) AND (percent_delete >= 0.5) AND (current_size > 50) -GROUP BY path -ORDER BY num_rewrites DESC -LIMIT 10 - -┌─path──────────────────────────────────────────────────┬─num_rewrites─┐ -│ src/Storages/WindowView/StorageWindowView.cpp │ 8 │ -│ src/Functions/array/arrayIndex.h │ 7 │ -│ src/Dictionaries/CacheDictionary.cpp │ 6 │ -│ src/Dictionaries/RangeHashedDictionary.cpp │ 5 │ -│ programs/client/Client.cpp │ 4 │ -│ src/Functions/polygonPerimeter.cpp │ 4 │ -│ src/Functions/polygonsEquals.cpp │ 4 │ -│ src/Functions/polygonsWithin.cpp │ 4 │ -│ src/Processors/Formats/Impl/ArrowColumnToCHColumn.cpp │ 4 │ -│ src/Functions/polygonsSymDifference.cpp │ 4 │ -└───────────────────────────────────────────────────────┴──────────────┘ - -10 rows in set. Elapsed: 0.299 sec. Processed 798.15 thousand rows, 31.52 MB (2.67 million rows/s., 105.29 MB/s.) -``` -### What weekday does the code have the highest chance to stay in the repository? 
{#what-weekday-does-the-code-have-the-highest-chance-to-stay-in-the-repository} - -これに対しては、コードの行を一意に特定する必要があります。これは(同じ行がファイルに複数回現れる可能性があるため)、パスと行の内容を使用して見積もります。 - -追加された行をクエリし、これは削除された行と結合し、後者が前者よりも最近発生したケースにフィルタリングします。これにより、削除された行が得られ、これら2つのイベント間の時間を計算できます。 - -最後に、週の各曜日に対して行がリポジトリに滞留する平均日数を計算するために、このデータセットを集約します。 - -[play](https://sql.clickhouse.com?query_id=GVF23LEZTNZI22BT8LZBBE) - -```sql -SELECT - day_of_week_added, - count() AS num, - avg(days_present) AS avg_days_present -FROM -( - SELECT - added_code.line, - added_code.time AS added_day, - dateDiff('day', added_code.time, removed_code.time) AS days_present - FROM - ( - SELECT - path, - line, - max(time) AS time - FROM git.line_changes - WHERE (sign = 1) AND (line_type NOT IN ('Punct', 'Empty')) - GROUP BY - path, - line - ) AS added_code - INNER JOIN - ( - SELECT - path, - line, - max(time) AS time - FROM git.line_changes - WHERE (sign = -1) AND (line_type NOT IN ('Punct', 'Empty')) - GROUP BY - path, - line - ) AS removed_code USING (path, line) - WHERE removed_code.time > added_code.time -) -GROUP BY dayOfWeek(added_day) AS day_of_week_added - -┌─day_of_week_added─┬────num─┬───avg_days_present─┐ -│ 1 │ 171879 │ 193.81759260875384 │ -│ 2 │ 141448 │ 153.0931013517335 │ -│ 3 │ 161230 │ 137.61553681076722 │ -│ 4 │ 255728 │ 121.14149799787273 │ -│ 5 │ 203907 │ 141.60181847606998 │ -│ 6 │ 62305 │ 202.43449161383518 │ -│ 7 │ 70904 │ 220.0266134491707 │ -└───────────────────┴────────┴────────────────────┘ - -7 rows in set. Elapsed: 3.965 sec. Processed 15.07 million rows, 1.92 GB (3.80 million rows/s., 483.50 MB/s.) -``` -### 平均コード年齢でソートされたファイル {#files-sorted-by-average-code-age} - -このクエリは、[コードがリポジトリに留まる確率が最も高い曜日は何か](#what-weekday-does-the-code-have-the-highest-chance-to-stay-in-the-repository)という原則と同じです。パスと行内容を使用してコードの行をユニークに特定することを目的としています。これにより、行が追加されてから削除されるまでの時間を特定することができます。ただし、現在のファイルとコードのみにフィルタリングし、行ごとに各ファイルの時間を平均します。 - -[play](https://sql.clickhouse.com?query_id=3CYYT7HEHWRFHVCM9JCKSU) - -```sql -WITH - current_files AS - ( - SELECT path - FROM - ( - SELECT - old_path AS path, - max(time) AS last_time, - 2 AS change_type - FROM git.file_changes - GROUP BY old_path - UNION ALL - SELECT - path, - max(time) AS last_time, - argMax(change_type, time) AS change_type - FROM git.clickhouse_file_changes - GROUP BY path - ) - GROUP BY path - HAVING (argMax(change_type, last_time) != 2) AND (NOT match(path, '(^dbms/)|(^libs/)|(^tests/testflows/)|(^programs/server/store/)')) - ORDER BY path ASC - ), - lines_removed AS - ( - SELECT - added_code.path AS path, - added_code.line, - added_code.time AS added_day, - dateDiff('day', added_code.time, removed_code.time) AS days_present - FROM - ( - SELECT - path, - line, - max(time) AS time, - any(file_extension) AS file_extension - FROM git.line_changes - WHERE (sign = 1) AND (line_type NOT IN ('Punct', 'Empty')) - GROUP BY - path, - line - ) AS added_code - INNER JOIN - ( - SELECT - path, - line, - max(time) AS time - FROM git.line_changes - WHERE (sign = -1) AND (line_type NOT IN ('Punct', 'Empty')) - GROUP BY - path, - line - ) AS removed_code USING (path, line) - WHERE (removed_code.time > added_code.time) AND (path IN (current_files)) AND (file_extension IN ('h', 'cpp', 'sql')) - ) -SELECT - path, - avg(days_present) AS avg_code_age -FROM lines_removed -GROUP BY path -ORDER BY avg_code_age DESC -LIMIT 10 - -┌─path────────────────────────────────────────────────────────────┬──────avg_code_age─┐ -│ utils/corrector_utf8/corrector_utf8.cpp │ 1353.888888888889 │ -│ 
tests/queries/0_stateless/01288_shard_max_network_bandwidth.sql │ 881 │ -│ src/Functions/replaceRegexpOne.cpp │ 861 │ -│ src/Functions/replaceRegexpAll.cpp │ 861 │ -│ src/Functions/replaceOne.cpp │ 861 │ -│ utils/zookeeper-remove-by-list/main.cpp │ 838.25 │ -│ tests/queries/0_stateless/01356_state_resample.sql │ 819 │ -│ tests/queries/0_stateless/01293_create_role.sql │ 819 │ -│ src/Functions/ReplaceStringImpl.h │ 810 │ -│ src/Interpreters/createBlockSelector.cpp │ 795 │ -└─────────────────────────────────────────────────────────────────┴───────────────────┘ - -10 行の結果が含まれています。経過時間: 3.134 秒。処理された行数: 1613万、サイズ: 1.83 GB (毎秒 515 万行、毎秒 582.99 MB)。 -``` - -### 誰がより多くのテスト / CPP コード / コメントを書く傾向があるか {#who-tends-to-write-more-tests--cpp-code--comments} - -この質問にはいくつかのアプローチがあります。コードとテストの比率に焦点を当てると、このクエリは比較的シンプルです。`tests`を含むフォルダへの貢献の数をカウントし、全体の貢献数に対する比率を計算します。 - -特定の偏りを避けるため、20 回以上の変更を行ったユーザーに限定します。 - -[play](https://sql.clickhouse.com?query_id=JGKZSEQDPDTDKZXD3ZCGLE) - -```sql -SELECT - author, - countIf((file_extension IN ('h', 'cpp', 'sql', 'sh', 'py', 'expect')) AND (path LIKE '%tests%')) AS test, - countIf((file_extension IN ('h', 'cpp', 'sql')) AND (NOT (path LIKE '%tests%'))) AS code, - code / (code + test) AS ratio_code -FROM git.clickhouse_file_changes -GROUP BY author -HAVING code > 20 -ORDER BY code DESC -LIMIT 20 - -┌─author───────────────┬─test─┬──code─┬─────────ratio_code─┐ -│ Alexey Milovidov │ 6617 │ 41799 │ 0.8633303040317251 │ -│ Nikolai Kochetov │ 916 │ 13361 │ 0.9358408629263851 │ -│ alesapin │ 2408 │ 8796 │ 0.785076758300607 │ -│ kssenii │ 869 │ 6769 │ 0.8862267609321812 │ -│ Maksim Kita │ 799 │ 5862 │ 0.8800480408347096 │ -│ Alexander Tokmakov │ 1472 │ 5727 │ 0.7955271565495208 │ -│ Vitaly Baranov │ 1764 │ 5521 │ 0.7578586135895676 │ -│ Ivan Lezhankin │ 843 │ 4698 │ 0.8478613968597726 │ -│ Anton Popov │ 599 │ 4346 │ 0.8788675429726996 │ -│ Ivan │ 2630 │ 4269 │ 0.6187853312074214 │ -│ Azat Khuzhin │ 1664 │ 3697 │ 0.689610147360567 │ -│ Amos Bird │ 400 │ 2901 │ 0.8788245986064829 │ -│ proller │ 1207 │ 2377 │ 0.6632254464285714 │ -│ chertus │ 453 │ 2359 │ 0.8389046941678521 │ -│ alexey-milovidov │ 303 │ 2321 │ 0.8845274390243902 │ -│ Alexey Arno │ 169 │ 2310 │ 0.9318273497377975 │ -│ Vitaliy Lyudvichenko │ 334 │ 2283 │ 0.8723729461215132 │ -│ Robert Schulze │ 182 │ 2196 │ 0.9234650967199327 │ -│ CurtizJ │ 460 │ 2158 │ 0.8242933537051184 │ -│ Alexander Kuzmenkov │ 298 │ 2092 │ 0.8753138075313808 │ -└──────────────────────┴──────┴───────┴────────────────────┘ - -20 行の結果が含まれています。経過時間: 0.034 秒。処理された行数: 26.605万、サイズ: 4.65 MB (毎秒 793 万行、毎秒 138.76 MB)。 -``` - -この分布をヒストグラムとしてプロットすることができます。 - -[play](https://sql.clickhouse.com?query_id=S5AJIIRGSUAY1JXEVHQDAK) - -```sql -WITH ( - SELECT histogram(10)(ratio_code) AS hist - FROM - ( - SELECT - author, - countIf((file_extension IN ('h', 'cpp', 'sql', 'sh', 'py', 'expect')) AND (path LIKE '%tests%')) AS test, - countIf((file_extension IN ('h', 'cpp', 'sql')) AND (NOT (path LIKE '%tests%'))) AS code, - code / (code + test) AS ratio_code - FROM git.clickhouse_file_changes - GROUP BY author - HAVING code > 20 - ORDER BY code DESC - LIMIT 20 - ) - ) AS hist -SELECT - arrayJoin(hist).1 AS lower, - arrayJoin(hist).2 AS upper, - bar(arrayJoin(hist).3, 0, 100, 500) AS bar - -┌──────────────lower─┬──────────────upper─┬─bar───────────────────────────┐ -│ 0.6187853312074214 │ 0.6410053888179964 │ █████ │ -│ 0.6410053888179964 │ 0.6764177968945693 │ █████ │ -│ 0.6764177968945693 │ 0.7237343804750673 │ █████ │ -│ 0.7237343804750673 │ 0.7740802855073157 │ 
█████▋ │ -│ 0.7740802855073157 │ 0.807297655565091 │ ████████▋ │ -│ 0.807297655565091 │ 0.8338381996094653 │ ██████▎ │ -│ 0.8338381996094653 │ 0.8533566747727687 │ ████████▋ │ -│ 0.8533566747727687 │ 0.871392376017531 │ █████████▍ │ -│ 0.871392376017531 │ 0.904916108899021 │ ████████████████████████████▋ │ -│ 0.904916108899021 │ 0.9358408629263851 │ █████████████████▌ │ -└────────────────────┴────────────────────┴───────────────────────────────┘ - -10 行の結果が含まれています。経過時間: 0.051 秒。処理された行数: 26.605万、サイズ: 4.65 MB (毎秒 524 万行、毎秒 91.64 MB)。 -``` - -ほとんどの貢献者は、予想通り、テストよりも多くのコードを書いています。 - -コードを貢献するときに最も多くコメントを追加するのは誰でしょうか? - -[play](https://sql.clickhouse.com?query_id=EXPHDIURBTOXXOK1TGNNYD) - -```sql -SELECT - author, - avg(ratio_comments) AS avg_ratio_comments, - sum(code) AS code -FROM -( - SELECT - author, - commit_hash, - countIf(line_type = 'Comment') AS comments, - countIf(line_type = 'Code') AS code, - if(comments > 0, comments / (comments + code), 0) AS ratio_comments - FROM git.clickhouse_line_changes - GROUP BY - author, - commit_hash -) -GROUP BY author -ORDER BY code DESC -LIMIT 10 -┌─author─────────────┬──avg_ratio_comments─┬────code─┐ -│ Alexey Milovidov │ 0.1034915408309902 │ 1147196 │ -│ s-kat │ 0.1361718900215362 │ 614224 │ -│ Nikolai Kochetov │ 0.08722993407690126 │ 218328 │ -│ alesapin │ 0.1040477684726504 │ 198082 │ -│ Vitaly Baranov │ 0.06446875712939285 │ 161801 │ -│ Maksim Kita │ 0.06863376297549255 │ 156381 │ -│ Alexey Arno │ 0.11252677608033655 │ 146642 │ -│ Vitaliy Zakaznikov │ 0.06199215397180561 │ 138530 │ -│ kssenii │ 0.07455322590796751 │ 131143 │ -│ Artur │ 0.12383737231074826 │ 121484 │ -└────────────────────┴─────────────────────┴─────────┘ -10 行の結果が含まれています。経過時間: 0.290 秒。処理された行数: 754 万、サイズ: 394.57 MB (毎秒 2600 万行、毎秒 1.36 GB)。 -``` - -コードの貢献をもとにソートしています。意外と高い%は、私たちの最大の貢献者に見られ、私たちのコードが非常に読みやすい理由の一部です。 - -### 作者のコミットが時間に伴ってコード / コメントの割合に関してどう変化するか {#how-does-an-authors-commits-change-over-time-with-respect-to-codecomments-percentage} - -これを作者ごとに計算するのは簡単です。 - -```sql -SELECT - author, - countIf(line_type = 'Code') AS code_lines, - countIf((line_type = 'Comment') OR (line_type = 'Punct')) AS comments, - code_lines / (comments + code_lines) AS ratio_code, - toStartOfWeek(time) AS week -FROM git.line_changes -GROUP BY - time, - author -ORDER BY - author ASC, - time ASC -LIMIT 10 - -┌─author──────────────────────┬─code_lines─┬─comments─┬─────────ratio_code─┬───────week─┐ -│ 1lann │ 8 │ 0 │ 1 │ 2022-03-06 │ -│ 20018712 │ 2 │ 0 │ 1 │ 2020-09-13 │ -│ 243f6a8885a308d313198a2e037 │ 0 │ 2 │ 0 │ 2020-12-06 │ -│ 243f6a8885a308d313198a2e037 │ 0 │ 112 │ 0 │ 2020-12-06 │ -│ 243f6a8885a308d313198a2e037 │ 0 │ 14 │ 0 │ 2020-12-06 │ -│ 3ldar-nasyrov │ 2 │ 0 │ 1 │ 2021-03-14 │ -│ 821008736@qq.com │ 27 │ 2 │ 0.9310344827586207 │ 2019-04-21 │ -│ ANDREI STAROVEROV │ 182 │ 60 │ 0.7520661157024794 │ 2021-05-09 │ -│ ANDREI STAROVEROV │ 7 │ 0 │ 1 │ 2021-05-09 │ -│ ANDREI STAROVEROV │ 32 │ 12 │ 0.7272727272727273 │ 2021-05-09 │ -└─────────────────────────────┴────────────┴──────────┴────────────────────┴────────────┘ - -10 行の結果が含まれています。経過時間: 0.145 秒。処理された行数: 754 万、サイズ: 51.09 MB (毎秒 5183 万行、毎秒 351.44 MB)。 -``` - -理想的には、すべての作者がコミットを開始した最初の日から、これがどのように集計で変化するかを確認したいです。彼らは徐々に書くコメントの数を減らしているのでしょうか? 
- -これを計算するために、まず各作者のコメントの割合を時間の経過とともに算出します。これは、[誰がより多くのテスト / CPP コード / コメントを書く傾向があるか](#who-tends-to-write-more-tests--cpp-code--comments)に似ています。これらは各作者の開始日と結合され、コメントの割合を週のオフセットで計算することができます。 - -すべての作者にわたって、平均を週のオフセットで計算した後、結果をサンプリングして10週ごとに選択します。 - -[play](https://sql.clickhouse.com?query_id=SBHEWR8XC4PRHY13HPPKCN) - -```sql -WITH author_ratios_by_offset AS - ( - SELECT - author, - dateDiff('week', start_dates.start_date, contributions.week) AS week_offset, - ratio_code - FROM - ( - SELECT - author, - toStartOfWeek(min(time)) AS start_date - FROM git.line_changes - WHERE file_extension IN ('h', 'cpp', 'sql') - GROUP BY author AS start_dates - ) AS start_dates - INNER JOIN - ( - SELECT - author, - countIf(line_type = 'Code') AS code, - countIf((line_type = 'Comment') OR (line_type = 'Punct')) AS comments, - comments / (comments + code) AS ratio_code, - toStartOfWeek(time) AS week - FROM git.line_changes - WHERE (file_extension IN ('h', 'cpp', 'sql')) AND (sign = 1) - GROUP BY - time, - author - HAVING code > 20 - ORDER BY - author ASC, - time ASC - ) AS contributions USING (author) - ) -SELECT - week_offset, - avg(ratio_code) AS avg_code_ratio -FROM author_ratios_by_offset -GROUP BY week_offset -HAVING (week_offset % 10) = 0 -ORDER BY week_offset ASC -LIMIT 20 - -┌─week_offset─┬──────avg_code_ratio─┐ -│ 0 │ 0.21626798253005078 │ -│ 10 │ 0.18299433892099454 │ -│ 20 │ 0.22847255749045017 │ -│ 30 │ 0.2037816688365288 │ -│ 40 │ 0.1987063517030308 │ -│ 50 │ 0.17341406302829748 │ -│ 60 │ 0.1808884776496144 │ -│ 70 │ 0.18711773536450496 │ -│ 80 │ 0.18905573684766458 │ -│ 90 │ 0.2505147771581594 │ -│ 100 │ 0.2427673990917429 │ -│ 110 │ 0.19088569009169926 │ -│ 120 │ 0.14218574654598348 │ -│ 130 │ 0.20894252550489317 │ -│ 140 │ 0.22316626978848397 │ -│ 150 │ 0.1859507592277053 │ -│ 160 │ 0.22007759757363546 │ -│ 170 │ 0.20406936638195144 │ -│ 180 │ 0.1412102467834332 │ -│ 190 │ 0.20677550885049117 │ -└─────────────┴─────────────────────┘ - -20 行の結果が含まれています。経過時間: 0.167 秒。処理された行数: 1507万、サイズ: 101.74 MB (毎秒 905 万行、毎秒 610.98 MB)。 -``` - -励ましいことに、私たちのコメントの%はかなり一定しており、著者が貢献を続けるにつれて低下することはありません。 - -### コードが書き直されるまでの平均時間と中央値(コード減衰の半減期)は何か? 
{#what-is-the-average-time-before-code-will-be-rewritten-and-the-median-half-life-of-code-decay} - -前のクエリで示した[最も多く書き直されたファイルや複数の著者によるファイルの一覧](#list-files-that-were-rewritten-most-number-of-times)の同じ原則を使用して、すべてのファイルを考慮に入れて書き直しを特定できます。ウィンドウ関数を使用して各ファイルの書き直し間の時間を計算します。これにより、すべてのファイルでの平均と中央値を計算できます。 - -[play](https://sql.clickhouse.com?query_id=WSHUEPJP9TNJUH7QITWWOR) - -```sql -WITH - changes AS - ( - SELECT - path, - commit_hash, - max_time, - type, - num_added, - num_deleted, - sum(num_added - num_deleted) OVER (PARTITION BY path ORDER BY max_time ASC) AS current_size, - if(current_size > 0, num_added / current_size, 0) AS percent_add, - if(current_size > 0, num_deleted / current_size, 0) AS percent_delete - FROM - ( - SELECT - path, - max(time) AS max_time, - commit_hash, - any(lines_added) AS num_added, - any(lines_deleted) AS num_deleted, - any(change_type) AS type - FROM git.file_changes - WHERE (change_type IN ('Add', 'Modify')) AND (file_extension IN ('h', 'cpp', 'sql')) - GROUP BY - path, - commit_hash - ORDER BY - path ASC, - max_time ASC - ) - ), - rewrites AS - ( - SELECT - *, - any(max_time) OVER (PARTITION BY path ORDER BY max_time ASC ROWS BETWEEN 1 PRECEDING AND CURRENT ROW) AS previous_rewrite, - dateDiff('day', previous_rewrite, max_time) AS rewrite_days - FROM changes - WHERE (type = 'Modify') AND (percent_add >= 0.5) AND (percent_delete >= 0.5) AND (current_size > 50) - ) -SELECT - avgIf(rewrite_days, rewrite_days > 0) AS avg_rewrite_time, - quantilesTimingIf(0.5)(rewrite_days, rewrite_days > 0) AS half_life -FROM rewrites - -┌─avg_rewrite_time─┬─half_life─┐ -│ 122.2890625 │ [23] │ -└──────────────────┴───────────┘ - -1 行の結果が含まれています。経過時間: 0.388 秒。処理された行数: 26.605万、サイズ: 22.85 MB (毎秒 685.82 千行、毎秒 58.89 MB)。 -``` - -### いつコードを書くのが最も悪いか(最も書き直される可能性が高いコード) {#what-is-the-worst-time-to-write-code-in-sense-that-the-code-has-highest-chance-to-be-re-written} - -[コードが書き直されるまでの平均時間と中央値(コード減衰の半減期)](#what-is-the-average-time-before-code-will-be-rewritten-and-the-median-half-life-of-code-decay)と[最も多くの著者によって書き直されたファイルの一覧](#list-files-that-were-rewritten-most-number-of-times)と類似していますが、曜日ごとに集約します。必要に応じて調整してください(たとえば、月ごと)。 - -[play](https://sql.clickhouse.com?query_id=8PQNWEWHAJTGN6FTX59KH2) - -```sql -WITH - changes AS - ( - SELECT - path, - commit_hash, - max_time, - type, - num_added, - num_deleted, - sum(num_added - num_deleted) OVER (PARTITION BY path ORDER BY max_time ASC) AS current_size, - if(current_size > 0, num_added / current_size, 0) AS percent_add, - if(current_size > 0, num_deleted / current_size, 0) AS percent_delete - FROM - ( - SELECT - path, - max(time) AS max_time, - commit_hash, - any(file_lines_added) AS num_added, - any(file_lines_deleted) AS num_deleted, - any(file_change_type) AS type - FROM git.line_changes - WHERE (file_change_type IN ('Add', 'Modify')) AND (file_extension IN ('h', 'cpp', 'sql')) - GROUP BY - path, - commit_hash - ORDER BY - path ASC, - max_time ASC - ) - ), - rewrites AS - ( - SELECT any(max_time) OVER (PARTITION BY path ORDER BY max_time ASC ROWS BETWEEN 1 PRECEDING AND CURRENT ROW) AS previous_rewrite - FROM changes - WHERE (type = 'Modify') AND (percent_add >= 0.5) AND (percent_delete >= 0.5) AND (current_size > 50) - ) -SELECT - dayOfWeek(previous_rewrite) AS dayOfWeek, - count() AS num_re_writes -FROM rewrites -GROUP BY dayOfWeek - -┌─dayOfWeek─┬─num_re_writes─┐ -│ 1 │ 111 │ -│ 2 │ 121 │ -│ 3 │ 91 │ -│ 4 │ 111 │ -│ 5 │ 90 │ -│ 6 │ 64 │ -│ 7 │ 46 │ -└───────────┴───────────────┘ - -7 行の結果が含まれています。経過時間: 0.466 秒。処理された行数: 754 万、サイズ: 701.52 MB (毎秒 
1615 万行、毎秒 1.50 GB)。 -``` - -### どの著者のコードが最も「粘着性」があるか {#which-authors-code-is-the-most-sticky} - -「粘着性」とは、著者のコードがどのくらいの期間書き直されずに保持されるかを定義します。前の質問[コードが書き直されるまでの平均時間と中央値(コード減衰の半減期)](#what-is-the-average-time-before-code-will-be-rewritten-and-the-median-half-life-of-code-decay)に類似しており、書き直しの基準として、ファイルへの追加が50%、削除も50%することを考慮しています。著者ごとに平均書き直し時間を計算し、2つ以上のファイルを持つ貢献者のみを考慮します。 - -[play](https://sql.clickhouse.com?query_id=BKHLVVWN5SET1VTIFQ8JVK) - -```sql -WITH - changes AS - ( - SELECT - path, - author, - commit_hash, - max_time, - type, - num_added, - num_deleted, - sum(num_added - num_deleted) OVER (PARTITION BY path ORDER BY max_time ASC) AS current_size, - if(current_size > 0, num_added / current_size, 0) AS percent_add, - if(current_size > 0, num_deleted / current_size, 0) AS percent_delete - FROM - ( - SELECT - path, - any(author) AS author, - max(time) AS max_time, - commit_hash, - any(file_lines_added) AS num_added, - any(file_lines_deleted) AS num_deleted, - any(file_change_type) AS type - FROM git.line_changes - WHERE (file_change_type IN ('Add', 'Modify')) AND (file_extension IN ('h', 'cpp', 'sql')) - GROUP BY - path, - commit_hash - ORDER BY - path ASC, - max_time ASC - ) - ), - rewrites AS - ( - SELECT - *, - any(max_time) OVER (PARTITION BY path ORDER BY max_time ASC ROWS BETWEEN 1 PRECEDING AND CURRENT ROW) AS previous_rewrite, - dateDiff('day', previous_rewrite, max_time) AS rewrite_days, - any(author) OVER (PARTITION BY path ORDER BY max_time ASC ROWS BETWEEN 1 PRECEDING AND CURRENT ROW) AS prev_author - FROM changes - WHERE (type = 'Modify') AND (percent_add >= 0.5) AND (percent_delete >= 0.5) AND (current_size > 50) - ) -SELECT - prev_author, - avg(rewrite_days) AS c, - uniq(path) AS num_files -FROM rewrites -GROUP BY prev_author -HAVING num_files > 2 -ORDER BY c DESC -LIMIT 10 - -┌─prev_author─────────┬──────────────────c─┬─num_files─┐ -│ Michael Kolupaev │ 304.6 │ 4 │ -│ alexey-milovidov │ 81.83333333333333 │ 4 │ -│ Alexander Kuzmenkov │ 64.5 │ 5 │ -│ Pavel Kruglov │ 55.8 │ 6 │ -│ Alexey Milovidov │ 48.416666666666664 │ 90 │ -│ Amos Bird │ 42.8 │ 4 │ -│ alesapin │ 38.083333333333336 │ 12 │ -│ Nikolai Kochetov │ 33.18421052631579 │ 26 │ -│ Alexander Tokmakov │ 31.866666666666667 │ 12 │ -│ Alexey Zatelepin │ 22.5 │ 4 │ -└─────────────────────┴────────────────────┴───────────┘ - -10 行の結果が含まれています。経過時間: 0.555 秒。処理された行数: 754 万、サイズ: 720.60 MB (毎秒 1358 万行、毎秒 1.30 GB)。 -``` - -### 作者ごとの連続コミット日数 {#most-consecutive-days-of-commits-by-an-author} - -このクエリでは、最初に作者がコミットした日を計算する必要があります。ウィンドウ関数を使用して、作者ごとにコミット日をパーティション化し、コミット間の日数を計算します。各コミットに対して、前回のコミットからの時間が1日であれば連続しているとマークし(1)、そうでなければ0とします。この結果を`consecutive_day`に保存します。 - -続いて、各著者の最長連続1の配列を計算するために、配列関数を使用します。最初に`groupArray`関数を使用して、著者のすべての`consecutive_day`値を集約します。この1と0の配列を0の値で分割し、サブ配列に分けます。最後に、最長のサブ配列を計算します。 - -[play](https://sql.clickhouse.com?query_id=S3E64UYCAMDAYJRSXINVFR) - -```sql -WITH commit_days AS - ( - SELECT - author, - day, - any(day) OVER (PARTITION BY author ORDER BY day ASC ROWS BETWEEN 1 PRECEDING AND CURRENT ROW) AS previous_commit, - dateDiff('day', previous_commit, day) AS days_since_last, - if(days_since_last = 1, 1, 0) AS consecutive_day - FROM - ( - SELECT - author, - toStartOfDay(time) AS day - FROM git.commits - GROUP BY - author, - day - ORDER BY - author ASC, - day ASC - ) - ) -SELECT - author, - arrayMax(arrayMap(x -> length(x), arraySplit(x -> (x = 0), groupArray(consecutive_day)))) - 1 AS max_consecutive_days -FROM commit_days -GROUP BY author -ORDER BY max_consecutive_days DESC -LIMIT 10 - 
-┌─author───────────┬─max_consecutive_days─┐ -│ kssenii │ 32 │ -│ Alexey Milovidov │ 30 │ -│ alesapin │ 26 │ -│ Azat Khuzhin │ 23 │ -│ Nikolai Kochetov │ 15 │ -│ feng lv │ 11 │ -│ alexey-milovidov │ 11 │ -│ Igor Nikonov │ 11 │ -│ Maksim Kita │ 11 │ -│ Nikita Vasilev │ 11 │ -└──────────────────┴──────────────────────┘ - -10 行の結果が含まれています。経過時間: 0.025 秒。処理された行数: 6.278万、サイズ: 395.47 KB (毎秒 254 万行、毎秒 16.02 MB)。 -``` -### Line by line commit history of a file {#line-by-line-commit-history-of-a-file} - -ファイルは名前を変更できます。これが発生すると、`path` カラムはファイルの新しいパスに設定され、`old_path` は以前の場所を表します。例えば: - -[play](https://sql.clickhouse.com?query_id=AKTW3Z8JZAPQ4H9BH2ZFRX) - -```sql -SELECT - time, - path, - old_path, - commit_hash, - commit_message -FROM git.file_changes -WHERE (path = 'src/Storages/StorageReplicatedMergeTree.cpp') AND (change_type = 'Rename') - -┌────────────────time─┬─path────────────────────────────────────────┬─old_path─────────────────────────────────────┬─commit_hash──────────────────────────────┬─commit_message─┐ -│ 2020-04-03 16:14:31 │ src/Storages/StorageReplicatedMergeTree.cpp │ dbms/Storages/StorageReplicatedMergeTree.cpp │ 06446b4f08a142d6f1bc30664c47ded88ab51782 │ dbms/ → src/ │ -└─────────────────────┴─────────────────────────────────────────────┴──────────────────────────────────────────────┴──────────────────────────────────────────┴────────────────┘ - -1 行がセットされました。経過時間: 0.135 秒。処理された行数: 266.05 千、容量: 20.73 MB (1.98 百万行/s., 154.04 MB/s.) -``` - -これにより、ファイルの完全な履歴を表示することが難しくなります。なぜなら、すべての行またはファイルの変更を結びつける単一の値がないからです。 - -これに対処するために、ユーザー定義関数 (UDF) を使用します。これらは現在、再帰的にすることはできないため、ファイルの履歴を特定するには、互いに明示的に呼び出す一連の UDF を定義する必要があります。 - -つまり、名前の変更を最大深度まで追跡できます - 以下の例は 5 深です。ファイルがこれ以上名前を変更される可能性は低いため、現時点ではこれで十分です。 - -```sql -CREATE FUNCTION file_path_history AS (n) -> if(empty(n), [], arrayConcat([n], file_path_history_01((SELECT if(empty(old_path), Null, old_path) FROM git.file_changes WHERE path = n AND (change_type = 'Rename' OR change_type = 'Add') LIMIT 1)))); -CREATE FUNCTION file_path_history_01 AS (n) -> if(isNull(n), [], arrayConcat([n], file_path_history_02((SELECT if(empty(old_path), Null, old_path) FROM git.file_changes WHERE path = n AND (change_type = 'Rename' OR change_type = 'Add') LIMIT 1)))); -CREATE FUNCTION file_path_history_02 AS (n) -> if(isNull(n), [], arrayConcat([n], file_path_history_03((SELECT if(empty(old_path), Null, old_path) FROM git.file_changes WHERE path = n AND (change_type = 'Rename' OR change_type = 'Add') LIMIT 1)))); -CREATE FUNCTION file_path_history_03 AS (n) -> if(isNull(n), [], arrayConcat([n], file_path_history_04((SELECT if(empty(old_path), Null, old_path) FROM git.file_changes WHERE path = n AND (change_type = 'Rename' OR change_type = 'Add') LIMIT 1)))); -CREATE FUNCTION file_path_history_04 AS (n) -> if(isNull(n), [], arrayConcat([n], file_path_history_05((SELECT if(empty(old_path), Null, old_path) FROM git.file_changes WHERE path = n AND (change_type = 'Rename' OR change_type = 'Add') LIMIT 1)))); -CREATE FUNCTION file_path_history_05 AS (n) -> if(isNull(n), [], [n]); -``` - -`file_path_history('src/Storages/StorageReplicatedMergeTree.cpp')` を呼び出すことで、名前の変更履歴を再帰的に探索します。各関数は `old_path` を用いて次のレベルを呼び出します。結果は `arrayConcat` を使用して結合されます。 - -例えば: - -```sql -SELECT file_path_history('src/Storages/StorageReplicatedMergeTree.cpp') AS paths - -┌─paths─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┐ -│ 
['src/Storages/StorageReplicatedMergeTree.cpp','dbms/Storages/StorageReplicatedMergeTree.cpp','dbms/src/Storages/StorageReplicatedMergeTree.cpp'] │ -└───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┘ - -1 行がセットされました。経過時間: 0.074 秒。処理された行数: 344.06 千、容量: 6.27 MB (4.65 百万行/s., 84.71 MB/s.) -``` - -この機能を使用して、ファイルの完全な履歴に対するコミットを組み立てることができます。この例では、各 `path` 値に対して 1 つのコミットを示します。 - -```sql -SELECT - time, - substring(commit_hash, 1, 11) AS commit, - change_type, - author, - path, - commit_message -FROM git.file_changes -WHERE path IN file_path_history('src/Storages/StorageReplicatedMergeTree.cpp') -ORDER BY time DESC -LIMIT 1 BY path -FORMAT PrettyCompactMonoBlock - -┌────────────────time─┬─commit──────┬─change_type─┬─author─────────────┬─path─────────────────────────────────────────────┬─commit_message──────────────────────────────────────────────────────────────────┐ -│ 2022-10-30 16:30:51 │ c68ab231f91 │ Modify │ Alexander Tokmakov │ src/Storages/StorageReplicatedMergeTree.cpp │ fix accessing part in Deleting state │ -│ 2020-04-03 15:21:24 │ 38a50f44d34 │ Modify │ alesapin │ dbms/Storages/StorageReplicatedMergeTree.cpp │ Remove empty line │ -│ 2020-04-01 19:21:27 │ 1d5a77c1132 │ Modify │ alesapin │ dbms/src/Storages/StorageReplicatedMergeTree.cpp │ Tried to add ability to rename primary key columns but just banned this ability │ -└─────────────────────┴─────────────┴─────────────┴────────────────────┴──────────────────────────────────────────────────┴─────────────────────────────────────────────────────────────────────────────────┘ - -3 行がセットされました。経過時間: 0.170 秒。処理された行数: 611.53 千、容量: 41.76 MB (3.60 百万行/s., 246.07 MB/s.) -``` -## Unsolved Questions {#unsolved-questions} -### Git blame {#git-blame} - -これは、現在のところ配列関数で状態を保持できなため、正確な結果を得るのが特に難しいです。各反復で状態を保持できる `arrayFold` または `arrayReduce` を使用することで可能になるでしょう。 - -高レベルの分析に十分な近似解は次のようになります: - -```sql -SELECT - line_number_new, - argMax(author, time), - argMax(line, time) -FROM git.line_changes -WHERE path IN file_path_history('src/Storages/StorageReplicatedMergeTree.cpp') -GROUP BY line_number_new -ORDER BY line_number_new ASC -LIMIT 20 - -┌─line_number_new─┬─argMax(author, time)─┬─argMax(line, time)────────────────────────────────────────────┐ -│ 1 │ Alexey Milovidov │ #include │ -│ 2 │ s-kat │ #include │ -│ 3 │ Anton Popov │ #include │ -│ 4 │ Alexander Burmak │ #include │ -│ 5 │ avogar │ #include │ -│ 6 │ Alexander Burmak │ #include │ -│ 7 │ Alexander Burmak │ #include │ -│ 8 │ Alexander Burmak │ #include │ -│ 9 │ Alexander Burmak │ #include │ -│ 10 │ Alexander Burmak │ #include │ -│ 11 │ Alexander Burmak │ #include │ -│ 12 │ Nikolai Kochetov │ #include │ -│ 13 │ alesapin │ #include │ -│ 14 │ alesapin │ │ -│ 15 │ Alexey Milovidov │ #include │ -│ 16 │ Alexey Zatelepin │ #include │ -│ 17 │ CurtizJ │ #include │ -│ 18 │ Kirill Shvakov │ #include │ -│ 19 │ s-kat │ #include │ -│ 20 │ Nikita Mikhaylov │ #include │ -└─────────────────┴──────────────────────┴───────────────────────────────────────────────────────────────┘ -20 行がセットされました。経過時間: 0.547 秒。処理された行数: 7.88 百万、容量: 679.20 MB (14.42 百万行/s., 1.24 GB/s.) 
-``` - -ここでの正確で改善された解決策を歓迎します。 -## Related Content {#related-content} - -- Blog: [Git commits and our community](https://clickhouse.com/blog/clickhouse-git-community-commits) -- Blog: [Window and array functions for Git commit sequences](https://clickhouse.com/blog/clickhouse-window-array-functions-git-commits) -- Blog: [Building a Real-time Analytics Apps with ClickHouse and Hex](https://clickhouse.com/blog/building-real-time-applications-with-clickhouse-and-hex-notebook-keeper-engine) -- Blog: [A Story of Open-source GitHub Activity using ClickHouse + Grafana](https://clickhouse.com/blog/introduction-to-clickhouse-and-grafana-webinar) diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/github.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/github.md.hash deleted file mode 100644 index 898c65b40ad..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/github.md.hash +++ /dev/null @@ -1 +0,0 @@ -ec69f6a684c9d5cc diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/laion.md b/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/laion.md deleted file mode 100644 index d64517bfd5d..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/laion.md +++ /dev/null @@ -1,287 +0,0 @@ ---- -description: 'Dataset containing 400 million images with English image captions' -sidebar_label: 'Laion-400M dataset' -slug: '/getting-started/example-datasets/laion-400m-dataset' -title: 'Laion-400M dataset' ---- - - - -[Laion-400Mデータセット](https://laion.ai/blog/laion-400-open-dataset/)は、英語の画像キャプションを持つ4億の画像を含んでいます。現在、Laionは[さらに大きなデータセット](https://laion.ai/blog/laion-5b/)を提供していますが、取り扱いは似ています。 - -このデータセットには、画像のURL、画像および画像キャプションの埋め込み、画像と画像キャプションの間の類似度スコア、さらに画像の幅/高さ、ライセンス、NSFWフラグなどのメタデータが含まれています。このデータセットを使用して、ClickHouseでの[近似最近傍検索](../../engines/table-engines/mergetree-family/annindexes.md)を示すことができます。 - -## データ準備 {#data-preparation} - -埋め込みとメタデータは、生のデータの別々のファイルに保存されています。データ準備ステップでは、データをダウンロードし、ファイルをマージし、CSVに変換してClickHouseにインポートします。以下の`download.sh`スクリプトを使用できます: - -```bash -number=${1} -if [[ $number == '' ]]; then - number=1 -fi; -wget --tries=100 https://deploy.laion.ai/8f83b608504d46bb81708ec86e912220/embeddings/img_emb/img_emb_${number}.npy # 画像埋め込みをダウンロード -wget --tries=100 https://deploy.laion.ai/8f83b608504d46bb81708ec86e912220/embeddings/text_emb/text_emb_${number}.npy # テキスト埋め込みをダウンロード -wget --tries=100 https://deploy.laion.ai/8f83b608504d46bb81708ec86e912220/embeddings/metadata/metadata_${number}.parquet # メタデータをダウンロード -python3 process.py $number # ファイルをマージしてCSVに変換 -``` -スクリプト`process.py`は以下のように定義されています: - -```python -import pandas as pd -import numpy as np -import os -import sys - -str_i = str(sys.argv[1]) -npy_file = "img_emb_" + str_i + '.npy' -metadata_file = "metadata_" + str_i + '.parquet' -text_npy = "text_emb_" + str_i + '.npy' - - -# 全ファイルをロード -im_emb = np.load(npy_file) -text_emb = np.load(text_npy) -data = pd.read_parquet(metadata_file) - - -# ファイルを組み合わせる -data = pd.concat([data, pd.DataFrame({"image_embedding" : [*im_emb]}), pd.DataFrame({"text_embedding" : [*text_emb]})], axis=1, copy=False) - - -# ClickHouseにインポートするカラム -data = data[['url', 'caption', 'NSFW', 'similarity', "image_embedding", "text_embedding"]] - - -# np.arrayをリストに変換 -data['image_embedding'] = data['image_embedding'].apply(lambda x: list(x)) -data['text_embedding'] = data['text_embedding'].apply(lambda 
x: list(x)) - - -# キャプションに含まれる様々な引用符に対してこの小さなハックが必要 -data['caption'] = data['caption'].apply(lambda x: x.replace("'", " ").replace('"', " ")) - - -# データをCSVファイルとしてエクスポート -data.to_csv(str_i + '.csv', header=False) - - -# 生データファイルを削除 -os.system(f"rm {npy_file} {metadata_file} {text_npy}") -``` - -データ準備パイプラインを開始するには、次のコマンドを実行します: - -```bash -seq 0 409 | xargs -P1 -I{} bash -c './download.sh {}' -``` - -データセットは410のファイルに分割されており、各ファイルには約100万行が含まれています。データの小さなサブセットで作業したい場合は、リミットを調整するだけです。例:`seq 0 9 | ...`。 - -(上記のPythonスクリプトは非常に遅い(1ファイルあたり約2〜10分)、多くのメモリを消費し(ファイルごとに41 GB)、結果として生成されるCSVファイルは大きい(各10 GB)ため、注意が必要です。十分なRAMがある場合は、並列性を高めるために`-P1`の数値を増やします。これでもまだ遅い場合は、より良いインジェスト手順を考案することを検討してください。たとえば、.npyファイルをparquetに変換し、その後に他の処理をClickHouseで行うことが考えられます。) - -## テーブルの作成 {#create-table} - -インデックスなしでテーブルを作成するには、次のコマンドを実行します: - -```sql -CREATE TABLE laion -( - `id` Int64, - `url` String, - `caption` String, - `NSFW` String, - `similarity` Float32, - `image_embedding` Array(Float32), - `text_embedding` Array(Float32) -) -ENGINE = MergeTree -ORDER BY id -SETTINGS index_granularity = 8192 -``` - -CSVファイルをClickHouseにインポートするには、次のコマンドを実行します: - -```sql -INSERT INTO laion FROM INFILE '{path_to_csv_files}/*.csv' -``` - -## ANNインデックスなしでのブルートフォースANN検索の実行 {#run-a-brute-force-ann-search-without-ann-index} - -ブルートフォース近似最近傍検索を実行するには、次のコマンドを実行します: - -```sql -SELECT url, caption FROM laion ORDER BY L2Distance(image_embedding, {target:Array(Float32)}) LIMIT 30 -``` - -`target`は512要素の配列で、クライアントパラメータです。そのような配列を取得する便利な方法は、この記事の終わりに紹介します。今のところ、ランダムな猫の画像の埋め込みを`target`として実行できます。 - -**結果** - -```markdown -┌─url───────────────────────────────────────────────────────────────────────────────────────────────────────────┬─caption────────────────────────────────────────────────────────────────┐ -│ https://s3.amazonaws.com/filestore.rescuegroups.org/6685/pictures/animals/13884/13884995/63318230_463x463.jpg │ Adoptable Female Domestic Short Hair │ -│ https://s3.amazonaws.com/pet-uploads.adoptapet.com/8/b/6/239905226.jpg │ Adopt A Pet :: Marzipan - New York, NY │ -│ http://d1n3ar4lqtlydb.cloudfront.net/9/2/4/248407625.jpg │ Adopt A Pet :: Butterscotch - New Castle, DE │ -│ https://s3.amazonaws.com/pet-uploads.adoptapet.com/e/e/c/245615237.jpg │ Adopt A Pet :: Tiggy - Chicago, IL │ -│ http://pawsofcoronado.org/wp-content/uploads/2012/12/rsz_pumpkin.jpg │ Pumpkin an orange tabby kitten for adoption │ -│ https://s3.amazonaws.com/pet-uploads.adoptapet.com/7/8/3/188700997.jpg │ Adopt A Pet :: Brian the Brad Pitt of cats - Frankfort, IL │ -│ https://s3.amazonaws.com/pet-uploads.adoptapet.com/8/b/d/191533561.jpg │ Domestic Shorthair Cat for adoption in Mesa, Arizona - Charlie │ -│ https://s3.amazonaws.com/pet-uploads.adoptapet.com/0/1/2/221698235.jpg │ Domestic Shorthair Cat for adoption in Marietta, Ohio - Daisy (Spayed) │ -└───────────────────────────────────────────────────────────────────────────────────────────────────────────────┴────────────────────────────────────────────────────────────────────────┘ - -8 rows in set. Elapsed: 6.432 sec. Processed 19.65 million rows, 43.96 GB (3.06 million rows/s., 6.84 GB/s.) 
-``` - -## ANNインデックスを使用したANNの実行 {#run-a-ann-with-an-ann-index} - -ANNインデックスを持つ新しいテーブルを作成し、既存のテーブルからデータを挿入します: - -```sql -CREATE TABLE laion_annoy -( - `id` Int64, - `url` String, - `caption` String, - `NSFW` String, - `similarity` Float32, - `image_embedding` Array(Float32), - `text_embedding` Array(Float32), - INDEX annoy_image image_embedding TYPE annoy(), - INDEX annoy_text text_embedding TYPE annoy() -) -ENGINE = MergeTree -ORDER BY id -SETTINGS index_granularity = 8192; - -INSERT INTO laion_annoy SELECT * FROM laion; -``` - -デフォルトでは、AnnoyインデックスはL2距離をメトリックとして使用します。インデックスの作成や検索のためのさらなる調整方法については、Annoyインデックスの[ドキュメント](../../engines/table-engines/mergetree-family/annindexes.md)に記載されています。さて、同じクエリで再度確認してみましょう: - -```sql -SELECT url, caption FROM laion_annoy ORDER BY l2Distance(image_embedding, {target:Array(Float32)}) LIMIT 8 -``` - -**結果** - -```response -┌─url──────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┬─caption──────────────────────────────────────────────────────────────┐ -│ http://tse1.mm.bing.net/th?id=OIP.R1CUoYp_4hbeFSHBaaB5-gHaFj │ bed bugs and pets can cats carry bed bugs pets adviser │ -│ http://pet-uploads.adoptapet.com/1/9/c/1963194.jpg?336w │ Domestic Longhair Cat for adoption in Quincy, Massachusetts - Ashley │ -│ https://thumbs.dreamstime.com/t/cat-bed-12591021.jpg │ Cat on bed Stock Image │ -│ https://us.123rf.com/450wm/penta/penta1105/penta110500004/9658511-portrait-of-british-short-hair-kitten-lieing-at-sofa-on-sun.jpg │ Portrait of british short hair kitten lieing at sofa on sun. │ -│ https://www.easypetmd.com/sites/default/files/Wirehaired%20Vizsla%20(2).jpg │ Vizsla (Wirehaired) image 3 │ -│ https://images.ctfassets.net/yixw23k2v6vo/0000000200009b8800000000/7950f4e1c1db335ef91bb2bc34428de9/dog-cat-flickr-Impatience_1.jpg?w=600&h=400&fm=jpg&fit=thumb&q=65&fl=progressive │ dog and cat image │ -│ https://i1.wallbox.ru/wallpapers/small/201523/eaa582ee76a31fd.jpg │ cats, kittens, faces, tonkinese │ -│ https://www.baxterboo.com/images/breeds/medium/cairn-terrier.jpg │ Cairn Terrier Photo │ -└──────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┴──────────────────────────────────────────────────────────────────────┘ - -8 rows in set. Elapsed: 0.641 sec. Processed 22.06 thousand rows, 49.36 MB (91.53 thousand rows/s., 204.81 MB/s.) 
-``` - -スピードは大幅に向上しましたが、精度が低下しました。これは、ANNインデックスが近似検索結果のみを提供するためです。例では類似画像埋め込みを検索しましたが、ポジティブな画像キャプション埋め込みをも検索することが可能です。 - -## UDFを使用した埋め込みの作成 {#creating-embeddings-with-udfs} - -通常、新しい画像や新しい画像キャプションのために埋め込みを作成し、データ内の類似画像/画像キャプションペアを検索したいと思います。[UDF](/sql-reference/functions/udf)を使用して、クライアントを離れることなく`target`ベクターを作成できます。データを作成し、検索のために新しい埋め込みを作成する際は、同じモデルを使用することが重要です。以下のスクリプトは、データセットの基盤となる`ViT-B/32`モデルを利用しています。 - -### テキスト埋め込み {#text-embeddings} - -最初に、次のPythonスクリプトをClickHouseデータパスの`user_scripts/`ディレクトリに保存し、実行可能にします(`chmod +x encode_text.py`)。 - -`encode_text.py`: - -```python -#!/usr/bin/python3 -import clip -import torch -import numpy as np -import sys - -if __name__ == '__main__': - device = "cuda" if torch.cuda.is_available() else "cpu" - model, preprocess = clip.load("ViT-B/32", device=device) - for text in sys.stdin: - inputs = clip.tokenize(text) - with torch.no_grad(): - text_features = model.encode_text(inputs)[0].tolist() - print(text_features) - sys.stdout.flush() -``` - -次に、ClickHouseサーバ構成ファイルの`/path/to/*_function.xml`で参照される場所に`encode_text_function.xml`を作成します。 - -```xml - - - executable - encode_text - Array(Float32) - - String - text - - TabSeparated - encode_text.py - 1000000 - - -``` - -これで、単純に次のように使用できます: - -```sql -SELECT encode_text('cat'); -``` -最初の実行は遅くなりますが、モデルをロードするためですが、繰り返しの実行は速くなります。その後、出力を`SET param_target=...`にコピーして、簡単にクエリを記述できます。 - -### 画像埋め込み {#image-embeddings} - -画像埋め込みも同様に作成できますが、画像キャプションテキストの代わりにローカル画像へのパスをPythonスクリプトに提供します。 - -`encode_image.py` - -```python -#!/usr/bin/python3 -import clip -import torch -import numpy as np -from PIL import Image -import sys - -if __name__ == '__main__': - device = "cuda" if torch.cuda.is_available() else "cpu" - model, preprocess = clip.load("ViT-B/32", device=device) - for text in sys.stdin: - image = preprocess(Image.open(text.strip())).unsqueeze(0).to(device) - with torch.no_grad(): - image_features = model.encode_image(image)[0].tolist() - print(image_features) - sys.stdout.flush() -``` - -`encode_image_function.xml` - -```xml - - - executable_pool - encode_image - Array(Float32) - - String - path - - TabSeparated - encode_image.py - 1000000 - - -``` - -次に、このクエリを実行します: - -```sql -SELECT encode_image('/path/to/your/image'); -``` diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/laion.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/laion.md.hash deleted file mode 100644 index 92da208c2a2..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/laion.md.hash +++ /dev/null @@ -1 +0,0 @@ -937b172d68ea3a04 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/menus.md b/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/menus.md deleted file mode 100644 index f724f527a2f..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/menus.md +++ /dev/null @@ -1,363 +0,0 @@ ---- -description: 'Dataset containing 1.3 million records of historical data on the menus - of hotels, restaurants and cafes with the dishes along with their prices.' -sidebar_label: 'New York Public Library "What''s on the Menu?" Dataset' -slug: '/getting-started/example-datasets/menus' -title: 'New York Public Library "What''s on the Menu?" Dataset' ---- - - - -The dataset is created by the New York Public Library. It contains historical data on the menus of hotels, restaurants and cafes with the dishes along with their prices. 
-
-Source: http://menus.nypl.org/data
-The data is in public domain.
-
-The data is from the library's archive and it may be incomplete and difficult for statistical analysis. Nevertheless it is also very yummy.
-The size is just 1.3 million records about dishes in the menus — it's a very small data volume for ClickHouse, but it's still a good example.
-
-## データセットのダウンロード {#download-dataset}
-
-Run the command:
-
-```bash
-wget https://s3.amazonaws.com/menusdata.nypl.org/gzips/2021_08_01_07_01_17_data.tgz
-
-# Option: Validate the checksum
-md5sum 2021_08_01_07_01_17_data.tgz
-
-# Checksum should be equal to: db6126724de939a5481e3160a2d67d15
-```
-
-Replace the link with an up-to-date one from http://menus.nypl.org/data if needed.
-Download size is about 35 MB.
-
-## データセットを展開 {#unpack-dataset}
-
-```bash
-tar xvf 2021_08_01_07_01_17_data.tgz
-```
-
-Uncompressed size is about 150 MB.
-
-The data is normalized and consists of four tables:
-- `Menu` — Information about menus: the name of the restaurant, the date when the menu was seen, etc.
-- `Dish` — Information about dishes: the name of the dish along with some characteristics.
-- `MenuPage` — Information about the pages in the menus, because every page belongs to some menu.
-- `MenuItem` — An item of the menu. A dish along with its price on some menu page: links to dish and menu page.
-
-## テーブルを作成 {#create-tables}
-
-We use the [Decimal](../../sql-reference/data-types/decimal.md) data type to store prices.
-
-```sql
-CREATE TABLE dish
-(
-    id UInt32,
-    name String,
-    description String,
-    menus_appeared UInt32,
-    times_appeared Int32,
-    first_appeared UInt16,
-    last_appeared UInt16,
-    lowest_price Decimal64(3),
-    highest_price Decimal64(3)
-) ENGINE = MergeTree ORDER BY id;
-
-CREATE TABLE menu
-(
-    id UInt32,
-    name String,
-    sponsor String,
-    event String,
-    venue String,
-    place String,
-    physical_description String,
-    occasion String,
-    notes String,
-    call_number String,
-    keywords String,
-    language String,
-    date String,
-    location String,
-    location_type String,
-    currency String,
-    currency_symbol String,
-    status String,
-    page_count UInt16,
-    dish_count UInt16
-) ENGINE = MergeTree ORDER BY id;
-
-CREATE TABLE menu_page
-(
-    id UInt32,
-    menu_id UInt32,
-    page_number UInt16,
-    image_id String,
-    full_height UInt16,
-    full_width UInt16,
-    uuid UUID
-) ENGINE = MergeTree ORDER BY id;
-
-CREATE TABLE menu_item
-(
-    id UInt32,
-    menu_page_id UInt32,
-    price Decimal64(3),
-    high_price Decimal64(3),
-    dish_id UInt32,
-    created_at DateTime,
-    updated_at DateTime,
-    xpos Float64,
-    ypos Float64
-) ENGINE = MergeTree ORDER BY id;
-```
-
-## データをインポート {#import-data}
-
-To upload the data into ClickHouse, run:
-
-```bash
-clickhouse-client --format_csv_allow_single_quotes 0 --input_format_null_as_default 0 --query "INSERT INTO dish FORMAT CSVWithNames" < Dish.csv
-clickhouse-client --format_csv_allow_single_quotes 0 --input_format_null_as_default 0 --query "INSERT INTO menu FORMAT CSVWithNames" < Menu.csv
-clickhouse-client --format_csv_allow_single_quotes 0 --input_format_null_as_default 0 --query "INSERT INTO menu_page FORMAT CSVWithNames" < MenuPage.csv
-clickhouse-client --format_csv_allow_single_quotes 0 --input_format_null_as_default 0 --date_time_input_format best_effort --query "INSERT INTO menu_item FORMAT CSVWithNames" < MenuItem.csv
-```
-
-We use the [CSVWithNames](../../interfaces/formats.md#csvwithnames) format as the data is represented by CSV with a header.
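Before moving on to the format settings below, a quick per-table row count confirms that all four imports succeeded (a minimal sanity check against the tables created above):

```sql
SELECT 'dish' AS tbl, count() AS cnt FROM dish
UNION ALL SELECT 'menu', count() FROM menu
UNION ALL SELECT 'menu_page', count() FROM menu_page
UNION ALL SELECT 'menu_item', count() FROM menu_item;
```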
- -We disable `format_csv_allow_single_quotes` as only double quotes are used for data fields and single quotes can be inside the values and should not confuse the CSV parser. - -We disable [input_format_null_as_default](/operations/settings/formats#input_format_null_as_default) as our data does not have [NULL](/operations/settings/formats#input_format_null_as_default). Otherwise ClickHouse will try to parse `\N` sequences and can be confused with `\` in data. - -The setting [date_time_input_format best_effort](/operations/settings/formats#date_time_input_format) allows to parse [DateTime](../../sql-reference/data-types/datetime.md) fields in wide variety of formats. For example, ISO-8601 without seconds like '2000-01-01 01:02' will be recognized. Without this setting only fixed DateTime format is allowed. - -## データを非正規化 {#denormalize-data} - -Data is presented in multiple tables in [normalized form](https://en.wikipedia.org/wiki/Database_normalization#Normal_forms). It means you have to perform [JOIN](/sql-reference/statements/select/join) if you want to query, e.g. dish names from menu items. -For typical analytical tasks it is way more efficient to deal with pre-JOINed data to avoid doing `JOIN` every time. It is called "denormalized" data. - -We will create a table `menu_item_denorm` where will contain all the data JOINed together: - -```sql -CREATE TABLE menu_item_denorm -ENGINE = MergeTree ORDER BY (dish_name, created_at) -AS SELECT - price, - high_price, - created_at, - updated_at, - xpos, - ypos, - dish.id AS dish_id, - dish.name AS dish_name, - dish.description AS dish_description, - dish.menus_appeared AS dish_menus_appeared, - dish.times_appeared AS dish_times_appeared, - dish.first_appeared AS dish_first_appeared, - dish.last_appeared AS dish_last_appeared, - dish.lowest_price AS dish_lowest_price, - dish.highest_price AS dish_highest_price, - menu.id AS menu_id, - menu.name AS menu_name, - menu.sponsor AS menu_sponsor, - menu.event AS menu_event, - menu.venue AS menu_venue, - menu.place AS menu_place, - menu.physical_description AS menu_physical_description, - menu.occasion AS menu_occasion, - menu.notes AS menu_notes, - menu.call_number AS menu_call_number, - menu.keywords AS menu_keywords, - menu.language AS menu_language, - menu.date AS menu_date, - menu.location AS menu_location, - menu.location_type AS menu_location_type, - menu.currency AS menu_currency, - menu.currency_symbol AS menu_currency_symbol, - menu.status AS menu_status, - menu.page_count AS menu_page_count, - menu.dish_count AS menu_dish_count -FROM menu_item - JOIN dish ON menu_item.dish_id = dish.id - JOIN menu_page ON menu_item.menu_page_id = menu_page.id - JOIN menu ON menu_page.menu_id = menu.id; -``` - -## データを検証 {#validate-data} - -Query: - -```sql -SELECT count() FROM menu_item_denorm; -``` - -Result: - -```text -┌─count()─┐ -│ 1329175 │ -└─────────┘ -``` - -## クエリを実行 {#run-queries} - -### 平均的な歴史的価格 {#query-averaged-historical-prices} - -Query: - -```sql -SELECT - round(toUInt32OrZero(extract(menu_date, '^\\d{4}')), -1) AS d, - count(), - round(avg(price), 2), - bar(avg(price), 0, 100, 100) -FROM menu_item_denorm -WHERE (menu_currency = 'Dollars') AND (d > 0) AND (d < 2022) -GROUP BY d -ORDER BY d ASC; -``` - -Result: - -```text -┌────d─┬─count()─┬─round(avg(price), 2)─┬─bar(avg(price), 0, 100, 100)─┐ -│ 1850 │ 618 │ 1.5 │ █▍ │ -│ 1860 │ 1634 │ 1.29 │ █▎ │ -│ 1870 │ 2215 │ 1.36 │ █▎ │ -│ 1880 │ 3909 │ 1.01 │ █ │ -│ 1890 │ 8837 │ 1.4 │ █▍ │ -│ 1900 │ 176292 │ 0.68 │ ▋ │ -│ 1910 │ 212196 │ 0.88 │ ▊ │ -│ 
1920 │ 179590 │ 0.74 │ ▋ │ -│ 1930 │ 73707 │ 0.6 │ ▌ │ -│ 1940 │ 58795 │ 0.57 │ ▌ │ -│ 1950 │ 41407 │ 0.95 │ ▊ │ -│ 1960 │ 51179 │ 1.32 │ █▎ │ -│ 1970 │ 12914 │ 1.86 │ █▋ │ -│ 1980 │ 7268 │ 4.35 │ ████▎ │ -│ 1990 │ 11055 │ 6.03 │ ██████ │ -│ 2000 │ 2467 │ 11.85 │ ███████████▋ │ -│ 2010 │ 597 │ 25.66 │ █████████████████████████▋ │ -└──────┴─────────┴──────────────────────┴──────────────────────────────┘ -``` - -Take it with a grain of salt. - -### ハンバーガーの価格 {#query-burger-prices} - -Query: - -```sql -SELECT - round(toUInt32OrZero(extract(menu_date, '^\\d{4}')), -1) AS d, - count(), - round(avg(price), 2), - bar(avg(price), 0, 50, 100) -FROM menu_item_denorm -WHERE (menu_currency = 'Dollars') AND (d > 0) AND (d < 2022) AND (dish_name ILIKE '%burger%') -GROUP BY d -ORDER BY d ASC; -``` - -Result: - -```text -┌────d─┬─count()─┬─round(avg(price), 2)─┬─bar(avg(price), 0, 50, 100)───────────┐ -│ 1880 │ 2 │ 0.42 │ ▋ │ -│ 1890 │ 7 │ 0.85 │ █▋ │ -│ 1900 │ 399 │ 0.49 │ ▊ │ -│ 1910 │ 589 │ 0.68 │ █▎ │ -│ 1920 │ 280 │ 0.56 │ █ │ -│ 1930 │ 74 │ 0.42 │ ▋ │ -│ 1940 │ 119 │ 0.59 │ █▏ │ -│ 1950 │ 134 │ 1.09 │ ██▏ │ -│ 1960 │ 272 │ 0.92 │ █▋ │ -│ 1970 │ 108 │ 1.18 │ ██▎ │ -│ 1980 │ 88 │ 2.82 │ █████▋ │ -│ 1990 │ 184 │ 3.68 │ ███████▎ │ -│ 2000 │ 21 │ 7.14 │ ██████████████▎ │ -│ 2010 │ 6 │ 18.42 │ ████████████████████████████████████▋ │ -└──────┴─────────┴──────────────────────┴───────────────────────────────────────┘ -``` - -### ウォッカ {#query-vodka} - -Query: - -```sql -SELECT - round(toUInt32OrZero(extract(menu_date, '^\\d{4}')), -1) AS d, - count(), - round(avg(price), 2), - bar(avg(price), 0, 50, 100) -FROM menu_item_denorm -WHERE (menu_currency IN ('Dollars', '')) AND (d > 0) AND (d < 2022) AND (dish_name ILIKE '%vodka%') -GROUP BY d -ORDER BY d ASC; -``` - -Result: - -```text -┌────d─┬─count()─┬─round(avg(price), 2)─┬─bar(avg(price), 0, 50, 100)─┐ -│ 1910 │ 2 │ 0 │ │ -│ 1920 │ 1 │ 0.3 │ ▌ │ -│ 1940 │ 21 │ 0.42 │ ▋ │ -│ 1950 │ 14 │ 0.59 │ █▏ │ -│ 1960 │ 113 │ 2.17 │ ████▎ │ -│ 1970 │ 37 │ 0.68 │ █▎ │ -│ 1980 │ 19 │ 2.55 │ █████ │ -│ 1990 │ 86 │ 3.6 │ ███████▏ │ -│ 2000 │ 2 │ 3.98 │ ███████▊ │ -└──────┴─────────┴──────────────────────┴─────────────────────────────┘ -``` - -To get vodka we have to write `ILIKE '%vodka%'` and this definitely makes a statement. - -### キャビア {#query-caviar} - -Let's print caviar prices. Also let's print a name of any dish with caviar. 
- -Query: - -```sql -SELECT - round(toUInt32OrZero(extract(menu_date, '^\\d{4}')), -1) AS d, - count(), - round(avg(price), 2), - bar(avg(price), 0, 50, 100), - any(dish_name) -FROM menu_item_denorm -WHERE (menu_currency IN ('Dollars', '')) AND (d > 0) AND (d < 2022) AND (dish_name ILIKE '%caviar%') -GROUP BY d -ORDER BY d ASC; -``` - -Result: - -```text -┌────d─┬─count()─┬─round(avg(price), 2)─┬─bar(avg(price), 0, 50, 100)──────┬─any(dish_name)──────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┐ -│ 1090 │ 1 │ 0 │ │ Caviar │ -│ 1880 │ 3 │ 0 │ │ Caviar │ -│ 1890 │ 39 │ 0.59 │ █▏ │ Butter and caviar │ -│ 1900 │ 1014 │ 0.34 │ ▋ │ Anchovy Caviar on Toast │ -│ 1910 │ 1588 │ 1.35 │ ██▋ │ 1/1 Brötchen Caviar │ -│ 1920 │ 927 │ 1.37 │ ██▋ │ ASTRAKAN CAVIAR │ -│ 1930 │ 289 │ 1.91 │ ███▋ │ Astrachan caviar │ -│ 1940 │ 201 │ 0.83 │ █▋ │ (SPECIAL) Domestic Caviar Sandwich │ -│ 1950 │ 81 │ 2.27 │ ████▌ │ Beluga Caviar │ -│ 1960 │ 126 │ 2.21 │ ████▍ │ Beluga Caviar │ -│ 1970 │ 105 │ 0.95 │ █▊ │ BELUGA MALOSSOL CAVIAR AMERICAN DRESSING │ -│ 1980 │ 12 │ 7.22 │ ██████████████▍ │ Authentic Iranian Beluga Caviar the world's finest black caviar presented in ice garni and a sampling of chilled 100° Russian vodka │ -│ 1990 │ 74 │ 14.42 │ ████████████████████████████▋ │ Avocado Salad, Fresh cut avocado with caviare │ -│ 2000 │ 3 │ 7.82 │ ███████████████▋ │ Aufgeschlagenes Kartoffelsueppchen mit Forellencaviar │ -│ 2010 │ 6 │ 15.58 │ ███████████████████████████████▏ │ "OYSTERS AND PEARLS" "Sabayon" of Pearl Tapioca with Island Creek Oysters and Russian Sevruga Caviar │ -└──────┴─────────┴──────────────────────┴──────────────────────────────────┴─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┘ -``` - -At least they have caviar with vodka. Very nice. - -## オンラインプレイグラウンド {#playground} - -The data is uploaded to ClickHouse Playground, [example](https://sql.clickhouse.com?query_id=KB5KQJJFNBKHE5GBUJCP1B). 
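One small variation worth trying there: replacing `any(dish_name)` with the standard `topK` aggregate returns a few example dishes per decade instead of a single arbitrary one. A sketch of the same caviar query:

```sql
SELECT
    round(toUInt32OrZero(extract(menu_date, '^\\d{4}')), -1) AS d,
    count(),
    round(avg(price), 2),
    topK(3)(dish_name)
FROM menu_item_denorm
WHERE (menu_currency IN ('Dollars', '')) AND (d > 0) AND (d < 2022) AND (dish_name ILIKE '%caviar%')
GROUP BY d
ORDER BY d ASC;
```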
diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/menus.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/menus.md.hash deleted file mode 100644 index 85211099b3c..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/menus.md.hash +++ /dev/null @@ -1 +0,0 @@ -e3233ee6f40d6684 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/metrica.md b/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/metrica.md deleted file mode 100644 index db08bead68e..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/metrica.md +++ /dev/null @@ -1,147 +0,0 @@ ---- -description: 'Dataset consisting of two tables containing anonymized web analytics - data with hits and visits' -sidebar_label: 'Web Analytics Data' -slug: '/getting-started/example-datasets/metrica' -title: 'Anonymized Web Analytics' ---- - - - - -# 匿名化されたウェブ分析データ - -このデータセットは、ヒット(`hits_v1`)と訪問(`visits_v1`)の匿名化されたウェブ分析データを含む2つのテーブルで構成されています。 - -テーブルは圧縮された `tsv.xz` ファイルとしてダウンロードできます。この文書で扱ったサンプルに加えて、1億行を含む `hits` テーブルの拡張版(7.5GB)がTSV形式で[https://datasets.clickhouse.com/hits/tsv/hits_100m_obfuscated_v1.tsv.xz](https://datasets.clickhouse.com/hits/tsv/hits_100m_obfuscated_v1.tsv.xz)から利用可能です。 - -## データのダウンロードと取り込み {#download-and-ingest-the-data} - -### ヒットの圧縮TSVファイルをダウンロードする: {#download-the-hits-compressed-tsv-file} - -```bash -curl https://datasets.clickhouse.com/hits/tsv/hits_v1.tsv.xz | unxz --threads=`nproc` > hits_v1.tsv - -# チェックサムを検証する -md5sum hits_v1.tsv - -# チェックサムは次のようになります: f3631b6295bf06989c1437491f7592cb -``` - -### データベースとテーブルを作成する {#create-the-database-and-table} - -```bash -clickhouse-client --query "CREATE DATABASE IF NOT EXISTS datasets" -``` - -hits_v1のために - -```bash -clickhouse-client --query "CREATE TABLE datasets.hits_v1 ( WatchID UInt64, JavaEnable UInt8, Title String, GoodEvent Int16, EventTime DateTime, EventDate Date, CounterID UInt32, ClientIP UInt32, ClientIP6 FixedString(16), RegionID UInt32, UserID UInt64, CounterClass Int8, OS UInt8, UserAgent UInt8, URL String, Referer String, URLDomain String, RefererDomain String, Refresh UInt8, IsRobot UInt8, RefererCategories Array(UInt16), URLCategories Array(UInt16), URLRegions Array(UInt32), RefererRegions Array(UInt32), ResolutionWidth UInt16, ResolutionHeight UInt16, ResolutionDepth UInt8, FlashMajor UInt8, FlashMinor UInt8, FlashMinor2 String, NetMajor UInt8, NetMinor UInt8, UserAgentMajor UInt16, UserAgentMinor FixedString(2), CookieEnable UInt8, JavascriptEnable UInt8, IsMobile UInt8, MobilePhone UInt8, MobilePhoneModel String, Params String, IPNetworkID UInt32, TraficSourceID Int8, SearchEngineID UInt16, SearchPhrase String, AdvEngineID UInt8, IsArtifical UInt8, WindowClientWidth UInt16, WindowClientHeight UInt16, ClientTimeZone Int16, ClientEventTime DateTime, SilverlightVersion1 UInt8, SilverlightVersion2 UInt8, SilverlightVersion3 UInt32, SilverlightVersion4 UInt16, PageCharset String, CodeVersion UInt32, IsLink UInt8, IsDownload UInt8, IsNotBounce UInt8, FUniqID UInt64, HID UInt32, IsOldCounter UInt8, IsEvent UInt8, IsParameter UInt8, DontCountHits UInt8, WithHash UInt8, HitColor FixedString(1), UTCEventTime DateTime, Age UInt8, Sex UInt8, Income UInt8, Interests UInt16, Robotness UInt8, GeneralInterests Array(UInt16), RemoteIP UInt32, RemoteIP6 FixedString(16), WindowName Int32, OpenerName Int32, HistoryLength Int16, BrowserLanguage 
FixedString(2), BrowserCountry FixedString(2), SocialNetwork String, SocialAction String, HTTPError UInt16, SendTiming Int32, DNSTiming Int32, ConnectTiming Int32, ResponseStartTiming Int32, ResponseEndTiming Int32, FetchTiming Int32, RedirectTiming Int32, DOMInteractiveTiming Int32, DOMContentLoadedTiming Int32, DOMCompleteTiming Int32, LoadEventStartTiming Int32, LoadEventEndTiming Int32, NSToDOMContentLoadedTiming Int32, FirstPaintTiming Int32, RedirectCount Int8, SocialSourceNetworkID UInt8, SocialSourcePage String, ParamPrice Int64, ParamOrderID String, ParamCurrency FixedString(3), ParamCurrencyID UInt16, GoalsReached Array(UInt32), OpenstatServiceName String, OpenstatCampaignID String, OpenstatAdID String, OpenstatSourceID String, UTMSource String, UTMMedium String, UTMCampaign String, UTMContent String, UTMTerm String, FromTag String, HasGCLID UInt8, RefererHash UInt64, URLHash UInt64, CLID UInt32, YCLID UInt64, ShareService String, ShareURL String, ShareTitle String, ParsedParams Nested(Key1 String, Key2 String, Key3 String, Key4 String, Key5 String, ValueDouble Float64), IslandID FixedString(16), RequestNum UInt32, RequestTry UInt8) ENGINE = MergeTree() PARTITION BY toYYYYMM(EventDate) ORDER BY (CounterID, EventDate, intHash32(UserID)) SAMPLE BY intHash32(UserID) SETTINGS index_granularity = 8192" -``` - -または hits_100m_obfuscated の場合 - -```bash -clickhouse-client --query="CREATE TABLE default.hits_100m_obfuscated (WatchID UInt64, JavaEnable UInt8, Title String, GoodEvent Int16, EventTime DateTime, EventDate Date, CounterID UInt32, ClientIP UInt32, RegionID UInt32, UserID UInt64, CounterClass Int8, OS UInt8, UserAgent UInt8, URL String, Referer String, Refresh UInt8, RefererCategoryID UInt16, RefererRegionID UInt32, URLCategoryID UInt16, URLRegionID UInt32, ResolutionWidth UInt16, ResolutionHeight UInt16, ResolutionDepth UInt8, FlashMajor UInt8, FlashMinor UInt8, FlashMinor2 String, NetMajor UInt8, NetMinor UInt8, UserAgentMajor UInt16, UserAgentMinor FixedString(2), CookieEnable UInt8, JavascriptEnable UInt8, IsMobile UInt8, MobilePhone UInt8, MobilePhoneModel String, Params String, IPNetworkID UInt32, TraficSourceID Int8, SearchEngineID UInt16, SearchPhrase String, AdvEngineID UInt8, IsArtifical UInt8, WindowClientWidth UInt16, WindowClientHeight UInt16, ClientTimeZone Int16, ClientEventTime DateTime, SilverlightVersion1 UInt8, SilverlightVersion2 UInt8, SilverlightVersion3 UInt32, SilverlightVersion4 UInt16, PageCharset String, CodeVersion UInt32, IsLink UInt8, IsDownload UInt8, IsNotBounce UInt8, FUniqID UInt64, OriginalURL String, HID UInt32, IsOldCounter UInt8, IsEvent UInt8, IsParameter UInt8, DontCountHits UInt8, WithHash UInt8, HitColor FixedString(1), LocalEventTime DateTime, Age UInt8, Sex UInt8, Income UInt8, Interests UInt16, Robotness UInt8, RemoteIP UInt32, WindowName Int32, OpenerName Int32, HistoryLength Int16, BrowserLanguage FixedString(2), BrowserCountry FixedString(2), SocialNetwork String, SocialAction String, HTTPError UInt16, SendTiming UInt32, DNSTiming UInt32, ConnectTiming UInt32, ResponseStartTiming UInt32, ResponseEndTiming UInt32, FetchTiming UInt32, SocialSourceNetworkID UInt8, SocialSourcePage String, ParamPrice Int64, ParamOrderID String, ParamCurrency FixedString(3), ParamCurrencyID UInt16, OpenstatServiceName String, OpenstatCampaignID String, OpenstatAdID String, OpenstatSourceID String, UTMSource String, UTMMedium String, UTMCampaign String, UTMContent String, UTMTerm String, FromTag String, HasGCLID UInt8, RefererHash UInt64, URLHash UInt64, 
CLID UInt32) ENGINE = MergeTree() PARTITION BY toYYYYMM(EventDate) ORDER BY (CounterID, EventDate, intHash32(UserID)) SAMPLE BY intHash32(UserID) SETTINGS index_granularity = 8192" -``` - -### ヒットデータを取り込む: {#import-the-hits-data} - -```bash -cat hits_v1.tsv | clickhouse-client --query "INSERT INTO datasets.hits_v1 FORMAT TSV" --max_insert_block_size=100000 -``` - -行数を検証します - -```bash -clickhouse-client --query "SELECT COUNT(*) FROM datasets.hits_v1" -``` - -```response -8873898 -``` - -### 訪問の圧縮TSVファイルをダウンロードする: {#download-the-visits-compressed-tsv-file} - -```bash -curl https://datasets.clickhouse.com/visits/tsv/visits_v1.tsv.xz | unxz --threads=`nproc` > visits_v1.tsv - -# チェックサムを検証する -md5sum visits_v1.tsv - -# チェックサムは次のようになります: 6dafe1a0f24e59e3fc2d0fed85601de6 -``` - -### 訪問テーブルを作成する {#create-the-visits-table} - -```bash -clickhouse-client --query "CREATE TABLE datasets.visits_v1 ( CounterID UInt32, StartDate Date, Sign Int8, IsNew UInt8, VisitID UInt64, UserID UInt64, StartTime DateTime, Duration UInt32, UTCStartTime DateTime, PageViews Int32, Hits Int32, IsBounce UInt8, Referer String, StartURL String, RefererDomain String, StartURLDomain String, EndURL String, LinkURL String, IsDownload UInt8, TraficSourceID Int8, SearchEngineID UInt16, SearchPhrase String, AdvEngineID UInt8, PlaceID Int32, RefererCategories Array(UInt16), URLCategories Array(UInt16), URLRegions Array(UInt32), RefererRegions Array(UInt32), IsYandex UInt8, GoalReachesDepth Int32, GoalReachesURL Int32, GoalReachesAny Int32, SocialSourceNetworkID UInt8, SocialSourcePage String, MobilePhoneModel String, ClientEventTime DateTime, RegionID UInt32, ClientIP UInt32, ClientIP6 FixedString(16), RemoteIP UInt32, RemoteIP6 FixedString(16), IPNetworkID UInt32, SilverlightVersion3 UInt32, CodeVersion UInt32, ResolutionWidth UInt16, ResolutionHeight UInt16, UserAgentMajor UInt16, UserAgentMinor UInt16, WindowClientWidth UInt16, WindowClientHeight UInt16, SilverlightVersion2 UInt8, SilverlightVersion4 UInt16, FlashVersion3 UInt16, FlashVersion4 UInt16, ClientTimeZone Int16, OS UInt8, UserAgent UInt8, ResolutionDepth UInt8, FlashMajor UInt8, FlashMinor UInt8, NetMajor UInt8, NetMinor UInt8, MobilePhone UInt8, SilverlightVersion1 UInt8, Age UInt8, Sex UInt8, Income UInt8, JavaEnable UInt8, CookieEnable UInt8, JavascriptEnable UInt8, IsMobile UInt8, BrowserLanguage UInt16, BrowserCountry UInt16, Interests UInt16, Robotness UInt8, GeneralInterests Array(UInt16), Params Array(String), Goals Nested(ID UInt32, Serial UInt32, EventTime DateTime, Price Int64, OrderID String, CurrencyID UInt32), WatchIDs Array(UInt64), ParamSumPrice Int64, ParamCurrency FixedString(3), ParamCurrencyID UInt16, ClickLogID UInt64, ClickEventID Int32, ClickGoodEvent Int32, ClickEventTime DateTime, ClickPriorityID Int32, ClickPhraseID Int32, ClickPageID Int32, ClickPlaceID Int32, ClickTypeID Int32, ClickResourceID Int32, ClickCost UInt32, ClickClientIP UInt32, ClickDomainID UInt32, ClickURL String, ClickAttempt UInt8, ClickOrderID UInt32, ClickBannerID UInt32, ClickMarketCategoryID UInt32, ClickMarketPP UInt32, ClickMarketCategoryName String, ClickMarketPPName String, ClickAWAPSCampaignName String, ClickPageName String, ClickTargetType UInt16, ClickTargetPhraseID UInt64, ClickContextType UInt8, ClickSelectType Int8, ClickOptions String, ClickGroupBannerID Int32, OpenstatServiceName String, OpenstatCampaignID String, OpenstatAdID String, OpenstatSourceID String, UTMSource String, UTMMedium String, UTMCampaign String, UTMContent String, UTMTerm String, FromTag 
String, HasGCLID UInt8, FirstVisit DateTime, PredLastVisit Date, LastVisit Date, TotalVisits UInt32, TraficSource Nested(ID Int8, SearchEngineID UInt16, AdvEngineID UInt8, PlaceID UInt16, SocialSourceNetworkID UInt8, Domain String, SearchPhrase String, SocialSourcePage String), Attendance FixedString(16), CLID UInt32, YCLID UInt64, NormalizedRefererHash UInt64, SearchPhraseHash UInt64, RefererDomainHash UInt64, NormalizedStartURLHash UInt64, StartURLDomainHash UInt64, NormalizedEndURLHash UInt64, TopLevelDomain UInt64, URLScheme UInt64, OpenstatServiceNameHash UInt64, OpenstatCampaignIDHash UInt64, OpenstatAdIDHash UInt64, OpenstatSourceIDHash UInt64, UTMSourceHash UInt64, UTMMediumHash UInt64, UTMCampaignHash UInt64, UTMContentHash UInt64, UTMTermHash UInt64, FromHash UInt64, WebVisorEnabled UInt8, WebVisorActivity UInt32, ParsedParams Nested(Key1 String, Key2 String, Key3 String, Key4 String, Key5 String, ValueDouble Float64), Market Nested(Type UInt8, GoalID UInt32, OrderID String, OrderPrice Int64, PP UInt32, DirectPlaceID UInt32, DirectOrderID UInt32, DirectBannerID UInt32, GoodID String, GoodName String, GoodQuantity Int32, GoodPrice Int64), IslandID FixedString(16)) ENGINE = CollapsingMergeTree(Sign) PARTITION BY toYYYYMM(StartDate) ORDER BY (CounterID, StartDate, intHash32(UserID), VisitID) SAMPLE BY intHash32(UserID) SETTINGS index_granularity = 8192" -``` - -### 訪問データを取り込む {#import-the-visits-data} - -```bash -cat visits_v1.tsv | clickhouse-client --query "INSERT INTO datasets.visits_v1 FORMAT TSV" --max_insert_block_size=100000 -``` - -数を検証します -```bash -clickhouse-client --query "SELECT COUNT(*) FROM datasets.visits_v1" -``` - -```response -1680609 -``` - -## 例としてのJOIN {#an-example-join} - -ヒットと訪問のデータセットはClickHouseのテストルーチンで使用されており、これはテストスイートのクエリの一部です。残りのテストはこのページの最後にある「次のステップ」セクションで参照されています。 - -```sql -clickhouse-client --query "SELECT - EventDate, - hits, - visits -FROM -( - SELECT - EventDate, - count() AS hits - FROM datasets.hits_v1 - GROUP BY EventDate -) ANY LEFT JOIN -( - SELECT - StartDate AS EventDate, - sum(Sign) AS visits - FROM datasets.visits_v1 - GROUP BY EventDate -) USING EventDate -ORDER BY hits DESC -LIMIT 10 -SETTINGS joined_subquery_requires_alias = 0 -FORMAT PrettyCompact" -``` - -```response -┌──EventDate─┬────hits─┬─visits─┐ -│ 2014-03-17 │ 1406958 │ 265108 │ -│ 2014-03-19 │ 1405797 │ 261624 │ -│ 2014-03-18 │ 1383658 │ 258723 │ -│ 2014-03-20 │ 1353623 │ 255328 │ -│ 2014-03-21 │ 1245779 │ 236232 │ -│ 2014-03-23 │ 1046491 │ 202212 │ -│ 2014-03-22 │ 1031592 │ 197354 │ -└────────────┴─────────┴────────┘ -``` - -## 次のステップ {#next-steps} - -[ClickHouseにおけるスパース主インデックスの実用的な導入ガイド](/guides/best-practices/sparse-primary-indexes.md)では、ヒットデータセットを使用して、ClickHouseのインデックスと従来のリレーショナルデータベースの違い、ClickHouseによるスパース主インデックスの構築と利用方法、インデックスのベストプラクティスについて説明しています。 - -これらのテーブルに対するクエリの追加例は、ClickHouseの[ステートフルテスト](https://github.com/ClickHouse/ClickHouse/blob/d7129855757f38ceec3e4ecc6dafacdabe9b178f/tests/queries/1_stateful/00172_parallel_join.sql)の中に見られます。 - -:::note -テストスイートではデータベース名 `test` が使用され、テーブル名は `hits` と `visits` です。データベースやテーブルの名前を変更したり、テストファイルのSQLを編集することができます。 -::: diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/metrica.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/metrica.md.hash deleted file mode 100644 index 033755a738b..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/metrica.md.hash +++ /dev/null @@ -1 +0,0 @@ -848d3084c9ed37c8 diff --git 
a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/noaa.md b/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/noaa.md deleted file mode 100644 index 0a944218590..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/noaa.md +++ /dev/null @@ -1,341 +0,0 @@ ---- -description: '2.5 billion rows of climate data for the last 120 yrs' -sidebar_label: 'NOAA Global Historical Climatology Network' -sidebar_position: 1 -slug: '/getting-started/example-datasets/noaa' -title: 'NOAA Global Historical Climatology Network' ---- - - - -このデータセットには、過去120年間の気象測定値が含まれています。各行は、特定の時点と観測所の測定値を表しています。 - -より正確には、このデータの[出所](https://github.com/awslabs/open-data-docs/tree/main/docs/noaa/noaa-ghcn)に基づいて次のようになります: - -> GHCN-Dailyは、世界の陸地での日次観測を含むデータセットです。これは、世界中の地上観測所からの測定値を含み、その約3分の2は降水測定のみです (Menne et al., 2012)。GHCN-Dailyは、数多くのソースからの気象記録の複合体で、統合されて共通の品質保証レビューにかけられています (Durre et al., 2010)。アーカイブには、以下の気象要素が含まれています: - - - 日次最高気温 - - 日次最低気温 - - 観測時の気温 - - 降水量(つまり、雨、融雪) - - 降雪量 - - 雪の深さ - - 額外の要素(利用可能な場合) - -以下のセクションでは、このデータセットをClickHouseに取り込むために関与したステップの概要を提供します。各ステップについてより詳細に読みたい場合は、私たちのブログ投稿「["ClickHouseにおける100年以上の気象記録の探求"](https://clickhouse.com/blog/real-world-data-noaa-climate-data)」をご覧ください。 - -## データのダウンロード {#downloading-the-data} - -- ClickHouse用にクリーンアップ、再構成、強化された[事前準備されたバージョン](#pre-prepared-data)があります。このデータは1900年から2022年までをカバーしています。 -- [オリジナルデータをダウンロード](#original-data)し、ClickHouseが要求する形式に変換します。独自のカラムを追加したいユーザーは、このアプローチを検討するかもしれません。 - -### 事前準備されたデータ {#pre-prepared-data} - -より具体的には、Noaaによる品質保証チェックで失敗しなかった行が削除されています。データは、行ごとの測定から、ステーションIDと日付ごとの行に再構成されています。つまり、次のようになります。 - -```csv -"station_id","date","tempAvg","tempMax","tempMin","precipitation","snowfall","snowDepth","percentDailySun","averageWindSpeed","maxWindSpeed","weatherType" -"AEM00041194","2022-07-30",347,0,308,0,0,0,0,0,0,0 -"AEM00041194","2022-07-31",371,413,329,0,0,0,0,0,0,0 -"AEM00041194","2022-08-01",384,427,357,0,0,0,0,0,0,0 -"AEM00041194","2022-08-02",381,424,352,0,0,0,0,0,0,0 -``` - -これはクエリが簡単で、結果のテーブルがスパースでないことを保証します。最後に、データには緯度と経度が追加されています。 - -このデータは、以下のS3ロケーションで入手可能です。データをローカルファイルシステムにダウンロードして(ClickHouseクライアントを使用して挿入することができます)、または直接ClickHouseに挿入します([S3からの挿入を参照](#inserting-from-s3))。 - -ダウンロードするには: - -```bash -wget https://datasets-documentation.s3.eu-west-3.amazonaws.com/noaa/noaa_enriched.parquet -``` - -### オリジナルデータ {#original-data} - -以下は、ClickHouseにロードする準備のためにオリジナルデータをダウンロードして変換する手順を示しています。 - -#### ダウンロード {#download} - -オリジナルデータをダウンロードするには: - -```bash -for i in {1900..2023}; do wget https://noaa-ghcn-pds.s3.amazonaws.com/csv.gz/${i}.csv.gz; done -``` - -#### データのサンプリング {#sampling-the-data} - -```bash -$ clickhouse-local --query "SELECT * FROM '2021.csv.gz' LIMIT 10" --format PrettyCompact -┌─c1──────────┬───────c2─┬─c3───┬──c4─┬─c5───┬─c6───┬─c7─┬───c8─┐ -│ AE000041196 │ 20210101 │ TMAX │ 278 │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ S │ ᴺᵁᴸᴸ │ -│ AE000041196 │ 20210101 │ PRCP │ 0 │ D │ ᴺᵁᴸᴸ │ S │ ᴺᵁᴸᴸ │ -│ AE000041196 │ 20210101 │ TAVG │ 214 │ H │ ᴺᵁᴸᴸ │ S │ ᴺᵁᴸᴸ │ -│ AEM00041194 │ 20210101 │ TMAX │ 266 │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ S │ ᴺᵁᴸᴸ │ -│ AEM00041194 │ 20210101 │ TMIN │ 178 │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ S │ ᴺᵁᴸᴸ │ -│ AEM00041194 │ 20210101 │ PRCP │ 0 │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ S │ ᴺᵁᴸᴸ │ -│ AEM00041194 │ 20210101 │ TAVG │ 217 │ H │ ᴺᵁᴸᴸ │ S │ ᴺᵁᴸᴸ │ -│ AEM00041217 │ 20210101 │ TMAX │ 262 │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ S │ ᴺᵁᴸᴸ │ -│ AEM00041217 │ 20210101 │ TMIN │ 155 │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ S │ ᴺᵁᴸᴸ │ -│ AEM00041217 │ 20210101 │ TAVG │ 202 │ H │ ᴺᵁᴸᴸ │ S │ ᴺᵁᴸᴸ │ 
-└─────────────┴──────────┴──────┴─────┴──────┴──────┴────┴──────┘ -``` - -[フォーマットのドキュメント](https://github.com/awslabs/open-data-docs/tree/main/docs/noaa/noaa-ghcn)を要約すると: - -フォーマットドキュメントとカラムを順番に要約すると: - - - 11文字のステーション識別コード。この中に一部有用な情報がエンコードされています。 - - YEAR/MONTH/DAY = YYYYMMDD形式の8文字の日付(例:19860529 = 1986年5月29日) - - ELEMENT = 要素タイプの4文字インジケーター。事実上、測定タイプです。利用可能な多くの測定値がありますが、以下のものを選択します: - - PRCP - 降水量(1/10 mm) - - SNOW - 降雪量(mm) - - SNWD - 雪の深さ(mm) - - TMAX - 最高気温(1/10 ℃) - - TAVG - 平均気温(1/10 ℃) - - TMIN - 最低気温(1/10 ℃) - - PSUN - 日次可能日照率(パーセント) - - AWND - 日次平均風速(1/10秒メートル) - - WSFG - ピーク突風風速(1/10秒メートル) - - WT** = 天候タイプ **は天候タイプを示します。天候タイプの完全なリストはこちら。 -- DATA VALUE = ELEMENT用の5文字データ値すなわち測定値の値。 -- M-FLAG = 測定フラグ(1文字)。これは10の可能な値があります。これらの値の一部は、データの精度に疑問があることを示します。この値が「P」に設定されているデータを受け入れます。これは、PRCP、SNOW、SNWD測定にのみ関連します。 -- Q-FLAGは測定品質フラグで、14の可能な値があります。我々は、空の値データすなわち、品質保証チェックで失敗しなかったもののデータにだけ興味があります。 -- S-FLAGは観測のソースフラグです。我々の分析には役立たないため無視します。 -- OBS-TIME = 観測時間を表す4文字(時分)形式のデータ(例:0700 =午前7時)。通常、古いデータには存在しません。我々の目的には無視します。 - -行ごとの測定では、ClickHouseでスパースなテーブル構造が生じることになります。時刻と観測所ごとの行に変換し、測定をカラムとして持つようにする必要があります。まず、データセットを問題のない行に制限します。つまり、`qFlag`が空の文字列である行に制限します。 - -#### データのクリーニング {#clean-the-data} - -[ClickHouse local](https://clickhouse.com/blog/extracting-converting-querying-local-files-with-sql-clickhouse-local)を使用して、興味のある測定を表す行をフィルタリングし、品質要件を通過させることができます: - -```bash -clickhouse local --query "SELECT count() -FROM file('*.csv.gz', CSV, 'station_id String, date String, measurement String, value Int64, mFlag String, qFlag String, sFlag String, obsTime String') WHERE qFlag = '' AND (measurement IN ('PRCP', 'SNOW', 'SNWD', 'TMAX', 'TAVG', 'TMIN', 'PSUN', 'AWND', 'WSFG') OR startsWith(measurement, 'WT'))" - -2679264563 -``` - -26億以上の行があるため、すべてのファイルを解析する必要があるため、このクエリは遅いものです。我々の8コアのマシンでは、約160秒かかります。 - -### データのピボット {#pivot-data} - -行ごとの測定構造をClickHouseに使用することはできますが、将来のクエリを不必要に複雑にします。理想的には、各測定タイプと関連する値をカラムにする、ステーションIDと日付ごとの行が必要です。つまり、 - -```csv -"station_id","date","tempAvg","tempMax","tempMin","precipitation","snowfall","snowDepth","percentDailySun","averageWindSpeed","maxWindSpeed","weatherType" -"AEM00041194","2022-07-30",347,0,308,0,0,0,0,0,0,0 -"AEM00041194","2022-07-31",371,413,329,0,0,0,0,0,0,0 -"AEM00041194","2022-08-01",384,427,357,0,0,0,0,0,0,0 -"AEM00041194","2022-08-02",381,424,352,0,0,0,0,0,0,0 -``` - -ClickHouse localを使用して単純な`GROUP BY`によって、データをこの構造に再ピボットできます。メモリのオーバーヘッドを制限するために、1ファイルずつこれを行います。 - -```bash -for i in {1900..2022} -do -clickhouse-local --query "SELECT station_id, - toDate32(date) as date, - anyIf(value, measurement = 'TAVG') as tempAvg, - anyIf(value, measurement = 'TMAX') as tempMax, - anyIf(value, measurement = 'TMIN') as tempMin, - anyIf(value, measurement = 'PRCP') as precipitation, - anyIf(value, measurement = 'SNOW') as snowfall, - anyIf(value, measurement = 'SNWD') as snowDepth, - anyIf(value, measurement = 'PSUN') as percentDailySun, - anyIf(value, measurement = 'AWND') as averageWindSpeed, - anyIf(value, measurement = 'WSFG') as maxWindSpeed, - toUInt8OrZero(replaceOne(anyIf(measurement, startsWith(measurement, 'WT') AND value = 1), 'WT', '')) as weatherType -FROM file('$i.csv.gz', CSV, 'station_id String, date String, measurement String, value Int64, mFlag String, qFlag String, sFlag String, obsTime String') - WHERE qFlag = '' AND (measurement IN ('PRCP', 'SNOW', 'SNWD', 'TMAX', 'TAVG', 'TMIN', 'PSUN', 'AWND', 'WSFG') OR startsWith(measurement, 'WT')) -GROUP BY station_id, date -ORDER BY station_id, date FORMAT CSV" >> "noaa.csv"; -done -``` - 
-このクエリは、単一の50GBファイル`noaa.csv`を生成します。 - -### データの強化 {#enriching-the-data} - -データには、観測所ID以外には位置を示すものがありません。このIDには、国コードのプレフィックスが含まれています。理想的には、各観測所には緯度と経度が関連付けられている必要があります。この目的のために、NOAAは各観測所の詳細を、別の[ghcnd-stations.txt](https://github.com/awslabs/open-data-docs/tree/main/docs/noaa/noaa-ghcn#format-of-ghcnd-stationstxt-file)として便利に提供しています。このファイルには、我々の将来の分析に役立つ5つのカラムがあります:id、緯度、経度、高度、名前。 - -```bash -wget http://noaa-ghcn-pds.s3.amazonaws.com/ghcnd-stations.txt -``` - -```bash -clickhouse local --query "WITH stations AS (SELECT id, lat, lon, elevation, splitByString(' GSN ',name)[1] as name FROM file('ghcnd-stations.txt', Regexp, 'id String, lat Float64, lon Float64, elevation Float32, name String')) -SELECT station_id, - date, - tempAvg, - tempMax, - tempMin, - precipitation, - snowfall, - snowDepth, - percentDailySun, - averageWindSpeed, - maxWindSpeed, - weatherType, - tuple(lon, lat) as location, - elevation, - name -FROM file('noaa.csv', CSV, - 'station_id String, date Date32, tempAvg Int32, tempMax Int32, tempMin Int32, precipitation Int32, snowfall Int32, snowDepth Int32, percentDailySun Int8, averageWindSpeed Int32, maxWindSpeed Int32, weatherType UInt8') as noaa LEFT OUTER - JOIN stations ON noaa.station_id = stations.id INTO OUTFILE 'noaa_enriched.parquet' FORMAT Parquet SETTINGS format_regexp='^(.{11})\s+(\-?\d{1,2}\.\d{4})\s+(\-?\d{1,3}\.\d{1,4})\s+(\-?\d*\.\d*)\s+(.*)\s+(?:[\d]*)'" -``` -このクエリは数分かかり、6.4GBのファイル`noaa_enriched.parquet`を生成します。 - -## テーブルの作成 {#create-table} - -ClickHouse内にMergeTreeテーブルを作成します(ClickHouseクライアントから)。 - -```sql -CREATE TABLE noaa -( - `station_id` LowCardinality(String), - `date` Date32, - `tempAvg` Int32 COMMENT '平均気温(1/10℃)', - `tempMax` Int32 COMMENT '最高気温(1/10℃)', - `tempMin` Int32 COMMENT '最低気温(1/10℃)', - `precipitation` UInt32 COMMENT '降水量(1/10 mm)', - `snowfall` UInt32 COMMENT '降雪量(mm)', - `snowDepth` UInt32 COMMENT '雪の深さ(mm)', - `percentDailySun` UInt8 COMMENT '日次可能日照率(パーセント)', - `averageWindSpeed` UInt32 COMMENT '日次平均風速(1/10秒メートル)', - `maxWindSpeed` UInt32 COMMENT 'ピーク突風風速(1/10秒メートル)', - `weatherType` Enum8('通常' = 0, '霧' = 1, '濃霧' = 2, '雷' = 3, '小さな雹' = 4, '雹' = 5, '氷霧' = 6, '塵/灰' = 7, '煙/霞' = 8, '吹雪' = 9, '竜巻' = 10, '強風' = 11, '吹き上げる霧' = 12, '霧雨' = 13, '凍結霧雨' = 14, '雨' = 15, '凍結雨' = 16, '雪' = 17, '不明な降水' = 18, '地面霧' = 21, '凍結霧' = 22), - `location` Point, - `elevation` Float32, - `name` LowCardinality(String) -) ENGINE = MergeTree() ORDER BY (station_id, date); -``` - -## ClickHouseへの挿入 {#inserting-into-clickhouse} - -### ローカルファイルからの挿入 {#inserting-from-local-file} - -データは、次のようにローカルファイルから挿入できます(ClickHouseクライアントから): - -```sql -INSERT INTO noaa FROM INFILE '/noaa_enriched.parquet' -``` - -``は、ディスク上のローカルファイルへの完全なパスを表します。 - -このロードを高速化する方法は、[こちら](https://clickhouse.com/blog/real-world-data-noaa-climate-data#load-the-data)を参照してください。 - -### S3からの挿入 {#inserting-from-s3} - -```sql -INSERT INTO noaa SELECT * -FROM s3('https://datasets-documentation.s3.eu-west-3.amazonaws.com/noaa/noaa_enriched.parquet') -``` -これを高速化する方法については、[大規模データのロードの調整に関するブログ投稿](https://clickhouse.com/blog/supercharge-your-clickhouse-data-loads-part2)を参照してください。 - -## サンプルクエリ {#sample-queries} - -### 過去最高気温 {#highest-temperature-ever} - -```sql -SELECT - tempMax / 10 AS maxTemp, - location, - name, - date -FROM blogs.noaa -WHERE tempMax > 500 -ORDER BY - tempMax DESC, - date ASC -LIMIT 5 - -┌─maxTemp─┬─location──────────┬─name───────────────────────────────────────────┬───────date─┐ -│ 56.7 │ (-116.8667,36.45) │ CA GREENLAND RCH │ 1913-07-10 │ -│ 56.7 │ (-115.4667,32.55) │ MEXICALI (SMN) │ 
1949-08-20 │ -│ 56.7 │ (-115.4667,32.55) │ MEXICALI (SMN) │ 1949-09-18 │ -│ 56.7 │ (-115.4667,32.55) │ MEXICALI (SMN) │ 1952-07-17 │ -│ 56.7 │ (-115.4667,32.55) │ MEXICALI (SMN) │ 1952-09-04 │ -└─────────┴───────────────────┴────────────────────────────────────────────────┴────────────┘ - -5行が設定されました。経過: 0.514秒。処理された行: 10.6億、4.27 GB(20.6億行/s, 8.29 GB/s)。 -``` - -2023年時点での[Furnace Creek](https://www.google.com/maps/place/36%C2%B027'00.0%22N+116%C2%B052'00.1%22W/@36.1329666,-116.1104099,8.95z/data=!4m5!3m4!1s0x0:0xf2ed901b860f4446!8m2!3d36.45!4d-116.8667)による[記録された記録](https://en.wikipedia.org/wiki/List_of_weather_records#Highest_temperatures_ever_recorded)と一貫性があります。 - -### 最高のスキーリゾート {#best-ski-resorts} - -スキーリゾートの[リスト](https://gist.githubusercontent.com/gingerwizard/dd022f754fd128fdaf270e58fa052e35/raw/622e03c37460f17ef72907afe554cb1c07f91f23/ski_resort_stats.csv)とそれぞれの場所を利用して、過去5年間に月ごとに最も雪が降ったトップ1000の気象観測所と結合します。この結合を[geoDistance](/sql-reference/functions/geo/coordinates/#geodistance)でソートし、距離が20km未満の結果に制限し、各リゾートごとにトップの結果を選択し、これを総降雪量でソートします。さらに、リゾートは、良好なスキー条件の広い指標として1800m以上のものに制限します。 - -```sql -SELECT - resort_name, - total_snow / 1000 AS total_snow_m, - resort_location, - month_year -FROM -( - WITH resorts AS - ( - SELECT - resort_name, - state, - (lon, lat) AS resort_location, - 'US' AS code - FROM url('https://gist.githubusercontent.com/gingerwizard/dd022f754fd128fdaf270e58fa052e35/raw/622e03c37460f17ef72907afe554cb1c07f91f23/ski_resort_stats.csv', CSVWithNames) - ) - SELECT - resort_name, - highest_snow.station_id, - geoDistance(resort_location.1, resort_location.2, station_location.1, station_location.2) / 1000 AS distance_km, - highest_snow.total_snow, - resort_location, - station_location, - month_year - FROM - ( - SELECT - sum(snowfall) AS total_snow, - station_id, - any(location) AS station_location, - month_year, - substring(station_id, 1, 2) AS code - FROM noaa - WHERE (date > '2017-01-01') AND (code = 'US') AND (elevation > 1800) - GROUP BY - station_id, - toYYYYMM(date) AS month_year - ORDER BY total_snow DESC - LIMIT 1000 - ) AS highest_snow - INNER JOIN resorts ON highest_snow.code = resorts.code - WHERE distance_km < 20 - ORDER BY - resort_name ASC, - total_snow DESC - LIMIT 1 BY - resort_name, - station_id -) -ORDER BY total_snow DESC -LIMIT 5 - -┌─resort_name──────────┬─total_snow_m─┬─resort_location─┬─month_year─┐ -│ Sugar Bowl, CA │ 7.799 │ (-120.3,39.27) │ 201902 │ -│ Donner Ski Ranch, CA │ 7.799 │ (-120.34,39.31) │ 201902 │ -│ Boreal, CA │ 7.799 │ (-120.35,39.33) │ 201902 │ -│ Homewood, CA │ 4.926 │ (-120.17,39.08) │ 201902 │ -│ Alpine Meadows, CA │ 4.926 │ (-120.22,39.17) │ 201902 │ -└──────────────────────┴──────────────┴─────────────────┴────────────┘ - -5行が設定されました。経過: 0.750秒。処理された行: 689.1百万、3.20 GB(918.20百万行/s, 4.26 GB/s)。 -ピークメモリ使用量: 67.66 MiB。 -``` - -## クレジット {#credits} - -このデータの準備、クリーンアップ、および配布におけるグローバル歴史気候ネットワークの努力に感謝します。あなた方の努力に感謝します。 - -Menne, M.J., I. Durre, B. Korzeniewski, S. McNeal, K. Thomas, X. Yin, S. Anthony, R. Ray, R.S. Vose, B.E.Gleason, and T.G. Houston, 2012: Global Historical Climatology Network - Daily (GHCN-Daily), Version 3. [十進法の後に使用されたサブセットを示します,例如: Version 3.25] NOAA National Centers for Environmental Information. 
http://doi.org/10.7289/V5D21VHZ [2020年8月17日] diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/noaa.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/noaa.md.hash deleted file mode 100644 index 913d2475b5b..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/noaa.md.hash +++ /dev/null @@ -1 +0,0 @@ -6b760811cbfd7eed diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/nyc-taxi.md b/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/nyc-taxi.md deleted file mode 100644 index 692d5fb3d96..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/nyc-taxi.md +++ /dev/null @@ -1,306 +0,0 @@ ---- -description: '2009年以降にニューヨーク市を発着する数十億のタクシーおよびフォーヒャイヤー車(Uber、Lyftなど)のトリップデータ' -sidebar_label: 'ニューヨークタクシーデータ' -sidebar_position: 2 -slug: '/getting-started/example-datasets/nyc-taxi' -title: 'New York Taxi Data' ---- - -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; - -The New York taxi data sample consists of 3+ billion taxi and for-hire vehicle (Uber, Lyft, etc.) trips originating in New York City since 2009. This getting started guide uses a 3m row sample. - -The full dataset can be obtained in a couple of ways: - -- insert the data directly into ClickHouse Cloud from S3 or GCS -- download prepared partitions -- Alternatively users can query the full dataset in our demo environment at [sql.clickhouse.com](https://sql.clickhouse.com/?query=U0VMRUNUIGNvdW50KCkgRlJPTSBueWNfdGF4aS50cmlwcw&chart=eyJ0eXBlIjoibGluZSIsImNvbmZpZyI6eyJ0aXRsZSI6IlRlbXBlcmF0dXJlIGJ5IGNvdW50cnkgYW5kIHllYXIiLCJ4YXhpcyI6InllYXIiLCJ5YXhpcyI6ImNvdW50KCkiLCJzZXJpZXMiOiJDQVNUKHBhc3Nlbmdlcl9jb3VudCwgJ1N0cmluZycpIn19). - - -:::note -The example queries below were executed on a **Production** instance of ClickHouse Cloud. For more information see -["Playground specifications"](/getting-started/playground#specifications). -::: - - -## Create the table trips {#create-the-table-trips} - -Start by creating a table for the taxi rides: - -```sql - -CREATE DATABASE nyc_taxi; - -CREATE TABLE nyc_taxi.trips_small ( - trip_id UInt32, - pickup_datetime DateTime, - dropoff_datetime DateTime, - pickup_longitude Nullable(Float64), - pickup_latitude Nullable(Float64), - dropoff_longitude Nullable(Float64), - dropoff_latitude Nullable(Float64), - passenger_count UInt8, - trip_distance Float32, - fare_amount Float32, - extra Float32, - tip_amount Float32, - tolls_amount Float32, - total_amount Float32, - payment_type Enum('CSH' = 1, 'CRE' = 2, 'NOC' = 3, 'DIS' = 4, 'UNK' = 5), - pickup_ntaname LowCardinality(String), - dropoff_ntaname LowCardinality(String) -) -ENGINE = MergeTree -PRIMARY KEY (pickup_datetime, dropoff_datetime); -``` - -## Load the Data directly from Object Storage {#load-the-data-directly-from-object-storage} - -Users' can grab a small subset of the data (3 million rows) for getting familiar with it. The data is in TSV files in object storage, which is easily streamed into -ClickHouse Cloud using the `s3` table function. - -The same data is stored in both S3 and GCS; choose either tab. 
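Before loading anything, it can be useful to peek at a few raw rows straight from object storage; the `s3` table function makes this a one-line query. A sketch using the same public S3 path, format, and columns as the `INSERT` statements below:

```sql
SELECT trip_id, pickup_datetime, passenger_count, total_amount
FROM s3(
    'https://datasets-documentation.s3.eu-west-3.amazonaws.com/nyc-taxi/trips_{0..2}.gz',
    'TabSeparatedWithNames'
)
LIMIT 5;
```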
- - - - -The following command streams three files from an S3 bucket into the `trips_small` table (the `{0..2}` syntax is a wildcard for the values 0, 1, and 2): - -```sql -INSERT INTO nyc_taxi.trips_small -SELECT - trip_id, - pickup_datetime, - dropoff_datetime, - pickup_longitude, - pickup_latitude, - dropoff_longitude, - dropoff_latitude, - passenger_count, - trip_distance, - fare_amount, - extra, - tip_amount, - tolls_amount, - total_amount, - payment_type, - pickup_ntaname, - dropoff_ntaname -FROM s3( - 'https://datasets-documentation.s3.eu-west-3.amazonaws.com/nyc-taxi/trips_{0..2}.gz', - 'TabSeparatedWithNames' -); -``` - - - -The following command streams three files from a GCS bucket into the `trips` table (the `{0..2}` syntax is a wildcard for the values 0, 1, and 2): - -```sql -INSERT INTO nyc_taxi.trips_small -SELECT - trip_id, - pickup_datetime, - dropoff_datetime, - pickup_longitude, - pickup_latitude, - dropoff_longitude, - dropoff_latitude, - passenger_count, - trip_distance, - fare_amount, - extra, - tip_amount, - tolls_amount, - total_amount, - payment_type, - pickup_ntaname, - dropoff_ntaname -FROM gcs( - 'https://storage.googleapis.com/clickhouse-public-datasets/nyc-taxi/trips_{0..2}.gz', - 'TabSeparatedWithNames' -); -``` - - - -## Sample Queries {#sample-queries} - -The following queries are executed on the sample described above. Users can run the sample queries on the full dataset in [sql.clickhouse.com](https://sql.clickhouse.com/?query=U0VMRUNUIGNvdW50KCkgRlJPTSBueWNfdGF4aS50cmlwcw&chart=eyJ0eXBlIjoibGluZSIsImNvbmZpZyI6eyJ0aXRsZSI6IlRlbXBlcmF0dXJlIGJ5IGNvdW50cnkgYW5kIHllYXIiLCJ4YXhpcyI6InllYXIiLCJ5YXhpcyI6ImNvdW50KCkiLCJzZXJpZXMiOiJDQVNUKHBhc3Nlbmdlcl9jb3VudCwgJ1N0cmluZycpIn19), modifying the queries below to use the table `nyc_taxi.trips`. - -Let's see how many rows were inserted: - -```sql runnable -SELECT count() -FROM nyc_taxi.trips_small; -``` - -Each TSV file has about 1M rows, and the three files have 3,000,317 rows. Let's look at a few rows: - -```sql runnable -SELECT * -FROM nyc_taxi.trips_small -LIMIT 10; -``` - -Notice there are columns for the pickup and dropoff dates, geo coordinates, fare details, New York neighborhoods, and more. - - -Let's run a few queries. 
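For instance, a quick look at how the trips are distributed across the hours of the day (a simple sketch on the sample table):

```sql runnable
SELECT
    toHour(pickup_datetime) AS hour,
    count() AS trips
FROM nyc_taxi.trips_small
GROUP BY hour
ORDER BY hour;
```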
This query shows us the top 10 neighborhoods that have the most frequent pickups: - -```sql runnable -SELECT - pickup_ntaname, - count(*) AS count -FROM nyc_taxi.trips_small WHERE pickup_ntaname != '' -GROUP BY pickup_ntaname -ORDER BY count DESC -LIMIT 10; -``` - -This query shows the average fare based on the number of passengers: - -```sql runnable view='chart' chart_config='eyJ0eXBlIjoiYmFyIiwiY29uZmlnIjp7InhheGlzIjoicGFzc2VuZ2VyX2NvdW50IiwieWF4aXMiOiJhdmcodG90YWxfYW1vdW50KSIsInRpdGxlIjoiQXZlcmFnZSBmYXJlIGJ5IHBhc3NlbmdlciBjb3VudCJ9fQ' -SELECT - passenger_count, - avg(total_amount) -FROM nyc_taxi.trips_small -WHERE passenger_count < 10 -GROUP BY passenger_count; -``` - -Here's a correlation between the number of passengers and the distance of the trip: - -```sql runnable chart_config='eyJ0eXBlIjoiaG9yaXpvbnRhbCBiYXIiLCJjb25maWciOnsieGF4aXMiOiJwYXNzZW5nZXJfY291bnQiLCJ5YXhpcyI6ImRpc3RhbmNlIiwic2VyaWVzIjoiY291bnRyeSIsInRpdGxlIjoiQXZnIGZhcmUgYnkgcGFzc2VuZ2VyIGNvdW50In19' -SELECT - passenger_count, - avg(trip_distance) AS distance, - count() AS c -FROM nyc_taxi.trips_small -GROUP BY passenger_count -ORDER BY passenger_count ASC -``` - -## Download of Prepared Partitions {#download-of-prepared-partitions} - -:::note -The following steps provide information about the original dataset, and a method for loading prepared partitions into a self-managed ClickHouse server environment. -::: - -See https://github.com/toddwschneider/nyc-taxi-data and http://tech.marksblogg.com/billion-nyc-taxi-rides-redshift.html for the description of a dataset and instructions for downloading. - -Downloading will result in about 227 GB of uncompressed data in CSV files. The download takes about an hour over a 1 Gbit connection (parallel downloading from s3.amazonaws.com recovers at least half of a 1 Gbit channel). -Some of the files might not download fully. Check the file sizes and re-download any that seem doubtful. - -```bash -$ curl -O https://datasets.clickhouse.com/trips_mergetree/partitions/trips_mergetree.tar - -# Validate the checksum -$ md5sum trips_mergetree.tar - -# Checksum should be equal to: f3b8d469b41d9a82da064ded7245d12c -$ tar xvf trips_mergetree.tar -C /var/lib/clickhouse # path to ClickHouse data directory -$ # check permissions of unpacked data, fix if required -$ sudo service clickhouse-server restart -$ clickhouse-client --query "select count(*) from datasets.trips_mergetree" -``` - -:::info -If you will run the queries described below, you have to use the full table name, `datasets.trips_mergetree`. -::: - -## Results on Single Server {#results-on-single-server} - -Q1: - -```sql -SELECT cab_type, count(*) FROM trips_mergetree GROUP BY cab_type; -``` - -0.490 seconds. - -Q2: - -```sql -SELECT passenger_count, avg(total_amount) FROM trips_mergetree GROUP BY passenger_count; -``` - -1.224 seconds. - -Q3: - -```sql -SELECT passenger_count, toYear(pickup_date) AS year, count(*) FROM trips_mergetree GROUP BY passenger_count, year; -``` - -2.104 seconds. - -Q4: - -```sql -SELECT passenger_count, toYear(pickup_date) AS year, round(trip_distance) AS distance, count(*) -FROM trips_mergetree -GROUP BY passenger_count, year, distance -ORDER BY year, count(*) DESC; -``` - -3.593 seconds. - -The following server was used: - -Two Intel(R) Xeon(R) CPU E5-2650 v2 @ 2.60GHz, 16 physical cores total, 128 GiB RAM, 8x6 TB HD on hardware RAID-5 - -Execution time is the best of three runs. But starting from the second run, queries read data from the file system cache. 
No further caching occurs: the data is read out and processed in each run. - -Creating a table on three servers: - -On each server: - -```sql -CREATE TABLE default.trips_mergetree_third ( trip_id UInt32, vendor_id Enum8('1' = 1, '2' = 2, 'CMT' = 3, 'VTS' = 4, 'DDS' = 5, 'B02512' = 10, 'B02598' = 11, 'B02617' = 12, 'B02682' = 13, 'B02764' = 14), pickup_date Date, pickup_datetime DateTime, dropoff_date Date, dropoff_datetime DateTime, store_and_fwd_flag UInt8, rate_code_id UInt8, pickup_longitude Float64, pickup_latitude Float64, dropoff_longitude Float64, dropoff_latitude Float64, passenger_count UInt8, trip_distance Float64, fare_amount Float32, extra Float32, mta_tax Float32, tip_amount Float32, tolls_amount Float32, ehail_fee Float32, improvement_surcharge Float32, total_amount Float32, payment_type_ Enum8('UNK' = 0, 'CSH' = 1, 'CRE' = 2, 'NOC' = 3, 'DIS' = 4), trip_type UInt8, pickup FixedString(25), dropoff FixedString(25), cab_type Enum8('yellow' = 1, 'green' = 2, 'uber' = 3), pickup_nyct2010_gid UInt8, pickup_ctlabel Float32, pickup_borocode UInt8, pickup_boroname Enum8('' = 0, 'Manhattan' = 1, 'Bronx' = 2, 'Brooklyn' = 3, 'Queens' = 4, 'Staten Island' = 5), pickup_ct2010 FixedString(6), pickup_boroct2010 FixedString(7), pickup_cdeligibil Enum8(' ' = 0, 'E' = 1, 'I' = 2), pickup_ntacode FixedString(4), pickup_ntaname Enum16('' = 0, 'Airport' = 1, 'Allerton-Pelham Gardens' = 2, 'Annadale-Huguenot-Prince\'s Bay-Eltingville' = 3, 'Arden Heights' = 4, 'Astoria' = 5, 'Auburndale' = 6, 'Baisley Park' = 7, 'Bath Beach' = 8, 'Battery Park City-Lower Manhattan' = 9, 'Bay Ridge' = 10, 'Bayside-Bayside Hills' = 11, 'Bedford' = 12, 'Bedford Park-Fordham North' = 13, 'Bellerose' = 14, 'Belmont' = 15, 'Bensonhurst East' = 16, 'Bensonhurst West' = 17, 'Borough Park' = 18, 'Breezy Point-Belle Harbor-Rockaway Park-Broad Channel' = 19, 'Briarwood-Jamaica Hills' = 20, 'Brighton Beach' = 21, 'Bronxdale' = 22, 'Brooklyn Heights-Cobble Hill' = 23, 'Brownsville' = 24, 'Bushwick North' = 25, 'Bushwick South' = 26, 'Cambria Heights' = 27, 'Canarsie' = 28, 'Carroll Gardens-Columbia Street-Red Hook' = 29, 'Central Harlem North-Polo Grounds' = 30, 'Central Harlem South' = 31, 'Charleston-Richmond Valley-Tottenville' = 32, 'Chinatown' = 33, 'Claremont-Bathgate' = 34, 'Clinton' = 35, 'Clinton Hill' = 36, 'Co-op City' = 37, 'College Point' = 38, 'Corona' = 39, 'Crotona Park East' = 40, 'Crown Heights North' = 41, 'Crown Heights South' = 42, 'Cypress Hills-City Line' = 43, 'DUMBO-Vinegar Hill-Downtown Brooklyn-Boerum Hill' = 44, 'Douglas Manor-Douglaston-Little Neck' = 45, 'Dyker Heights' = 46, 'East Concourse-Concourse Village' = 47, 'East Elmhurst' = 48, 'East Flatbush-Farragut' = 49, 'East Flushing' = 50, 'East Harlem North' = 51, 'East Harlem South' = 52, 'East New York' = 53, 'East New York (Pennsylvania Ave)' = 54, 'East Tremont' = 55, 'East Village' = 56, 'East Williamsburg' = 57, 'Eastchester-Edenwald-Baychester' = 58, 'Elmhurst' = 59, 'Elmhurst-Maspeth' = 60, 'Erasmus' = 61, 'Far Rockaway-Bayswater' = 62, 'Flatbush' = 63, 'Flatlands' = 64, 'Flushing' = 65, 'Fordham South' = 66, 'Forest Hills' = 67, 'Fort Greene' = 68, 'Fresh Meadows-Utopia' = 69, 'Ft. Totten-Bay Terrace-Clearview' = 70, 'Georgetown-Marine Park-Bergen Beach-Mill Basin' = 71, 'Glen Oaks-Floral Park-New Hyde Park' = 72, 'Glendale' = 73, 'Gramercy' = 74, 'Grasmere-Arrochar-Ft. 
Wadsworth' = 75, 'Gravesend' = 76, 'Great Kills' = 77, 'Greenpoint' = 78, 'Grymes Hill-Clifton-Fox Hills' = 79, 'Hamilton Heights' = 80, 'Hammels-Arverne-Edgemere' = 81, 'Highbridge' = 82, 'Hollis' = 83, 'Homecrest' = 84, 'Hudson Yards-Chelsea-Flatiron-Union Square' = 85, 'Hunters Point-Sunnyside-West Maspeth' = 86, 'Hunts Point' = 87, 'Jackson Heights' = 88, 'Jamaica' = 89, 'Jamaica Estates-Holliswood' = 90, 'Kensington-Ocean Parkway' = 91, 'Kew Gardens' = 92, 'Kew Gardens Hills' = 93, 'Kingsbridge Heights' = 94, 'Laurelton' = 95, 'Lenox Hill-Roosevelt Island' = 96, 'Lincoln Square' = 97, 'Lindenwood-Howard Beach' = 98, 'Longwood' = 99, 'Lower East Side' = 100, 'Madison' = 101, 'Manhattanville' = 102, 'Marble Hill-Inwood' = 103, 'Mariner\'s Harbor-Arlington-Port Ivory-Graniteville' = 104, 'Maspeth' = 105, 'Melrose South-Mott Haven North' = 106, 'Middle Village' = 107, 'Midtown-Midtown South' = 108, 'Midwood' = 109, 'Morningside Heights' = 110, 'Morrisania-Melrose' = 111, 'Mott Haven-Port Morris' = 112, 'Mount Hope' = 113, 'Murray Hill' = 114, 'Murray Hill-Kips Bay' = 115, 'New Brighton-Silver Lake' = 116, 'New Dorp-Midland Beach' = 117, 'New Springville-Bloomfield-Travis' = 118, 'North Corona' = 119, 'North Riverdale-Fieldston-Riverdale' = 120, 'North Side-South Side' = 121, 'Norwood' = 122, 'Oakland Gardens' = 123, 'Oakwood-Oakwood Beach' = 124, 'Ocean Hill' = 125, 'Ocean Parkway South' = 126, 'Old Astoria' = 127, 'Old Town-Dongan Hills-South Beach' = 128, 'Ozone Park' = 129, 'Park Slope-Gowanus' = 130, 'Parkchester' = 131, 'Pelham Bay-Country Club-City Island' = 132, 'Pelham Parkway' = 133, 'Pomonok-Flushing Heights-Hillcrest' = 134, 'Port Richmond' = 135, 'Prospect Heights' = 136, 'Prospect Lefferts Gardens-Wingate' = 137, 'Queens Village' = 138, 'Queensboro Hill' = 139, 'Queensbridge-Ravenswood-Long Island City' = 140, 'Rego Park' = 141, 'Richmond Hill' = 142, 'Ridgewood' = 143, 'Rikers Island' = 144, 'Rosedale' = 145, 'Rossville-Woodrow' = 146, 'Rugby-Remsen Village' = 147, 'Schuylerville-Throgs Neck-Edgewater Park' = 148, 'Seagate-Coney Island' = 149, 'Sheepshead Bay-Gerritsen Beach-Manhattan Beach' = 150, 'SoHo-TriBeCa-Civic Center-Little Italy' = 151, 'Soundview-Bruckner' = 152, 'Soundview-Castle Hill-Clason Point-Harding Park' = 153, 'South Jamaica' = 154, 'South Ozone Park' = 155, 'Springfield Gardens North' = 156, 'Springfield Gardens South-Brookville' = 157, 'Spuyten Duyvil-Kingsbridge' = 158, 'St. Albans' = 159, 'Stapleton-Rosebank' = 160, 'Starrett City' = 161, 'Steinway' = 162, 'Stuyvesant Heights' = 163, 'Stuyvesant Town-Cooper Village' = 164, 'Sunset Park East' = 165, 'Sunset Park West' = 166, 'Todt Hill-Emerson Hill-Heartland Village-Lighthouse Hill' = 167, 'Turtle Bay-East Midtown' = 168, 'University Heights-Morris Heights' = 169, 'Upper East Side-Carnegie Hill' = 170, 'Upper West Side' = 171, 'Van Cortlandt Village' = 172, 'Van Nest-Morris Park-Westchester Square' = 173, 'Washington Heights North' = 174, 'Washington Heights South' = 175, 'West Brighton' = 176, 'West Concourse' = 177, 'West Farms-Bronx River' = 178, 'West New Brighton-New Brighton-St. 
George' = 179, 'West Village' = 180, 'Westchester-Unionport' = 181, 'Westerleigh' = 182, 'Whitestone' = 183, 'Williamsbridge-Olinville' = 184, 'Williamsburg' = 185, 'Windsor Terrace' = 186, 'Woodhaven' = 187, 'Woodlawn-Wakefield' = 188, 'Woodside' = 189, 'Yorkville' = 190, 'park-cemetery-etc-Bronx' = 191, 'park-cemetery-etc-Brooklyn' = 192, 'park-cemetery-etc-Manhattan' = 193, 'park-cemetery-etc-Queens' = 194, 'park-cemetery-etc-Staten Island' = 195), pickup_puma UInt16, dropoff_nyct2010_gid UInt8, dropoff_ctlabel Float32, dropoff_borocode UInt8, dropoff_boroname Enum8('' = 0, 'Manhattan' = 1, 'Bronx' = 2, 'Brooklyn' = 3, 'Queens' = 4, 'Staten Island' = 5), dropoff_ct2010 FixedString(6), dropoff_boroct2010 FixedString(7), dropoff_cdeligibil Enum8(' ' = 0, 'E' = 1, 'I' = 2), dropoff_ntacode FixedString(4), dropoff_ntaname Enum16('' = 0, 'Airport' = 1, 'Allerton-Pelham Gardens' = 2, 'Annadale-Huguenot-Prince\'s Bay-Eltingville' = 3, 'Arden Heights' = 4, 'Astoria' = 5, 'Auburndale' = 6, 'Baisley Park' = 7, 'Bath Beach' = 8, 'Battery Park City-Lower Manhattan' = 9, 'Bay Ridge' = 10, 'Bayside-Bayside Hills' = 11, 'Bedford' = 12, 'Bedford Park-Fordham North' = 13, 'Bellerose' = 14, 'Belmont' = 15, 'Bensonhurst East' = 16, 'Bensonhurst West' = 17, 'Borough Park' = 18, 'Breezy Point-Belle Harbor-Rockaway Park-Broad Channel' = 19, 'Briarwood-Jamaica Hills' = 20, 'Brighton Beach' = 21, 'Bronxdale' = 22, 'Brooklyn Heights-Cobble Hill' = 23, 'Brownsville' = 24, 'Bushwick North' = 25, 'Bushwick South' = 26, 'Cambria Heights' = 27, 'Canarsie' = 28, 'Carroll Gardens-Columbia Street-Red Hook' = 29, 'Central Harlem North-Polo Grounds' = 30, 'Central Harlem South' = 31, 'Charleston-Richmond Valley-Tottenville' = 32, 'Chinatown' = 33, 'Claremont-Bathgate' = 34, 'Clinton' = 35, 'Clinton Hill' = 36, 'Co-op City' = 37, 'College Point' = 38, 'Corona' = 39, 'Crotona Park East' = 40, 'Crown Heights North' = 41, 'Crown Heights South' = 42, 'Cypress Hills-City Line' = 43, 'DUMBO-Vinegar Hill-Downtown Brooklyn-Boerum Hill' = 44, 'Douglas Manor-Douglaston-Little Neck' = 45, 'Dyker Heights' = 46, 'East Concourse-Concourse Village' = 47, 'East Elmhurst' = 48, 'East Flatbush-Farragut' = 49, 'East Flushing' = 50, 'East Harlem North' = 51, 'East Harlem South' = 52, 'East New York' = 53, 'East New York (Pennsylvania Ave)' = 54, 'East Tremont' = 55, 'East Village' = 56, 'East Williamsburg' = 57, 'Eastchester-Edenwald-Baychester' = 58, 'Elmhurst' = 59, 'Elmhurst-Maspeth' = 60, 'Erasmus' = 61, 'Far Rockaway-Bayswater' = 62, 'Flatbush' = 63, 'Flatlands' = 64, 'Flushing' = 65, 'Fordham South' = 66, 'Forest Hills' = 67, 'Fort Greene' = 68, 'Fresh Meadows-Utopia' = 69, 'Ft. Totten-Bay Terrace-Clearview' = 70, 'Georgetown-Marine Park-Bergen Beach-Mill Basin' = 71, 'Glen Oaks-Floral Park-New Hyde Park' = 72, 'Glendale' = 73, 'Gramercy' = 74, 'Grasmere-Arrochar-Ft. 
Wadsworth' = 75, 'Gravesend' = 76, 'Great Kills' = 77, 'Greenpoint' = 78, 'Grymes Hill-Clifton-Fox Hills' = 79, 'Hamilton Heights' = 80, 'Hammels-Arverne-Edgemere' = 81, 'Highbridge' = 82, 'Hollis' = 83, 'Homecrest' = 84, 'Hudson Yards-Chelsea-Flatiron-Union Square' = 85, 'Hunters Point-Sunnyside-West Maspeth' = 86, 'Hunts Point' = 87, 'Jackson Heights' = 88, 'Jamaica' = 89, 'Jamaica Estates-Holliswood' = 90, 'Kensington-Ocean Parkway' = 91, 'Kew Gardens' = 92, 'Kew Gardens Hills' = 93, 'Kingsbridge Heights' = 94, 'Laurelton' = 95, 'Lenox Hill-Roosevelt Island' = 96, 'Lincoln Square' = 97, 'Lindenwood-Howard Beach' = 98, 'Longwood' = 99, 'Lower East Side' = 100, 'Madison' = 101, 'Manhattanville' = 102, 'Marble Hill-Inwood' = 103, 'Mariner\'s Harbor-Arlington-Port Ivory-Graniteville' = 104, 'Maspeth' = 105, 'Melrose South-Mott Haven North' = 106, 'Middle Village' = 107, 'Midtown-Midtown South' = 108, 'Midwood' = 109, 'Morningside Heights' = 110, 'Morrisania-Melrose' = 111, 'Mott Haven-Port Morris' = 112, 'Mount Hope' = 113, 'Murray Hill' = 114, 'Murray Hill-Kips Bay' = 115, 'New Brighton-Silver Lake' = 116, 'New Dorp-Midland Beach' = 117, 'New Springville-Bloomfield-Travis' = 118, 'North Corona' = 119, 'North Riverdale-Fieldston-Riverdale' = 120, 'North Side-South Side' = 121, 'Norwood' = 122, 'Oakland Gardens' = 123, 'Oakwood-Oakwood Beach' = 124, 'Ocean Hill' = 125, 'Ocean Parkway South' = 126, 'Old Astoria' = 127, 'Old Town-Dongan Hills-South Beach' = 128, 'Ozone Park' = 129, 'Park Slope-Gowanus' = 130, 'Parkchester' = 131, 'Pelham Bay-Country Club-City Island' = 132, 'Pelham Parkway' = 133, 'Pomonok-Flushing Heights-Hillcrest' = 134, 'Port Richmond' = 135, 'Prospect Heights' = 136, 'Prospect Lefferts Gardens-Wingate' = 137, 'Queens Village' = 138, 'Queensboro Hill' = 139, 'Queensbridge-Ravenswood-Long Island City' = 140, 'Rego Park' = 141, 'Richmond Hill' = 142, 'Ridgewood' = 143, 'Rikers Island' = 144, 'Rosedale' = 145, 'Rossville-Woodrow' = 146, 'Rugby-Remsen Village' = 147, 'Schuylerville-Throgs Neck-Edgewater Park' = 148, 'Seagate-Coney Island' = 149, 'Sheepshead Bay-Gerritsen Beach-Manhattan Beach' = 150, 'SoHo-TriBeCa-Civic Center-Little Italy' = 151, 'Soundview-Bruckner' = 152, 'Soundview-Castle Hill-Clason Point-Harding Park' = 153, 'South Jamaica' = 154, 'South Ozone Park' = 155, 'Springfield Gardens North' = 156, 'Springfield Gardens South-Brookville' = 157, 'Spuyten Duyvil-Kingsbridge' = 158, 'St. Albans' = 159, 'Stapleton-Rosebank' = 160, 'Starrett City' = 161, 'Steinway' = 162, 'Stuyvesant Heights' = 163, 'Stuyvesant Town-Cooper Village' = 164, 'Sunset Park East' = 165, 'Sunset Park West' = 166, 'Todt Hill-Emerson Hill-Heartland Village-Lighthouse Hill' = 167, 'Turtle Bay-East Midtown' = 168, 'University Heights-Morris Heights' = 169, 'Upper East Side-Carnegie Hill' = 170, 'Upper West Side' = 171, 'Van Cortlandt Village' = 172, 'Van Nest-Morris Park-Westchester Square' = 173, 'Washington Heights North' = 174, 'Washington Heights South' = 175, 'West Brighton' = 176, 'West Concourse' = 177, 'West Farms-Bronx River' = 178, 'West New Brighton-New Brighton-St. 
George' = 179, 'West Village' = 180, 'Westchester-Unionport' = 181, 'Westerleigh' = 182, 'Whitestone' = 183, 'Williamsbridge-Olinville' = 184, 'Williamsburg' = 185, 'Windsor Terrace' = 186, 'Woodhaven' = 187, 'Woodlawn-Wakefield' = 188, 'Woodside' = 189, 'Yorkville' = 190, 'park-cemetery-etc-Bronx' = 191, 'park-cemetery-etc-Brooklyn' = 192, 'park-cemetery-etc-Manhattan' = 193, 'park-cemetery-etc-Queens' = 194, 'park-cemetery-etc-Staten Island' = 195), dropoff_puma UInt16) ENGINE = MergeTree(pickup_date, pickup_datetime, 8192); -``` - -On the source server: - -```sql -CREATE TABLE trips_mergetree_x3 AS trips_mergetree_third ENGINE = Distributed(perftest, default, trips_mergetree_third, rand()); -``` - -The following query redistributes data: - -```sql -INSERT INTO trips_mergetree_x3 SELECT * FROM trips_mergetree; -``` - -This takes 2454 seconds. - -On three servers: - -Q1: 0.212 seconds. -Q2: 0.438 seconds. -Q3: 0.733 seconds. -Q4: 1.241 seconds. - -No surprises here, since the queries are scaled linearly. - -We also have the results from a cluster of 140 servers: - -Q1: 0.028 sec. -Q2: 0.043 sec. -Q3: 0.051 sec. -Q4: 0.072 sec. - -In this case, the query processing time is determined above all by network latency. -We ran queries using a client located in a different datacenter than where the cluster was located, which added about 20 ms of latency. - -## Summary {#summary} - -| servers | Q1 | Q2 | Q3 | Q4 | -|---------|-------|-------|-------|-------| -| 1, E5-2650v2 | 0.490 | 1.224 | 2.104 | 3.593 | -| 3, E5-2650v2 | 0.212 | 0.438 | 0.733 | 1.241 | -| 1, AWS c5n.4xlarge | 0.249 | 1.279 | 1.738 | 3.527 | -| 1, AWS c5n.9xlarge | 0.130 | 0.584 | 0.777 | 1.811 | -| 3, AWS c5n.9xlarge | 0.057 | 0.231 | 0.285 | 0.641 | -| 140, E5-2650v2 | 0.028 | 0.043 | 0.051 | 0.072 | diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/nyc-taxi.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/nyc-taxi.md.hash deleted file mode 100644 index 96d5a17ce11..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/nyc-taxi.md.hash +++ /dev/null @@ -1 +0,0 @@ -cd230b9fe8d9551f diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/nypd_complaint_data.md b/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/nypd_complaint_data.md deleted file mode 100644 index d23dfd8a2e8..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/nypd_complaint_data.md +++ /dev/null @@ -1,650 +0,0 @@ ---- -description: 'Ingest and query Tab Separated Value data in 5 steps' -sidebar_label: 'NYPD Complaint Data' -slug: '/getting-started/example-datasets/nypd_complaint_data' -title: 'NYPD Complaint Data' ---- - - - -Tab区切り値、またはTSVファイルは一般的であり、ファイルの最初の行にフィールド見出しを含む場合があります。ClickHouseはTSVを取り込み、ファイルを取り込まずにTSVをクエリすることもできます。このガイドでは、これらの2つのケースの両方をカバーします。CSVファイルをクエリまたは取り込む必要がある場合は、同じ手法が機能し、単にフォーマット引数で`TSV`を`CSV`に置き換えるだけです。 - -このガイドを進める中で、以下を行います: -- **調査**:TSVファイルの構造と内容をクエリします。 -- **対象のClickHouseスキーマを決定**:適切なデータ型を選び、既存のデータをそれらの型にマッピングします。 -- **ClickHouseテーブルを作成**。 -- **データを前処理してストリーミング**し、ClickHouseに送信します。 -- **ClickHouseに対していくつかのクエリを実行**します。 - -このガイドで使用されるデータセットは、NYCオープンデータチームから提供されており、「ニューヨーク市警察(NYPD)に報告されたすべての有効な重罪、軽罪、違反事件に関するデータ」が含まれています。執筆時点で、データファイルのサイズは166MBですが、定期的に更新されています。 - -**出典**:[data.cityofnewyork.us](https://data.cityofnewyork.us/Public-Safety/NYPD-Complaint-Data-Current-Year-To-Date-/5uac-w243) -**利用規約**: 
https://www1.nyc.gov/home/terms-of-use.page - -## 前提条件 {#prerequisites} -- [NYPD Complaint Data Current (Year To Date)](https://data.cityofnewyork.us/Public-Safety/NYPD-Complaint-Data-Current-Year-To-Date-/5uac-w243)ページを訪れてデータセットをダウンロードし、エクスポートボタンをクリックして**TSV for Excel**を選択します。 -- [ClickHouseサーバーとクライアント](../../getting-started/install/install.mdx)をインストールします。 - -### このガイドで説明されているコマンドに関する注意 {#a-note-about-the-commands-described-in-this-guide} -このガイドには2種類のコマンドがあります: -- 一部のコマンドはTSVファイルをクエリしており、これらはコマンドプロンプトで実行されます。 -- 残りのコマンドはClickHouseをクエリしており、これらは`clickhouse-client`またはPlay UIで実行されます。 - -:::note -このガイドの例では、TSVファイルを`${HOME}/NYPD_Complaint_Data_Current__Year_To_Date_.tsv`に保存したと仮定しています。必要に応じてコマンドを調整してください。 -::: - -## TSVファイルに慣れる {#familiarize-yourself-with-the-tsv-file} - -ClickHouseデータベースで作業を始める前に、データに慣れてください。 - -### ソースTSVファイルのフィールドを見る {#look-at-the-fields-in-the-source-tsv-file} - -これはTSVファイルをクエリするコマンドの例ですが、まだ実行しないでください。 -```sh -clickhouse-local --query \ -"describe file('${HOME}/NYPD_Complaint_Data_Current__Year_To_Date_.tsv', 'TSVWithNames')" -``` - -サンプル応答 -```response -CMPLNT_NUM Nullable(Float64) -ADDR_PCT_CD Nullable(Float64) -BORO_NM Nullable(String) -CMPLNT_FR_DT Nullable(String) -CMPLNT_FR_TM Nullable(String) -``` - -:::tip -通常、上記のコマンドは、入力データのどのフィールドが数値で、どのフィールドが文字列、どのフィールドがタプルであるかを教えてくれます。これは常に当てはまるわけではありません。ClickHouseは数十億のレコードを含むデータセットと共に使用されることが多いため、スキーマを[推測するために](../../integrations/data-formats/json/inference)既定で100行を検査します。これは、数十億行を解析してスキーマを推測するのを避けるためです。以下の応答は、あなたが見るものと一致しないかもしれません。なぜならデータセットは毎年数回更新されているからです。データ辞書を見れば、CMPLNT_NUMがテキストとして指定されているのがわかり、数値ではありません。推論のデフォルトが100行であるのを`SETTINGS input_format_max_rows_to_read_for_schema_inference=2000`に上書きすることで、内容をより良く把握できます。 - -注:バージョン22.5以降、デフォルトはスキーマ推論のために25,000行になっていますので、古いバージョンを使用している場合や、25,000行以上のサンプリングが必要な場合にのみ設定を変更してください。 -::: - -コマンドプロンプトでこのコマンドを実行してください。ダウンロードしたTSVファイルのデータをクエリするために`clickhouse-local`を使用します。 -```sh -clickhouse-local --input_format_max_rows_to_read_for_schema_inference=2000 \ ---query \ -"describe file('${HOME}/NYPD_Complaint_Data_Current__Year_To_Date_.tsv', 'TSVWithNames')" -``` - -結果: -```response -CMPLNT_NUM Nullable(String) -ADDR_PCT_CD Nullable(Float64) -BORO_NM Nullable(String) -CMPLNT_FR_DT Nullable(String) -CMPLNT_FR_TM Nullable(String) -CMPLNT_TO_DT Nullable(String) -CMPLNT_TO_TM Nullable(String) -CRM_ATPT_CPTD_CD Nullable(String) -HADEVELOPT Nullable(String) -HOUSING_PSA Nullable(Float64) -JURISDICTION_CODE Nullable(Float64) -JURIS_DESC Nullable(String) -KY_CD Nullable(Float64) -LAW_CAT_CD Nullable(String) -LOC_OF_OCCUR_DESC Nullable(String) -OFNS_DESC Nullable(String) -PARKS_NM Nullable(String) -PATROL_BORO Nullable(String) -PD_CD Nullable(Float64) -PD_DESC Nullable(String) -PREM_TYP_DESC Nullable(String) -RPT_DT Nullable(String) -STATION_NAME Nullable(String) -SUSP_AGE_GROUP Nullable(String) -SUSP_RACE Nullable(String) -SUSP_SEX Nullable(String) -TRANSIT_DISTRICT Nullable(Float64) -VIC_AGE_GROUP Nullable(String) -VIC_RACE Nullable(String) -VIC_SEX Nullable(String) -X_COORD_CD Nullable(Float64) -Y_COORD_CD Nullable(Float64) -Latitude Nullable(Float64) -Longitude Nullable(Float64) -Lat_Lon Tuple(Nullable(Float64), Nullable(Float64)) -New Georeferenced Column Nullable(String) -``` - -この時点で、TSVファイルのカラムが[データセットのウェブページ](https://data.cityofnewyork.us/Public-Safety/NYPD-Complaint-Data-Current-Year-To-Date-/5uac-w243)の**このデータセットのカラム**セクションで指定されている名前とタイプと一致するか確認する必要があります。データ型はあまり具体的ではなく、すべての数値フィールドは`Nullable(Float64)`に設定され、他のすべてのフィールドは`Nullable(String)`です。データを格納するためにClickHouseテーブルを作成する際、より適切でパフォーマンスの良い型を指定できます。 - 
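-If you prefer the ad-hoc inspection itself not to wrap everything in `Nullable`, one option is to disable nullable columns during schema inference. This is a minimal sketch, assuming the same file path as above and a ClickHouse version that supports the `schema_inference_make_columns_nullable` setting; the table you create later still defines its own types:
-
-```sh
-clickhouse-local --input_format_max_rows_to_read_for_schema_inference=2000 \
---schema_inference_make_columns_nullable=0 \
---query \
-"describe file('${HOME}/NYPD_Complaint_Data_Current__Year_To_Date_.tsv', 'TSVWithNames')"
-```
-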
-### 適切なスキーマを決定する {#determine-the-proper-schema} - -フィールドに使用すべき型を判断するためには、データがどのようになっているかを知る必要があります。たとえば、フィールド`JURISDICTION_CODE`は数値ですが、`UInt8`にするべきか、`Enum`にするべきか、または`Float64`が適切でしょうか? - -```sql -clickhouse-local --input_format_max_rows_to_read_for_schema_inference=2000 \ ---query \ -"select JURISDICTION_CODE, count() FROM - file('${HOME}/NYPD_Complaint_Data_Current__Year_To_Date_.tsv', 'TSVWithNames') - GROUP BY JURISDICTION_CODE - ORDER BY JURISDICTION_CODE - FORMAT PrettyCompact" -``` - -結果: -```response -┌─JURISDICTION_CODE─┬─count()─┐ -│ 0 │ 188875 │ -│ 1 │ 4799 │ -│ 2 │ 13833 │ -│ 3 │ 656 │ -│ 4 │ 51 │ -│ 6 │ 5 │ -│ 7 │ 2 │ -│ 9 │ 13 │ -│ 11 │ 14 │ -│ 12 │ 5 │ -│ 13 │ 2 │ -│ 14 │ 70 │ -│ 15 │ 20 │ -│ 72 │ 159 │ -│ 87 │ 9 │ -│ 88 │ 75 │ -│ 97 │ 405 │ -└───────────────────┴─────────┘ -``` - -クエリの応答は、`JURISDICTION_CODE`が`UInt8`に適していることを示しています。 - -同様に、いくつかの`String`フィールドを見て、それらが`DateTime`または[LowCardinality(String)](../../sql-reference/data-types/lowcardinality.md)フィールドに適しているかどうかを確認します。 - -たとえば、フィールド`PARKS_NM`は「発生地点のNYC公園、遊び場、または緑地の名称(適用される場合。州立公園は含まれません)」と記述されています。ニューヨーク市の公園の名前は`LowCardinality(String)`に適しているかもしれません: - -```sh -clickhouse-local --input_format_max_rows_to_read_for_schema_inference=2000 \ ---query \ -"select count(distinct PARKS_NM) FROM - file('${HOME}/NYPD_Complaint_Data_Current__Year_To_Date_.tsv', 'TSVWithNames') - FORMAT PrettyCompact" -``` - -結果: -```response -┌─uniqExact(PARKS_NM)─┐ -│ 319 │ -└─────────────────────┘ -``` - -いくつかの公園の名前を見てみましょう: -```sql -clickhouse-local --input_format_max_rows_to_read_for_schema_inference=2000 \ ---query \ -"select distinct PARKS_NM FROM - file('${HOME}/NYPD_Complaint_Data_Current__Year_To_Date_.tsv', 'TSVWithNames') - LIMIT 10 - FORMAT PrettyCompact" -``` - -結果: -```response -┌─PARKS_NM───────────────────┐ -│ (null) │ -│ ASSER LEVY PARK │ -│ JAMES J WALKER PARK │ -│ BELT PARKWAY/SHORE PARKWAY │ -│ PROSPECT PARK │ -│ MONTEFIORE SQUARE │ -│ SUTTON PLACE PARK │ -│ JOYCE KILMER PARK │ -│ ALLEY ATHLETIC PLAYGROUND │ -│ ASTORIA PARK │ -└────────────────────────────┘ -``` - -執筆時点のデータセットには、`PARK_NM`列に数百の異なる公園と遊び場しかありません。この数は、`LowCardinality`における推奨値である10,000以上の異なる文字列を下回る小さな数です。 - -### DateTimeフィールド {#datetime-fields} -[データセットのこのカラム](https://data.cityofnewyork.us/Public-Safety/NYPD-Complaint-Data-Current-Year-To-Date-/5uac-w243)セクションに基づいて、報告されたイベントの開始および終了のための日時フィールドがあります。`CMPLNT_FR_DT`および`CMPLT_TO_DT`の最小値と最大値を見れば、フィールドが常に埋まっているかどうかを判断できます: - -```sh title="CMPLNT_FR_DT" -clickhouse-local --input_format_max_rows_to_read_for_schema_inference=2000 \ ---query \ -"select min(CMPLNT_FR_DT), max(CMPLNT_FR_DT) FROM -file('${HOME}/NYPD_Complaint_Data_Current__Year_To_Date_.tsv', 'TSVWithNames') -FORMAT PrettyCompact" -``` - -結果: -```response -┌─min(CMPLNT_FR_DT)─┬─max(CMPLNT_FR_DT)─┐ -│ 01/01/1973 │ 12/31/2021 │ -└───────────────────┴───────────────────┘ -``` - -```sh title="CMPLNT_TO_DT" -clickhouse-local --input_format_max_rows_to_read_for_schema_inference=2000 \ ---query \ -"select min(CMPLNT_TO_DT), max(CMPLNT_TO_DT) FROM -file('${HOME}/NYPD_Complaint_Data_Current__Year_To_Date_.tsv', 'TSVWithNames') -FORMAT PrettyCompact" -``` - -結果: -```response -┌─min(CMPLNT_TO_DT)─┬─max(CMPLNT_TO_DT)─┐ -│ │ 12/31/2021 │ -└───────────────────┴───────────────────┘ -``` - -```sh title="CMPLNT_FR_TM" -clickhouse-local --input_format_max_rows_to_read_for_schema_inference=2000 \ ---query \ -"select min(CMPLNT_FR_TM), max(CMPLNT_FR_TM) FROM -file('${HOME}/NYPD_Complaint_Data_Current__Year_To_Date_.tsv', 'TSVWithNames') -FORMAT PrettyCompact" -``` - -結果: -```response 
-┌─min(CMPLNT_FR_TM)─┬─max(CMPLNT_FR_TM)─┐ -│ 00:00:00 │ 23:59:00 │ -└───────────────────┴───────────────────┘ -``` - -```sh title="CMPLNT_TO_TM" -clickhouse-local --input_format_max_rows_to_read_for_schema_inference=2000 \ ---query \ -"select min(CMPLNT_TO_TM), max(CMPLNT_TO_TM) FROM -file('${HOME}/NYPD_Complaint_Data_Current__Year_To_Date_.tsv', 'TSVWithNames') -FORMAT PrettyCompact" -``` - -結果: -```response -┌─min(CMPLNT_TO_TM)─┬─max(CMPLNT_TO_TM)─┐ -│ (null) │ 23:59:00 │ -└───────────────────┴───────────────────┘ -``` - -## プランを立てる {#make-a-plan} - -上記の調査に基づいて: -- `JURISDICTION_CODE`は`UInt8`型にキャストすべきです。 -- `PARKS_NM`は`LowCardinality(String)`にキャストすべきです。 -- `CMPLNT_FR_DT`と`CMPLNT_FR_TM`は常に埋まっている(恐らくデフォルトの時刻`00:00:00`を含む)。 -- `CMPLNT_TO_DT`と`CMPLNT_TO_TM`は空であるかもしれません。 -- 日付と時刻はソースの異なるフィールドに保存されている。 -- 日付は`mm/dd/yyyy`形式。 -- 時間は`hh:mm:ss`形式。 -- 日付と時間はDateTime型に結合できます。 -- 1970年1月1日以前の日付がいくつか存在するため、64ビットDateTimeが必要です。 - -:::note -型に変更を加えるべき点は他にも多くあります。それらはすべて、同じ調査手順に従うことでわかります。フィールド内の異なる文字列の数、数値の最小値と最大値を調べ、決定を下してください。以下のガイドに示されるテーブルスキーマには、多くの低いカーディナリティ文字列と符号なし整数フィールドが含まれ、非常に少ない浮動小数点数が含まれます。 -::: - -## 日付と時間フィールドを結合する {#concatenate-the-date-and-time-fields} - -日付と時間フィールド`CMPLNT_FR_DT`と`CMPLNT_FR_TM`を`DateTime`にキャストできる単一の`String`に結合するには、次の2つのフィールドを結合演算子`CMPLNT_FR_DT || ' ' || CMPLNT_FR_TM`で結合します。`CMPLNT_TO_DT`と`CMPLNT_TO_TM`フィールドも同様に処理されます。 - -```sh -clickhouse-local --input_format_max_rows_to_read_for_schema_inference=2000 \ ---query \ -"select CMPLNT_FR_DT || ' ' || CMPLNT_FR_TM AS complaint_begin FROM -file('${HOME}/NYPD_Complaint_Data_Current__Year_To_Date_.tsv', 'TSVWithNames') -LIMIT 10 -FORMAT PrettyCompact" -``` - -結果: -```response -┌─complaint_begin─────┐ -│ 07/29/2010 00:01:00 │ -│ 12/01/2011 12:00:00 │ -│ 04/01/2017 15:00:00 │ -│ 03/26/2018 17:20:00 │ -│ 01/01/2019 00:00:00 │ -│ 06/14/2019 00:00:00 │ -│ 11/29/2021 20:00:00 │ -│ 12/04/2021 00:35:00 │ -│ 12/05/2021 12:50:00 │ -│ 12/07/2021 20:30:00 │ -└─────────────────────┘ -``` - -## 日付と時間のStringをDateTime64型に変換する {#convert-the-date-and-time-string-to-a-datetime64-type} - -ガイドの前の方で、TSVファイルには1970年1月1日以前の日付があることがわかっているため、日付には64ビットDateTime型が必要になります。また、日付は`MM/DD/YYYY`から`YYYY/MM/DD`フォーマットに変換する必要があります。これらの両方は[`parseDateTime64BestEffort()`](../../sql-reference/functions/type-conversion-functions.md#parsedatetime64besteffort)で実行できます。 - -```sh -clickhouse-local --input_format_max_rows_to_read_for_schema_inference=2000 \ ---query \ -"WITH (CMPLNT_FR_DT || ' ' || CMPLNT_FR_TM) AS CMPLNT_START, - (CMPLNT_TO_DT || ' ' || CMPLNT_TO_TM) AS CMPLNT_END -select parseDateTime64BestEffort(CMPLNT_START) AS complaint_begin, - parseDateTime64BestEffortOrNull(CMPLNT_END) AS complaint_end -FROM file('${HOME}/NYPD_Complaint_Data_Current__Year_To_Date_.tsv', 'TSVWithNames') -ORDER BY complaint_begin ASC -LIMIT 25 -FORMAT PrettyCompact" -``` - -2行目と3行目には前のステップからの結合が含まれ、4行目と5行目は文字列を`DateTime64`に解析します。苦情の終了時間は必ずしも存在するわけではないため、`parseDateTime64BestEffortOrNull`が使用されます。 - -結果: -```response -┌─────────complaint_begin─┬───────────complaint_end─┐ -│ 1925-01-01 10:00:00.000 │ 2021-02-12 09:30:00.000 │ -│ 1925-01-01 11:37:00.000 │ 2022-01-16 11:49:00.000 │ -│ 1925-01-01 15:00:00.000 │ 2021-12-31 00:00:00.000 │ -│ 1925-01-01 15:00:00.000 │ 2022-02-02 22:00:00.000 │ -│ 1925-01-01 19:00:00.000 │ 2022-04-14 05:00:00.000 │ -│ 1955-09-01 19:55:00.000 │ 2022-08-01 00:45:00.000 │ -│ 1972-03-17 11:40:00.000 │ 2022-03-17 11:43:00.000 │ -│ 1972-05-23 22:00:00.000 │ 2022-05-24 09:00:00.000 │ -│ 1972-05-30 23:37:00.000 │ 2022-05-30 23:50:00.000 │ -│ 1972-07-04 02:17:00.000 │ ᴺᵁᴸᴸ │ -│ 
1973-01-01 00:00:00.000 │ ᴺᵁᴸᴸ │ -│ 1975-01-01 00:00:00.000 │ ᴺᵁᴸᴸ │ -│ 1976-11-05 00:01:00.000 │ 1988-10-05 23:59:00.000 │ -│ 1977-01-01 00:00:00.000 │ 1977-01-01 23:59:00.000 │ -│ 1977-12-20 00:01:00.000 │ ᴺᵁᴸᴸ │ -│ 1981-01-01 00:01:00.000 │ ᴺᵁᴸᴸ │ -│ 1981-08-14 00:00:00.000 │ 1987-08-13 23:59:00.000 │ -│ 1983-01-07 00:00:00.000 │ 1990-01-06 00:00:00.000 │ -│ 1984-01-01 00:01:00.000 │ 1984-12-31 23:59:00.000 │ -│ 1985-01-01 12:00:00.000 │ 1987-12-31 15:00:00.000 │ -│ 1985-01-11 09:00:00.000 │ 1985-12-31 12:00:00.000 │ -│ 1986-03-16 00:05:00.000 │ 2022-03-16 00:45:00.000 │ -│ 1987-01-07 00:00:00.000 │ 1987-01-09 00:00:00.000 │ -│ 1988-04-03 18:30:00.000 │ 2022-08-03 09:45:00.000 │ -│ 1988-07-29 12:00:00.000 │ 1990-07-27 22:00:00.000 │ -└─────────────────────────┴─────────────────────────┘ -``` -:::note -上記のように`1925`と表示される日付は、データのエラーによるものです。オリジナルデータには、`1019`から`1022`の年に日付があるいくつかのレコードがあり、それは`2019`から`2022`であるべきです。これらは64ビットDateTimeで保存されるため、1925年1月1日として保持されています。 -::: - -## テーブルを作成する {#create-a-table} - -上記で決定したカラムに対するデータ型は、以下のテーブルスキーマに反映されます。また、テーブルに使用する`ORDER BY`および`PRIMARY KEY`についても決定する必要があります。`ORDER BY`または`PRIMARY KEY`のいずれかは必ず指定しなければなりません。以下は、`ORDER BY`に含めるカラムを決定するためのガイドラインであり、この文書の最後の*次のステップ*セクションに詳細情報があります。 - -### Order ByとPrimary Keyの句 {#order-by-and-primary-key-clauses} - -- `ORDER BY`のタプルには、クエリフィルターで使用されるフィールドを含めるべきです。 -- ディスク上の圧縮を最大化するために、`ORDER BY`のタプルはカーディナリティの昇順で並べるべきです。 -- もし存在する場合、`PRIMARY KEY`タプルは`ORDER BY`タプルのサブセットでなければなりません。 -- `ORDER BY`のみが指定されている場合、同じタプルが`PRIMARY KEY`として使用されます。 -- プライマリキーインデックスは、指定された場合に`PRIMARY KEY`タプルを使用して作成され、それ以外の場合は`ORDER BY`タプルを使用して作成されます。 -- `PRIMARY KEY`インデックスは、主メモリに保持されます。 - -データセットを見て、クエリで回答される可能性のある質問を考えた場合、私たちはニューヨーク市の5つの区で報告されている犯罪の種類に着目することになるかもしれません。これらのフィールドは、`ORDER BY`に含めることができます: - -| カラム | 説明(データ辞書から) | -| ----------- | ------------------------------------------ | -| OFNS_DESC | キーコードに対応する犯罪の説明 | -| RPT_DT | 警察に報告された日付 | -| BORO_NM | 事件が発生した区の名前 | - -3つの候補カラムのカーディナリティをTSVファイルにクエリしてみましょう: - -```bash -clickhouse-local --input_format_max_rows_to_read_for_schema_inference=2000 \ ---query \ -"select formatReadableQuantity(uniq(OFNS_DESC)) as cardinality_OFNS_DESC, - formatReadableQuantity(uniq(RPT_DT)) as cardinality_RPT_DT, - formatReadableQuantity(uniq(BORO_NM)) as cardinality_BORO_NM - FROM - file('${HOME}/NYPD_Complaint_Data_Current__Year_To_Date_.tsv', 'TSVWithNames') - FORMAT PrettyCompact" -``` - -結果: -```response -┌─cardinality_OFNS_DESC─┬─cardinality_RPT_DT─┬─cardinality_BORO_NM─┐ -│ 60.00 │ 306.00 │ 6.00 │ -└───────────────────────┴────────────────────┴─────────────────────┘ -``` -カーディナリティ別に並べると、`ORDER BY`は次のようになります: - -```sql -ORDER BY ( BORO_NM, OFNS_DESC, RPT_DT ) -``` -:::note -以下のテーブルは、より読みやすいカラム名を使用します。上記の名前は、 - -```sql -ORDER BY ( borough, offense_description, date_reported ) -``` - -とマッピングされます。 -::: - -データ型に対する変更と`ORDER BY`タプルを組み合わせることで、このテーブル構造が得られます: - -```sql -CREATE TABLE NYPD_Complaint ( - complaint_number String, - precinct UInt8, - borough LowCardinality(String), - complaint_begin DateTime64(0,'America/New_York'), - complaint_end DateTime64(0,'America/New_York'), - was_crime_completed String, - housing_authority String, - housing_level_code UInt32, - jurisdiction_code UInt8, - jurisdiction LowCardinality(String), - offense_code UInt8, - offense_level LowCardinality(String), - location_descriptor LowCardinality(String), - offense_description LowCardinality(String), - park_name LowCardinality(String), - patrol_borough LowCardinality(String), - PD_CD UInt16, - PD_DESC String, - location_type LowCardinality(String), - 
date_reported Date, - transit_station LowCardinality(String), - suspect_age_group LowCardinality(String), - suspect_race LowCardinality(String), - suspect_sex LowCardinality(String), - transit_district UInt8, - victim_age_group LowCardinality(String), - victim_race LowCardinality(String), - victim_sex LowCardinality(String), - NY_x_coordinate UInt32, - NY_y_coordinate UInt32, - Latitude Float64, - Longitude Float64 -) ENGINE = MergeTree - ORDER BY ( borough, offense_description, date_reported ) -``` - -### テーブルのプライマリキーを検出する {#finding-the-primary-key-of-a-table} - -ClickHouseの`system`データベース、特に`system.table`には、作成したテーブルに関するすべての情報があります。このクエリは`ORDER BY`(ソートキー)および`PRIMARY KEY`を表示します: -```sql -SELECT - partition_key, - sorting_key, - primary_key, - table -FROM system.tables -WHERE table = 'NYPD_Complaint' -FORMAT Vertical -``` - -応答 - -```response -Query id: 6a5b10bf-9333-4090-b36e-c7f08b1d9e01 - -Row 1: -────── -partition_key: -sorting_key: borough, offense_description, date_reported -primary_key: borough, offense_description, date_reported -table: NYPD_Complaint - -1 row in set. Elapsed: 0.001 sec. -``` - -## データを前処理してインポートする {#preprocess-import-data} - -データの前処理には`clickhouse-local`ツールを使用し、アップロードには`clickhouse-client`を使用します。 - -### `clickhouse-local`で使用する引数 {#clickhouse-local-arguments-used} - -:::tip -`table='input'`は以下の`clickhouse-local`の引数に登場します。clickhouse-localは提供された入力(`cat ${HOME}/NYPD_Complaint_Data_Current__Year_To_Date_.tsv`)を受け取り、その入力をテーブルに挿入します。デフォルトでは、テーブル名は`table`です。このガイドでは、データフローを明確にするためにテーブル名を`input`に設定しています。clickhouse-localの最終引数は、テーブルから選択するクエリ(`FROM input`)で、これが`clickhouse-client`にパイプされて`NYPD_Complaint`テーブルを埋めます。 -::: - -```sql -cat ${HOME}/NYPD_Complaint_Data_Current__Year_To_Date_.tsv \ - | clickhouse-local --table='input' --input-format='TSVWithNames' \ - --input_format_max_rows_to_read_for_schema_inference=2000 \ - --query " - WITH (CMPLNT_FR_DT || ' ' || CMPLNT_FR_TM) AS CMPLNT_START, - (CMPLNT_TO_DT || ' ' || CMPLNT_TO_TM) AS CMPLNT_END - SELECT - CMPLNT_NUM AS complaint_number, - ADDR_PCT_CD AS precinct, - BORO_NM AS borough, - parseDateTime64BestEffort(CMPLNT_START) AS complaint_begin, - parseDateTime64BestEffortOrNull(CMPLNT_END) AS complaint_end, - CRM_ATPT_CPTD_CD AS was_crime_completed, - HADEVELOPT AS housing_authority_development, - HOUSING_PSA AS housing_level_code, - JURISDICTION_CODE AS jurisdiction_code, - JURIS_DESC AS jurisdiction, - KY_CD AS offense_code, - LAW_CAT_CD AS offense_level, - LOC_OF_OCCUR_DESC AS location_descriptor, - OFNS_DESC AS offense_description, - PARKS_NM AS park_name, - PATROL_BORO AS patrol_borough, - PD_CD, - PD_DESC, - PREM_TYP_DESC AS location_type, - toDate(parseDateTimeBestEffort(RPT_DT)) AS date_reported, - STATION_NAME AS transit_station, - SUSP_AGE_GROUP AS suspect_age_group, - SUSP_RACE AS suspect_race, - SUSP_SEX AS suspect_sex, - TRANSIT_DISTRICT AS transit_district, - VIC_AGE_GROUP AS victim_age_group, - VIC_RACE AS victim_race, - VIC_SEX AS victim_sex, - X_COORD_CD AS NY_x_coordinate, - Y_COORD_CD AS NY_y_coordinate, - Latitude, - Longitude - FROM input" \ - | clickhouse-client --query='INSERT INTO NYPD_Complaint FORMAT TSV' -``` - -## データを検証する {#validate-data} - -:::note -データセットは年に1回以上変更されるため、あなたのカウントはこの文書にあるものと一致しないかもしれません。 -::: - -クエリ: - -```sql -SELECT count() -FROM NYPD_Complaint -``` - -結果: - -```text -┌─count()─┐ -│ 208993 │ -└─────────┘ - -1 row in set. Elapsed: 0.001 sec. 
-``` - -ClickHouse内のデータセットのサイズは、元のTSVファイルのわずか12%です。元のTSVファイルのサイズとテーブルのサイズを比較します: - -クエリ: - -```sql -SELECT formatReadableSize(total_bytes) -FROM system.tables -WHERE name = 'NYPD_Complaint' -``` - -結果: -```text -┌─formatReadableSize(total_bytes)─┐ -│ 8.63 MiB │ -└─────────────────────────────────┘ -``` - - -## 一部のクエリを実行する {#run-queries} - -### クエリ1. 月ごとの苦情数を比較する {#query-1-compare-the-number-of-complaints-by-month} - -クエリ: - -```sql -SELECT - dateName('month', date_reported) AS month, - count() AS complaints, - bar(complaints, 0, 50000, 80) -FROM NYPD_Complaint -GROUP BY month -ORDER BY complaints DESC -``` - -結果: -```response -Query id: 7fbd4244-b32a-4acf-b1f3-c3aa198e74d9 - -┌─month─────┬─complaints─┬─bar(count(), 0, 50000, 80)───────────────────────────────┐ -│ March │ 34536 │ ███████████████████████████████████████████████████████▎ │ -│ May │ 34250 │ ██████████████████████████████████████████████████████▋ │ -│ April │ 32541 │ ████████████████████████████████████████████████████ │ -│ January │ 30806 │ █████████████████████████████████████████████████▎ │ -│ February │ 28118 │ ████████████████████████████████████████████▊ │ -│ November │ 7474 │ ███████████▊ │ -│ December │ 7223 │ ███████████▌ │ -│ October │ 7070 │ ███████████▎ │ -│ September │ 6910 │ ███████████ │ -│ August │ 6801 │ ██████████▊ │ -│ June │ 6779 │ ██████████▋ │ -│ July │ 6485 │ ██████████▍ │ -└───────────┴────────────┴──────────────────────────────────────────────────────────┘ - -12 rows in set. Elapsed: 0.006 sec. Processed 208.99 thousand rows, 417.99 KB (37.48 million rows/s., 74.96 MB/s.) -``` - -### クエリ2. 区ごとの苦情の総数を比較する {#query-2-compare-total-number-of-complaints-by-borough} - -クエリ: - -```sql -SELECT - borough, - count() AS complaints, - bar(complaints, 0, 125000, 60) -FROM NYPD_Complaint -GROUP BY borough -ORDER BY complaints DESC -``` - -結果: -```response -Query id: 8cdcdfd4-908f-4be0-99e3-265722a2ab8d - -┌─borough───────┬─complaints─┬─bar(count(), 0, 125000, 60)──┐ -│ BROOKLYN │ 57947 │ ███████████████████████████▋ │ -│ MANHATTAN │ 53025 │ █████████████████████████▍ │ -│ QUEENS │ 44875 │ █████████████████████▌ │ -│ BRONX │ 44260 │ █████████████████████▏ │ -│ STATEN ISLAND │ 8503 │ ████ │ -│ (null) │ 383 │ ▏ │ -└───────────────┴────────────┴──────────────────────────────┘ - -6 rows in set. Elapsed: 0.008 sec. Processed 208.99 thousand rows, 209.43 KB (27.14 million rows/s., 27.20 MB/s.) 
-``` - -## 次のステップ {#next-steps} - -[ClickHouseにおけるスパースプライマリインデックスの実践的な紹介](/guides/best-practices/sparse-primary-indexes.md)では、ClickHouseのインデックスが従来のリレーショナルデータベースと比較して異なる点、ClickHouseがスパースプライマリインデックスをどのように構築および使用するか、そしてインデクシングのベストプラクティスについて説明します。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/nypd_complaint_data.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/nypd_complaint_data.md.hash deleted file mode 100644 index cbb907970ea..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/nypd_complaint_data.md.hash +++ /dev/null @@ -1 +0,0 @@ -0596da80b81c6b77 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/ontime.md b/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/ontime.md deleted file mode 100644 index aea8b5a3b90..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/ontime.md +++ /dev/null @@ -1,399 +0,0 @@ ---- -description: 'Dataset containing the on-time performance of airline flights' -sidebar_label: 'OnTime Airline Flight Data' -slug: '/getting-started/example-datasets/ontime' -title: 'OnTime' ---- - - - -このデータセットは、交通統計局のデータを含んでいます。 - -## テーブルの作成 {#creating-a-table} - -```sql -CREATE TABLE `ontime` -( - `Year` UInt16, - `Quarter` UInt8, - `Month` UInt8, - `DayofMonth` UInt8, - `DayOfWeek` UInt8, - `FlightDate` Date, - `Reporting_Airline` LowCardinality(String), - `DOT_ID_Reporting_Airline` Int32, - `IATA_CODE_Reporting_Airline` LowCardinality(String), - `Tail_Number` LowCardinality(String), - `Flight_Number_Reporting_Airline` LowCardinality(String), - `OriginAirportID` Int32, - `OriginAirportSeqID` Int32, - `OriginCityMarketID` Int32, - `Origin` FixedString(5), - `OriginCityName` LowCardinality(String), - `OriginState` FixedString(2), - `OriginStateFips` FixedString(2), - `OriginStateName` LowCardinality(String), - `OriginWac` Int32, - `DestAirportID` Int32, - `DestAirportSeqID` Int32, - `DestCityMarketID` Int32, - `Dest` FixedString(5), - `DestCityName` LowCardinality(String), - `DestState` FixedString(2), - `DestStateFips` FixedString(2), - `DestStateName` LowCardinality(String), - `DestWac` Int32, - `CRSDepTime` Int32, - `DepTime` Int32, - `DepDelay` Int32, - `DepDelayMinutes` Int32, - `DepDel15` Int32, - `DepartureDelayGroups` LowCardinality(String), - `DepTimeBlk` LowCardinality(String), - `TaxiOut` Int32, - `WheelsOff` LowCardinality(String), - `WheelsOn` LowCardinality(String), - `TaxiIn` Int32, - `CRSArrTime` Int32, - `ArrTime` Int32, - `ArrDelay` Int32, - `ArrDelayMinutes` Int32, - `ArrDel15` Int32, - `ArrivalDelayGroups` LowCardinality(String), - `ArrTimeBlk` LowCardinality(String), - `Cancelled` Int8, - `CancellationCode` FixedString(1), - `Diverted` Int8, - `CRSElapsedTime` Int32, - `ActualElapsedTime` Int32, - `AirTime` Int32, - `Flights` Int32, - `Distance` Int32, - `DistanceGroup` Int8, - `CarrierDelay` Int32, - `WeatherDelay` Int32, - `NASDelay` Int32, - `SecurityDelay` Int32, - `LateAircraftDelay` Int32, - `FirstDepTime` Int16, - `TotalAddGTime` Int16, - `LongestAddGTime` Int16, - `DivAirportLandings` Int8, - `DivReachedDest` Int8, - `DivActualElapsedTime` Int16, - `DivArrDelay` Int16, - `DivDistance` Int16, - `Div1Airport` LowCardinality(String), - `Div1AirportID` Int32, - `Div1AirportSeqID` Int32, - `Div1WheelsOn` Int16, - `Div1TotalGTime` Int16, - `Div1LongestGTime` Int16, - `Div1WheelsOff` Int16, - `Div1TailNum` 
LowCardinality(String), - `Div2Airport` LowCardinality(String), - `Div2AirportID` Int32, - `Div2AirportSeqID` Int32, - `Div2WheelsOn` Int16, - `Div2TotalGTime` Int16, - `Div2LongestGTime` Int16, - `Div2WheelsOff` Int16, - `Div2TailNum` LowCardinality(String), - `Div3Airport` LowCardinality(String), - `Div3AirportID` Int32, - `Div3AirportSeqID` Int32, - `Div3WheelsOn` Int16, - `Div3TotalGTime` Int16, - `Div3LongestGTime` Int16, - `Div3WheelsOff` Int16, - `Div3TailNum` LowCardinality(String), - `Div4Airport` LowCardinality(String), - `Div4AirportID` Int32, - `Div4AirportSeqID` Int32, - `Div4WheelsOn` Int16, - `Div4TotalGTime` Int16, - `Div4LongestGTime` Int16, - `Div4WheelsOff` Int16, - `Div4TailNum` LowCardinality(String), - `Div5Airport` LowCardinality(String), - `Div5AirportID` Int32, - `Div5AirportSeqID` Int32, - `Div5WheelsOn` Int16, - `Div5TotalGTime` Int16, - `Div5LongestGTime` Int16, - `Div5WheelsOff` Int16, - `Div5TailNum` LowCardinality(String) -) ENGINE = MergeTree - ORDER BY (Year, Quarter, Month, DayofMonth, FlightDate, IATA_CODE_Reporting_Airline); -``` - -## 生データからのインポート {#import-from-raw-data} - -データをダウンロードします: - -```bash -wget --no-check-certificate --continue https://transtats.bts.gov/PREZIP/On_Time_Reporting_Carrier_On_Time_Performance_1987_present_{1987..2022}_{1..12}.zip -``` - -複数のスレッドを用いたデータの読み込み: - -```bash -ls -1 *.zip | xargs -I{} -P $(nproc) bash -c "echo {}; unzip -cq {} '*.csv' | sed 's/\.00//g' | clickhouse-client --input_format_csv_empty_as_default 1 --query='INSERT INTO ontime FORMAT CSVWithNames'" -``` - -(サーバーでメモリ不足やその他の問題が発生する場合は、 `-P $(nproc)` 部分を削除してください) - -## 保存したコピーからのインポート {#import-from-a-saved-copy} - -別の方法として、次のクエリを用いて保存したコピーからデータをインポートすることができます: - -```sql -INSERT INTO ontime SELECT * FROM s3('https://clickhouse-public-datasets.s3.amazonaws.com/ontime/csv_by_year/*.csv.gz', CSVWithNames) SETTINGS max_insert_threads = 40; -``` - -スナップショットは2022-05-29に作成されました。 - -## クエリ {#queries} - -Q0. - -```sql -SELECT avg(c1) -FROM -( - SELECT Year, Month, count(*) AS c1 - FROM ontime - GROUP BY Year, Month -); -``` - -Q1. 2000年から2008年までの1日あたりのフライト数 - -```sql -SELECT DayOfWeek, count(*) AS c -FROM ontime -WHERE Year>=2000 AND Year<=2008 -GROUP BY DayOfWeek -ORDER BY c DESC; -``` - -Q2. 10分以上遅延したフライト数、曜日別、2000-2008年 - -```sql -SELECT DayOfWeek, count(*) AS c -FROM ontime -WHERE DepDelay>10 AND Year>=2000 AND Year<=2008 -GROUP BY DayOfWeek -ORDER BY c DESC; -``` - -Q3. 空港別の遅延数、2000-2008年 - -```sql -SELECT Origin, count(*) AS c -FROM ontime -WHERE DepDelay>10 AND Year>=2000 AND Year<=2008 -GROUP BY Origin -ORDER BY c DESC -LIMIT 10; -``` - -Q4. 2007年のキャリア別遅延数 - -```sql -SELECT IATA_CODE_Reporting_Airline AS Carrier, count(*) -FROM ontime -WHERE DepDelay>10 AND Year=2007 -GROUP BY Carrier -ORDER BY count(*) DESC; -``` - -Q5. 2007年のキャリア別遅延の割合 - -```sql -SELECT Carrier, c, c2, c*100/c2 as c3 -FROM -( - SELECT - IATA_CODE_Reporting_Airline AS Carrier, - count(*) AS c - FROM ontime - WHERE DepDelay>10 - AND Year=2007 - GROUP BY Carrier -) q -JOIN -( - SELECT - IATA_CODE_Reporting_Airline AS Carrier, - count(*) AS c2 - FROM ontime - WHERE Year=2007 - GROUP BY Carrier -) qq USING Carrier -ORDER BY c3 DESC; -``` - -より良いバージョンの同じクエリ: - -```sql -SELECT IATA_CODE_Reporting_Airline AS Carrier, avg(DepDelay>10)*100 AS c3 -FROM ontime -WHERE Year=2007 -GROUP BY Carrier -ORDER BY c3 DESC -``` - -Q6. 
同じリクエストをより広い年範囲で、2000-2008年 - -```sql -SELECT Carrier, c, c2, c*100/c2 as c3 -FROM -( - SELECT - IATA_CODE_Reporting_Airline AS Carrier, - count(*) AS c - FROM ontime - WHERE DepDelay>10 - AND Year>=2000 AND Year<=2008 - GROUP BY Carrier -) q -JOIN -( - SELECT - IATA_CODE_Reporting_Airline AS Carrier, - count(*) AS c2 - FROM ontime - WHERE Year>=2000 AND Year<=2008 - GROUP BY Carrier -) qq USING Carrier -ORDER BY c3 DESC; -``` - -より良いバージョンの同じクエリ: - -```sql -SELECT IATA_CODE_Reporting_Airline AS Carrier, avg(DepDelay>10)*100 AS c3 -FROM ontime -WHERE Year>=2000 AND Year<=2008 -GROUP BY Carrier -ORDER BY c3 DESC; -``` - -Q7. 10分以上遅延したフライトの割合、年別 - -```sql -SELECT Year, c1/c2 -FROM -( - select - Year, - count(*)*100 as c1 - from ontime - WHERE DepDelay>10 - GROUP BY Year -) q -JOIN -( - select - Year, - count(*) as c2 - from ontime - GROUP BY Year -) qq USING (Year) -ORDER BY Year; -``` - -より良いバージョンの同じクエリ: - -```sql -SELECT Year, avg(DepDelay>10)*100 -FROM ontime -GROUP BY Year -ORDER BY Year; -``` - -Q8. 様々な年範囲での直接接続されている都市数による人気のある目的地 - -```sql -SELECT DestCityName, uniqExact(OriginCityName) AS u -FROM ontime -WHERE Year >= 2000 and Year <= 2010 -GROUP BY DestCityName -ORDER BY u DESC LIMIT 10; -``` - -Q9. - -```sql -SELECT Year, count(*) AS c1 -FROM ontime -GROUP BY Year; -``` - -Q10. - -```sql -SELECT - min(Year), max(Year), IATA_CODE_Reporting_Airline AS Carrier, count(*) AS cnt, - sum(ArrDelayMinutes>30) AS flights_delayed, - round(sum(ArrDelayMinutes>30)/count(*),2) AS rate -FROM ontime -WHERE - DayOfWeek NOT IN (6,7) AND OriginState NOT IN ('AK', 'HI', 'PR', 'VI') - AND DestState NOT IN ('AK', 'HI', 'PR', 'VI') - AND FlightDate < '2010-01-01' -GROUP by Carrier -HAVING cnt>100000 and max(Year)>1990 -ORDER by rate DESC -LIMIT 1000; -``` - -ボーナス: - -```sql -SELECT avg(cnt) -FROM -( - SELECT Year,Month,count(*) AS cnt - FROM ontime - WHERE DepDel15=1 - GROUP BY Year,Month -); - -SELECT avg(c1) FROM -( - SELECT Year,Month,count(*) AS c1 - FROM ontime - GROUP BY Year,Month -); - -SELECT DestCityName, uniqExact(OriginCityName) AS u -FROM ontime -GROUP BY DestCityName -ORDER BY u DESC -LIMIT 10; - -SELECT OriginCityName, DestCityName, count() AS c -FROM ontime -GROUP BY OriginCityName, DestCityName -ORDER BY c DESC -LIMIT 10; - -SELECT OriginCityName, count() AS c -FROM ontime -GROUP BY OriginCityName -ORDER BY c DESC -LIMIT 10; -``` - -データをPlaygroundで操作することもできます。 [例](https://sql.clickhouse.com?query_id=M4FSVBVMSHY98NKCQP8N4K)。 - -このパフォーマンステストはVadim Tkachenkoによって作成されました。次を参照してください: - -- https://www.percona.com/blog/2009/10/02/analyzing-air-traffic-performance-with-infobright-and-monetdb/ -- https://www.percona.com/blog/2009/10/26/air-traffic-queries-in-luciddb/ -- https://www.percona.com/blog/2009/11/02/air-traffic-queries-in-infinidb-early-alpha/ -- https://www.percona.com/blog/2014/04/21/using-apache-hadoop-and-impala-together-with-mysql-for-data-analysis/ -- https://www.percona.com/blog/2016/01/07/apache-spark-with-air-ontime-performance-data/ -- http://nickmakos.blogspot.ru/2012/08/analyzing-air-traffic-performance-with.html diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/ontime.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/ontime.md.hash deleted file mode 100644 index f32f97dc2fc..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/ontime.md.hash +++ /dev/null @@ -1 +0,0 @@ -9a929aafea10d051 diff --git 
a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/opensky.md b/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/opensky.md deleted file mode 100644 index 1af140e432e..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/opensky.md +++ /dev/null @@ -1,423 +0,0 @@ ---- -description: 'The data in this dataset is derived and cleaned from the full OpenSky - dataset to illustrate the development of air traffic during the COVID-19 pandemic.' -sidebar_label: 'Air Traffic Data' -slug: '/getting-started/example-datasets/opensky' -title: 'Crowdsourced air traffic data from The OpenSky Network 2020' ---- - - - -The data in this dataset is derived and cleaned from the full OpenSky dataset to illustrate the development of air traffic during the COVID-19 pandemic. It spans all flights seen by the network's more than 2500 members since 1 January 2019. More data will be periodically included in the dataset until the end of the COVID-19 pandemic. - -Source: https://zenodo.org/records/5092942 - -Martin Strohmeier, Xavier Olive, Jannis Luebbe, Matthias Schaefer, and Vincent Lenders -"Crowdsourced air traffic data from the OpenSky Network 2019–2020" -Earth System Science Data 13(2), 2021 -https://doi.org/10.5194/essd-13-357-2021 - -## ダウンロードデータセット {#download-dataset} - -コマンドを実行します: - -```bash -wget -O- https://zenodo.org/records/5092942 | grep -oE 'https://zenodo.org/records/5092942/files/flightlist_[0-9]+_[0-9]+\.csv\.gz' | xargs wget -``` - -ダウンロードには良好なインターネット接続で約2分かかります。合計サイズ4.3 GBの30ファイルがあります。 - -## テーブルを作成 {#create-table} - -```sql -CREATE TABLE opensky -( - callsign String, - number String, - icao24 String, - registration String, - typecode String, - origin String, - destination String, - firstseen DateTime, - lastseen DateTime, - day DateTime, - latitude_1 Float64, - longitude_1 Float64, - altitude_1 Float64, - latitude_2 Float64, - longitude_2 Float64, - altitude_2 Float64 -) ENGINE = MergeTree ORDER BY (origin, destination, callsign); -``` - -## データをインポート {#import-data} - -ClickHouseにデータを並行してアップロードします: - -```bash -ls -1 flightlist_*.csv.gz | xargs -P100 -I{} bash -c 'gzip -c -d "{}" | clickhouse-client --date_time_input_format best_effort --query "INSERT INTO opensky FORMAT CSVWithNames"' -``` - -- ここでは、ファイルのリスト(`ls -1 flightlist_*.csv.gz`)を並行処理のために`xargs`に渡します。 -`xargs -P100`は最大100の並行ワーカーを使用することを指定しますが、ファイルは30だけなので、ワーカーの数は30だけになります。 -- 各ファイルについて、`xargs`は`bash -c`でスクリプトを実行します。スクリプトでは`{}`の形の置換があり、`xargs`コマンドはファイル名をそれに置き換えます(`-I{}`で`xargs`に要求しています)。 -- スクリプトはファイルをデコンプレッションして(`gzip -c -d "{}"`)標準出力(`-c`パラメータ)に出力し、その出力を`clickhouse-client`にリダイレクトします。 -- また、ISO-8601形式のタイムゾーンオフセットを認識するために、[DateTime](../../sql-reference/data-types/datetime.md)フィールドを拡張パーサー([--date_time_input_format best_effort](/operations/settings/formats#date_time_input_format))で解析するように要求しました。 - -最後に、`clickhouse-client`が挿入を行います。入力データは[CSVWithNames](../../interfaces/formats.md#csvwithnames)形式で読み取ります。 - -並行アップロードには24秒かかります。 - -並行アップロードが好まれない場合は、こちらがシーケンシャルバリアントです: - -```bash -for file in flightlist_*.csv.gz; do gzip -c -d "$file" | clickhouse-client --date_time_input_format best_effort --query "INSERT INTO opensky FORMAT CSVWithNames"; done -``` - -## データの検証 {#validate-data} - -クエリ: - -```sql -SELECT count() FROM opensky; -``` - -結果: - -```text -┌──count()─┐ -│ 66010819 │ -└──────────┘ -``` - -ClickHouseのデータセットサイズはわずか2.66 GiBです。確認してください。 - -クエリ: - -```sql -SELECT formatReadableSize(total_bytes) FROM system.tables WHERE name = 'opensky'; 
-``` - -結果: - -```text -┌─formatReadableSize(total_bytes)─┐ -│ 2.66 GiB │ -└─────────────────────────────────┘ -``` - -## いくつかのクエリを実行 {#run-queries} - -総移動距離は680億キロメートルです。 - -クエリ: - -```sql -SELECT formatReadableQuantity(sum(geoDistance(longitude_1, latitude_1, longitude_2, latitude_2)) / 1000) FROM opensky; -``` - -結果: - -```text -┌─formatReadableQuantity(divide(sum(geoDistance(longitude_1, latitude_1, longitude_2, latitude_2)), 1000))─┐ -│ 68.72 billion │ -└──────────────────────────────────────────────────────────────────────────────────────────────────────────┘ -``` - -平均フライト距離は約1000 kmです。 - -クエリ: - -```sql -SELECT round(avg(geoDistance(longitude_1, latitude_1, longitude_2, latitude_2)), 2) FROM opensky; -``` - -結果: - -```text - ┌─round(avg(geoDistance(longitude_1, latitude_1, longitude_2, latitude_2)), 2)─┐ -1. │ 1041090.67 │ -- 1.04 million - └──────────────────────────────────────────────────────────────────────────────┘ -``` - -### 最も多忙な出発空港と平均距離 {#busy-airports-average-distance} - -クエリ: - -```sql -SELECT - origin, - count(), - round(avg(geoDistance(longitude_1, latitude_1, longitude_2, latitude_2))) AS distance, - bar(distance, 0, 10000000, 100) AS bar -FROM opensky -WHERE origin != '' -GROUP BY origin -ORDER BY count() DESC -LIMIT 100; -``` - -結果: - -```text - ┌─origin─┬─count()─┬─distance─┬─bar────────────────────────────────────┐ - 1. │ KORD │ 745007 │ 1546108 │ ███████████████▍ │ - 2. │ KDFW │ 696702 │ 1358721 │ █████████████▌ │ - 3. │ KATL │ 667286 │ 1169661 │ ███████████▋ │ - 4. │ KDEN │ 582709 │ 1287742 │ ████████████▊ │ - 5. │ KLAX │ 581952 │ 2628393 │ ██████████████████████████▎ │ - 6. │ KLAS │ 447789 │ 1336967 │ █████████████▎ │ - 7. │ KPHX │ 428558 │ 1345635 │ █████████████▍ │ - 8. │ KSEA │ 412592 │ 1757317 │ █████████████████▌ │ - 9. │ KCLT │ 404612 │ 880355 │ ████████▋ │ - 10. │ VIDP │ 363074 │ 1445052 │ ██████████████▍ │ - 11. │ EDDF │ 362643 │ 2263960 │ ██████████████████████▋ │ - 12. │ KSFO │ 361869 │ 2445732 │ ████████████████████████▍ │ - 13. │ KJFK │ 349232 │ 2996550 │ █████████████████████████████▊ │ - 14. │ KMSP │ 346010 │ 1287328 │ ████████████▋ │ - 15. │ LFPG │ 344748 │ 2206203 │ ██████████████████████ │ - 16. │ EGLL │ 341370 │ 3216593 │ ████████████████████████████████▏ │ - 17. │ EHAM │ 340272 │ 2116425 │ █████████████████████▏ │ - 18. │ KEWR │ 337696 │ 1826545 │ ██████████████████▎ │ - 19. │ KPHL │ 320762 │ 1291761 │ ████████████▊ │ - 20. │ OMDB │ 308855 │ 2855706 │ ████████████████████████████▌ │ - 21. │ UUEE │ 307098 │ 1555122 │ ███████████████▌ │ - 22. │ KBOS │ 304416 │ 1621675 │ ████████████████▏ │ - 23. │ LEMD │ 291787 │ 1695097 │ ████████████████▊ │ - 24. │ YSSY │ 272979 │ 1875298 │ ██████████████████▋ │ - 25. │ KMIA │ 265121 │ 1923542 │ ███████████████████▏ │ - 26. │ ZGSZ │ 263497 │ 745086 │ ███████▍ │ - 27. │ EDDM │ 256691 │ 1361453 │ █████████████▌ │ - 28. │ WMKK │ 254264 │ 1626688 │ ████████████████▎ │ - 29. │ CYYZ │ 251192 │ 2175026 │ █████████████████████▋ │ - 30. │ KLGA │ 248699 │ 1106935 │ ███████████ │ - 31. │ VHHH │ 248473 │ 3457658 │ ██████████████████████████████████▌ │ - 32. │ RJTT │ 243477 │ 1272744 │ ████████████▋ │ - 33. │ KBWI │ 241440 │ 1187060 │ ███████████▋ │ - 34. │ KIAD │ 239558 │ 1683485 │ ████████████████▋ │ - 35. │ KIAH │ 234202 │ 1538335 │ ███████████████▍ │ - 36. │ KFLL │ 223447 │ 1464410 │ ██████████████▋ │ - 37. │ KDAL │ 212055 │ 1082339 │ ██████████▋ │ - 38. │ KDCA │ 207883 │ 1013359 │ ██████████▏ │ - 39. │ LIRF │ 207047 │ 1427965 │ ██████████████▎ │ - 40. │ PANC │ 206007 │ 2525359 │ █████████████████████████▎ │ - 41. 
│ LTFJ │ 205415 │ 860470 │ ████████▌ │ - 42. │ KDTW │ 204020 │ 1106716 │ ███████████ │ - 43. │ VABB │ 201679 │ 1300865 │ █████████████ │ - 44. │ OTHH │ 200797 │ 3759544 │ █████████████████████████████████████▌ │ - 45. │ KMDW │ 200796 │ 1232551 │ ████████████▎ │ - 46. │ KSAN │ 198003 │ 1495195 │ ██████████████▊ │ - 47. │ KPDX │ 197760 │ 1269230 │ ████████████▋ │ - 48. │ SBGR │ 197624 │ 2041697 │ ████████████████████▍ │ - 49. │ VOBL │ 189011 │ 1040180 │ ██████████▍ │ - 50. │ LEBL │ 188956 │ 1283190 │ ████████████▋ │ - 51. │ YBBN │ 188011 │ 1253405 │ ████████████▌ │ - 52. │ LSZH │ 187934 │ 1572029 │ ███████████████▋ │ - 53. │ YMML │ 187643 │ 1870076 │ ██████████████████▋ │ - 54. │ RCTP │ 184466 │ 2773976 │ ███████████████████████████▋ │ - 55. │ KSNA │ 180045 │ 778484 │ ███████▋ │ - 56. │ EGKK │ 176420 │ 1694770 │ ████████████████▊ │ - 57. │ LOWW │ 176191 │ 1274833 │ ████████████▋ │ - 58. │ UUDD │ 176099 │ 1368226 │ █████████████▋ │ - 59. │ RKSI │ 173466 │ 3079026 │ ██████████████████████████████▋ │ - 60. │ EKCH │ 172128 │ 1229895 │ ████████████▎ │ - 61. │ KOAK │ 171119 │ 1114447 │ ███████████▏ │ - 62. │ RPLL │ 170122 │ 1440735 │ ██████████████▍ │ - 63. │ KRDU │ 167001 │ 830521 │ ████████▎ │ - 64. │ KAUS │ 164524 │ 1256198 │ ████████████▌ │ - 65. │ KBNA │ 163242 │ 1022726 │ ██████████▏ │ - 66. │ KSDF │ 162655 │ 1380867 │ █████████████▋ │ - 67. │ ENGM │ 160732 │ 910108 │ █████████ │ - 68. │ LIMC │ 160696 │ 1564620 │ ███████████████▋ │ - 69. │ KSJC │ 159278 │ 1081125 │ ██████████▋ │ - 70. │ KSTL │ 157984 │ 1026699 │ ██████████▎ │ - 71. │ UUWW │ 156811 │ 1261155 │ ████████████▌ │ - 72. │ KIND │ 153929 │ 987944 │ █████████▊ │ - 73. │ ESSA │ 153390 │ 1203439 │ ████████████ │ - 74. │ KMCO │ 153351 │ 1508657 │ ███████████████ │ - 75. │ KDVT │ 152895 │ 74048 │ ▋ │ - 76. │ VTBS │ 152645 │ 2255591 │ ██████████████████████▌ │ - 77. │ CYVR │ 149574 │ 2027413 │ ████████████████████▎ │ - 78. │ EIDW │ 148723 │ 1503985 │ ███████████████ │ - 79. │ LFPO │ 143277 │ 1152964 │ ███████████▌ │ - 80. │ EGSS │ 140830 │ 1348183 │ █████████████▍ │ - 81. │ KAPA │ 140776 │ 420441 │ ████▏ │ - 82. │ KHOU │ 138985 │ 1068806 │ ██████████▋ │ - 83. │ KTPA │ 138033 │ 1338223 │ █████████████▍ │ - 84. │ KFFZ │ 137333 │ 55397 │ ▌ │ - 85. │ NZAA │ 136092 │ 1581264 │ ███████████████▋ │ - 86. │ YPPH │ 133916 │ 1271550 │ ████████████▋ │ - 87. │ RJBB │ 133522 │ 1805623 │ ██████████████████ │ - 88. │ EDDL │ 133018 │ 1265919 │ ████████████▋ │ - 89. │ ULLI │ 130501 │ 1197108 │ ███████████▊ │ - 90. │ KIWA │ 127195 │ 250876 │ ██▌ │ - 91. │ KTEB │ 126969 │ 1189414 │ ███████████▊ │ - 92. │ VOMM │ 125616 │ 1127757 │ ███████████▎ │ - 93. │ LSGG │ 123998 │ 1049101 │ ██████████▍ │ - 94. │ LPPT │ 122733 │ 1779187 │ █████████████████▋ │ - 95. │ WSSS │ 120493 │ 3264122 │ ████████████████████████████████▋ │ - 96. │ EBBR │ 118539 │ 1579939 │ ███████████████▋ │ - 97. │ VTBD │ 118107 │ 661627 │ ██████▌ │ - 98. │ KVNY │ 116326 │ 692960 │ ██████▊ │ - 99. │ EDDT │ 115122 │ 941740 │ █████████▍ │ -100. │ EFHK │ 114860 │ 1629143 │ ████████████████▎ │ - └────────┴─────────┴──────────┴────────────────────────────────────────┘ -``` - -### 3つの主要なモスクワ空港からのフライト数、週別 {#flights-from-moscow} - -クエリ: - -```sql -SELECT - toMonday(day) AS k, - count() AS c, - bar(c, 0, 10000, 100) AS bar -FROM opensky -WHERE origin IN ('UUEE', 'UUDD', 'UUWW') -GROUP BY k -ORDER BY k ASC; -``` - -結果: - -```text - ┌──────────k─┬────c─┬─bar──────────────────────────────────────────────────────────────────────────┐ - 1. 
│ 2018-12-31 │ 5248 │ ████████████████████████████████████████████████████▍ │ - 2. │ 2019-01-07 │ 6302 │ ███████████████████████████████████████████████████████████████ │ - 3. │ 2019-01-14 │ 5701 │ █████████████████████████████████████████████████████████ │ - 4. │ 2019-01-21 │ 5638 │ ████████████████████████████████████████████████████████▍ │ - 5. │ 2019-01-28 │ 5731 │ █████████████████████████████████████████████████████████▎ │ - 6. │ 2019-02-04 │ 5683 │ ████████████████████████████████████████████████████████▋ │ - 7. │ 2019-02-11 │ 5759 │ █████████████████████████████████████████████████████████▌ │ - 8. │ 2019-02-18 │ 5736 │ █████████████████████████████████████████████████████████▎ │ - 9. │ 2019-02-25 │ 5873 │ ██████████████████████████████████████████████████████████▋ │ - 10. │ 2019-03-04 │ 5965 │ ███████████████████████████████████████████████████████████▋ │ - 11. │ 2019-03-11 │ 5900 │ ███████████████████████████████████████████████████████████ │ - 12. │ 2019-03-18 │ 5823 │ ██████████████████████████████████████████████████████████▏ │ - 13. │ 2019-03-25 │ 5899 │ ██████████████████████████████████████████████████████████▊ │ - 14. │ 2019-04-01 │ 6043 │ ████████████████████████████████████████████████████████████▍ │ - 15. │ 2019-04-08 │ 6098 │ ████████████████████████████████████████████████████████████▊ │ - 16. │ 2019-04-15 │ 6196 │ █████████████████████████████████████████████████████████████▊ │ - 17. │ 2019-04-22 │ 6486 │ ████████████████████████████████████████████████████████████████▋ │ - 18. │ 2019-04-29 │ 6682 │ ██████████████████████████████████████████████████████████████████▋ │ - 19. │ 2019-05-06 │ 6739 │ ███████████████████████████████████████████████████████████████████▍ │ - 20. │ 2019-05-13 │ 6600 │ ██████████████████████████████████████████████████████████████████ │ - 21. │ 2019-05-20 │ 6575 │ █████████████████████████████████████████████████████████████████▋ │ - 22. │ 2019-05-27 │ 6786 │ ███████████████████████████████████████████████████████████████████▋ │ - 23. │ 2019-06-03 │ 6872 │ ████████████████████████████████████████████████████████████████████▋ │ - 24. │ 2019-06-10 │ 7045 │ ██████████████████████████████████████████████████████████████████████▍ │ - 25. │ 2019-06-17 │ 7045 │ ██████████████████████████████████████████████████████████████████████▍ │ - 26. │ 2019-06-24 │ 6852 │ ████████████████████████████████████████████████████████████████████▌ │ - 27. │ 2019-07-01 │ 7248 │ ████████████████████████████████████████████████████████████████████████▍ │ - 28. │ 2019-07-08 │ 7284 │ ████████████████████████████████████████████████████████████████████████▋ │ - 29. │ 2019-07-15 │ 7142 │ ███████████████████████████████████████████████████████████████████████▍ │ - 30. │ 2019-07-22 │ 7108 │ ███████████████████████████████████████████████████████████████████████ │ - 31. │ 2019-07-29 │ 7251 │ ████████████████████████████████████████████████████████████████████████▌ │ - 32. │ 2019-08-05 │ 7403 │ ██████████████████████████████████████████████████████████████████████████ │ - 33. │ 2019-08-12 │ 7457 │ ██████████████████████████████████████████████████████████████████████████▌ │ - 34. │ 2019-08-19 │ 7502 │ ███████████████████████████████████████████████████████████████████████████ │ - 35. │ 2019-08-26 │ 7540 │ ███████████████████████████████████████████████████████████████████████████▍ │ - 36. │ 2019-09-02 │ 7237 │ ████████████████████████████████████████████████████████████████████████▎ │ - 37. 
│ 2019-09-09 │ 7328 │ █████████████████████████████████████████████████████████████████████████▎ │ - 38. │ 2019-09-16 │ 5566 │ ███████████████████████████████████████████████████████▋ │ - 39. │ 2019-09-23 │ 7049 │ ██████████████████████████████████████████████████████████████████████▍ │ - 40. │ 2019-09-30 │ 6880 │ ████████████████████████████████████████████████████████████████████▋ │ - 41. │ 2019-10-07 │ 6518 │ █████████████████████████████████████████████████████████████████▏ │ - 42. │ 2019-10-14 │ 6688 │ ██████████████████████████████████████████████████████████████████▊ │ - 43. │ 2019-10-21 │ 6667 │ ██████████████████████████████████████████████████████████████████▋ │ - 44. │ 2019-10-28 │ 6303 │ ███████████████████████████████████████████████████████████████ │ - 45. │ 2019-11-04 │ 6298 │ ██████████████████████████████████████████████████████████████▊ │ - 46. │ 2019-11-11 │ 6137 │ █████████████████████████████████████████████████████████████▎ │ - 47. │ 2019-11-18 │ 6051 │ ████████████████████████████████████████████████████████████▌ │ - 48. │ 2019-11-25 │ 5820 │ ██████████████████████████████████████████████████████████▏ │ - 49. │ 2019-12-02 │ 5942 │ ███████████████████████████████████████████████████████████▍ │ - 50. │ 2019-12-09 │ 4891 │ ████████████████████████████████████████████████▊ │ - 51. │ 2019-12-16 │ 5682 │ ████████████████████████████████████████████████████████▋ │ - 52. │ 2019-12-23 │ 6111 │ █████████████████████████████████████████████████████████████ │ - 53. │ 2019-12-30 │ 5870 │ ██████████████████████████████████████████████████████████▋ │ - 54. │ 2020-01-06 │ 5953 │ ███████████████████████████████████████████████████████████▌ │ - 55. │ 2020-01-13 │ 5698 │ ████████████████████████████████████████████████████████▊ │ - 56. │ 2020-01-20 │ 5339 │ █████████████████████████████████████████████████████▍ │ - 57. │ 2020-01-27 │ 5566 │ ███████████████████████████████████████████████████████▋ │ - 58. │ 2020-02-03 │ 5801 │ ██████████████████████████████████████████████████████████ │ - 59. │ 2020-02-10 │ 5692 │ ████████████████████████████████████████████████████████▊ │ - 60. │ 2020-02-17 │ 5912 │ ███████████████████████████████████████████████████████████ │ - 61. │ 2020-02-24 │ 6031 │ ████████████████████████████████████████████████████████████▎ │ - 62. │ 2020-03-02 │ 6105 │ █████████████████████████████████████████████████████████████ │ - 63. │ 2020-03-09 │ 5823 │ ██████████████████████████████████████████████████████████▏ │ - 64. │ 2020-03-16 │ 4659 │ ██████████████████████████████████████████████▌ │ - 65. │ 2020-03-23 │ 3720 │ █████████████████████████████████████▏ │ - 66. │ 2020-03-30 │ 1720 │ █████████████████▏ │ - 67. │ 2020-04-06 │ 849 │ ████████▍ │ - 68. │ 2020-04-13 │ 710 │ ███████ │ - 69. │ 2020-04-20 │ 725 │ ███████▏ │ - 70. │ 2020-04-27 │ 920 │ █████████▏ │ - 71. │ 2020-05-04 │ 859 │ ████████▌ │ - 72. │ 2020-05-11 │ 1047 │ ██████████▍ │ - 73. │ 2020-05-18 │ 1135 │ ███████████▎ │ - 74. │ 2020-05-25 │ 1266 │ ████████████▋ │ - 75. │ 2020-06-01 │ 1793 │ █████████████████▊ │ - 76. │ 2020-06-08 │ 1979 │ ███████████████████▋ │ - 77. │ 2020-06-15 │ 2297 │ ██████████████████████▊ │ - 78. │ 2020-06-22 │ 2788 │ ███████████████████████████▊ │ - 79. │ 2020-06-29 │ 3389 │ █████████████████████████████████▊ │ - 80. │ 2020-07-06 │ 3545 │ ███████████████████████████████████▍ │ - 81. │ 2020-07-13 │ 3569 │ ███████████████████████████████████▋ │ - 82. │ 2020-07-20 │ 3784 │ █████████████████████████████████████▋ │ - 83. │ 2020-07-27 │ 3960 │ ███████████████████████████████████████▌ │ - 84. 
│ 2020-08-03 │ 4323 │ ███████████████████████████████████████████▏ │ - 85. │ 2020-08-10 │ 4581 │ █████████████████████████████████████████████▋ │ - 86. │ 2020-08-17 │ 4791 │ ███████████████████████████████████████████████▊ │ - 87. │ 2020-08-24 │ 4928 │ █████████████████████████████████████████████████▎ │ - 88. │ 2020-08-31 │ 4687 │ ██████████████████████████████████████████████▋ │ - 89. │ 2020-09-07 │ 4643 │ ██████████████████████████████████████████████▍ │ - 90. │ 2020-09-14 │ 4594 │ █████████████████████████████████████████████▊ │ - 91. │ 2020-09-21 │ 4478 │ ████████████████████████████████████████████▋ │ - 92. │ 2020-09-28 │ 4382 │ ███████████████████████████████████████████▋ │ - 93. │ 2020-10-05 │ 4261 │ ██████████████████████████████████████████▌ │ - 94. │ 2020-10-12 │ 4243 │ ██████████████████████████████████████████▍ │ - 95. │ 2020-10-19 │ 3941 │ ███████████████████████████████████████▍ │ - 96. │ 2020-10-26 │ 3616 │ ████████████████████████████████████▏ │ - 97. │ 2020-11-02 │ 3586 │ ███████████████████████████████████▋ │ - 98. │ 2020-11-09 │ 3403 │ ██████████████████████████████████ │ - 99. │ 2020-11-16 │ 3336 │ █████████████████████████████████▎ │ -100. │ 2020-11-23 │ 3230 │ ████████████████████████████████▎ │ -101. │ 2020-11-30 │ 3183 │ ███████████████████████████████▋ │ -102. │ 2020-12-07 │ 3285 │ ████████████████████████████████▋ │ -103. │ 2020-12-14 │ 3367 │ █████████████████████████████████▋ │ -104. │ 2020-12-21 │ 3748 │ █████████████████████████████████████▍ │ -105. │ 2020-12-28 │ 3986 │ ███████████████████████████████████████▋ │ -106. │ 2021-01-04 │ 3906 │ ███████████████████████████████████████ │ -107. │ 2021-01-11 │ 3425 │ ██████████████████████████████████▎ │ -108. │ 2021-01-18 │ 3144 │ ███████████████████████████████▍ │ -109. │ 2021-01-25 │ 3115 │ ███████████████████████████████▏ │ -110. │ 2021-02-01 │ 3285 │ ████████████████████████████████▋ │ -111. │ 2021-02-08 │ 3321 │ █████████████████████████████████▏ │ -112. │ 2021-02-15 │ 3475 │ ██████████████████████████████████▋ │ -113. │ 2021-02-22 │ 3549 │ ███████████████████████████████████▍ │ -114. │ 2021-03-01 │ 3755 │ █████████████████████████████████████▌ │ -115. │ 2021-03-08 │ 3080 │ ██████████████████████████████▋ │ -116. │ 2021-03-15 │ 3789 │ █████████████████████████████████████▊ │ -117. │ 2021-03-22 │ 3804 │ ██████████████████████████████████████ │ -118. │ 2021-03-29 │ 4238 │ ██████████████████████████████████████████▍ │ -119. │ 2021-04-05 │ 4307 │ ███████████████████████████████████████████ │ -120. │ 2021-04-12 │ 4225 │ ██████████████████████████████████████████▎ │ -121. │ 2021-04-19 │ 4391 │ ███████████████████████████████████████████▊ │ -122. │ 2021-04-26 │ 4868 │ ████████████████████████████████████████████████▋ │ -123. │ 2021-05-03 │ 4977 │ █████████████████████████████████████████████████▋ │ -124. │ 2021-05-10 │ 5164 │ ███████████████████████████████████████████████████▋ │ -125. │ 2021-05-17 │ 4986 │ █████████████████████████████████████████████████▋ │ -126. │ 2021-05-24 │ 5024 │ ██████████████████████████████████████████████████▏ │ -127. │ 2021-05-31 │ 4824 │ ████████████████████████████████████████████████▏ │ -128. │ 2021-06-07 │ 5652 │ ████████████████████████████████████████████████████████▌ │ -129. │ 2021-06-14 │ 5613 │ ████████████████████████████████████████████████████████▏ │ -130. │ 2021-06-21 │ 6061 │ ████████████████████████████████████████████████████████████▌ │ -131. 
│ 2021-06-28 │ 2554 │ █████████████████████████▌ │ - └────────────┴──────┴──────────────────────────────────────────────────────────────────────────────┘ -``` - -### オンラインプレイグラウンド {#playground} - -このデータセットに対して他のクエリをテストするために、インタラクティブリソース[オンラインプレイグラウンド](https://sql.clickhouse.com)を使用できます。たとえば、[このように](https://sql.clickhouse.com?query_id=BIPDVQNIGVEZFQYFEFQB7O)。ただし、ここでは一時テーブルを作成することはできません。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/opensky.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/opensky.md.hash deleted file mode 100644 index c9b6703cd20..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/opensky.md.hash +++ /dev/null @@ -1 +0,0 @@ -f42ec65336d5f0e7 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/recipes.md b/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/recipes.md deleted file mode 100644 index 587692ebffd..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/recipes.md +++ /dev/null @@ -1,333 +0,0 @@ ---- -description: 'The RecipeNLG dataset, containing 2.2 million recipes' -sidebar_label: 'Recipes Dataset' -slug: '/getting-started/example-datasets/recipes' -title: 'Recipes Dataset' ---- - - - -The RecipeNLG dataset is available for download [here](https://recipenlg.cs.put.poznan.pl/dataset). It contains 2.2 million recipes. The size is slightly less than 1 GB. -## Download and Unpack the Dataset {#download-and-unpack-the-dataset} - -1. ダウンロードページに移動します [https://recipenlg.cs.put.poznan.pl/dataset](https://recipenlg.cs.put.poznan.pl/dataset). -2. 利用規約に同意し、zipファイルをダウンロードします。 -3. オプション: `md5sum dataset.zip` を使用してzipファイルを検証します。値は `3a168dfd0912bb034225619b3586ce76` と等しいはずです。 -4. 
`unzip dataset.zip` を使用してzipファイルを解凍します。`dataset` ディレクトリ内に `full_dataset.csv` ファイルが生成されます。 -## Create a Table {#create-a-table} - -clickhouse-client を実行し、次のCREATEクエリを実行します: - -```sql -CREATE TABLE recipes -( - title String, - ingredients Array(String), - directions Array(String), - link String, - source LowCardinality(String), - NER Array(String) -) ENGINE = MergeTree ORDER BY title; -``` -## Insert the Data {#insert-the-data} - -次のコマンドを実行します: - -```bash -clickhouse-client --query " - INSERT INTO recipes - SELECT - title, - JSONExtract(ingredients, 'Array(String)'), - JSONExtract(directions, 'Array(String)'), - link, - source, - JSONExtract(NER, 'Array(String)') - FROM input('num UInt32, title String, ingredients String, directions String, link String, source LowCardinality(String), NER String') - FORMAT CSVWithNames -" --input_format_with_names_use_header 0 --format_csv_allow_single_quote 0 --input_format_allow_errors_num 10 < full_dataset.csv -``` - -これはカスタムCSVを解析する方法の例であり、複数の調整を必要とします。 - -説明: -- データセットはCSV形式ですが、挿入時にいくつかの前処理が必要です。私たちはテーブル関数 [input](../../sql-reference/table-functions/input.md) を使用して前処理を実行します; -- CSVファイルの構造はテーブル関数 `input` の引数で指定されます; -- フィールド `num`(行番号)は不要です - 私たちはファイルから解析して無視します; -- `FORMAT CSVWithNames` を使用しますが、CSVのヘッダーは無視されます(コマンドラインパラメーター `--input_format_with_names_use_header 0` によって)、ヘッダーは最初のフィールドの名前を含んでいないためです; -- ファイルはCSV文字列を囲むためにダブルクオートのみを使用しています。一部の文字列はダブルクオートで囲まれていないため、単一引用符を文字列の囲みとして解析してはいけません - そのため、`--format_csv_allow_single_quote 0` パラメーターも追加しています; -- CSVからの一部の文字列は解析できません。なぜなら、それらは値の先頭に `\M/` シーケンスを含んでいるからです; CSV内でバックスラッシュで始まる唯一の値は `\N` であり、これはSQL NULLとして解析されます。`--input_format_allow_errors_num 10` パラメーターを追加し、最大10件の不正なレコードをスキップできます; -- 材料、手順、NERフィールドに配列があります; これらの配列は通常とは異なる形式で表現されています: JSONとして文字列にシリアライズされ、その後CSVに配置されます - これをStringとして解析し、次に [JSONExtract](../../sql-reference/functions/json-functions.md) 関数を使用してArrayに変換します。 -## Validate the Inserted Data {#validate-the-inserted-data} - -行数を確認することで検証します: - -クエリ: - -```sql -SELECT count() FROM recipes; -``` - -結果: - -```text -┌─count()─┐ -│ 2231142 │ -└─────────┘ -``` -## Example Queries {#example-queries} -### Top Components by the Number of Recipes: {#top-components-by-the-number-of-recipes} - -この例では、[arrayJoin](../../sql-reference/functions/array-join.md) 関数を使用して配列を行セットに展開する方法を学びます。 - -クエリ: - -```sql -SELECT - arrayJoin(NER) AS k, - count() AS c -FROM recipes -GROUP BY k -ORDER BY c DESC -LIMIT 50 -``` - -結果: - -```text -┌─k────────────────────┬──────c─┐ -│ salt │ 890741 │ -│ sugar │ 620027 │ -│ butter │ 493823 │ -│ flour │ 466110 │ -│ eggs │ 401276 │ -│ onion │ 372469 │ -│ garlic │ 358364 │ -│ milk │ 346769 │ -│ water │ 326092 │ -│ vanilla │ 270381 │ -│ olive oil │ 197877 │ -│ pepper │ 179305 │ -│ brown sugar │ 174447 │ -│ tomatoes │ 163933 │ -│ egg │ 160507 │ -│ baking powder │ 148277 │ -│ lemon juice │ 146414 │ -│ Salt │ 122558 │ -│ cinnamon │ 117927 │ -│ sour cream │ 116682 │ -│ cream cheese │ 114423 │ -│ margarine │ 112742 │ -│ celery │ 112676 │ -│ baking soda │ 110690 │ -│ parsley │ 102151 │ -│ chicken │ 101505 │ -│ onions │ 98903 │ -│ vegetable oil │ 91395 │ -│ oil │ 85600 │ -│ mayonnaise │ 84822 │ -│ pecans │ 79741 │ -│ nuts │ 78471 │ -│ potatoes │ 75820 │ -│ carrots │ 75458 │ -│ pineapple │ 74345 │ -│ soy sauce │ 70355 │ -│ black pepper │ 69064 │ -│ thyme │ 68429 │ -│ mustard │ 65948 │ -│ chicken broth │ 65112 │ -│ bacon │ 64956 │ -│ honey │ 64626 │ -│ oregano │ 64077 │ -│ ground beef │ 64068 │ -│ unsalted butter │ 63848 │ -│ mushrooms │ 61465 │ -│ Worcestershire sauce │ 59328 │ -│ cornstarch │ 58476 │ 
-│ green pepper │ 58388 │ -│ Cheddar cheese │ 58354 │ -└──────────────────────┴────────┘ - -50 rows in set. Elapsed: 0.112 sec. Processed 2.23 million rows, 361.57 MB (19.99 million rows/s., 3.24 GB/s.) -``` -### いちごを使った最も複雑なレシピ {#the-most-complex-recipes-with-strawberry} - -```sql -SELECT - title, - length(NER), - length(directions) -FROM recipes -WHERE has(NER, 'strawberry') -ORDER BY length(directions) DESC -LIMIT 10 -``` - -結果: - -```text -┌─title────────────────────────────────────────────────────────────┬─length(NER)─┬─length(directions)─┐ -│ チョコレート・ストロベリー・オレンジ ウェディングケーキ │ 24 │ 126 │ -│ ストロベリークリームチーズクランブルタルト │ 19 │ 47 │ -│ シャルロットスタイルのアイスクリーム │ 11 │ 45 │ -│ 罪深いほど美味しいミリオンレイヤーチョコレートレイヤーケーキ、ストロベリーを添えて │ 31 │ 45 │ -│ エルダーフラワーシャーベットを添えた甘いベリー │ 24 │ 44 │ -│ チョコレートストロベリームースケーキ │ 15 │ 42 │ -│ ルバーブシャルロット、ストロベリーとラム添え │ 20 │ 42 │ -│ シェフジョーのストロベリーバニラタルト │ 7 │ 37 │ -│ オールドファッションアイスクリームサンデーケーキ │ 17 │ 37 │ -│ スイカケーキ │ 16 │ 36 │ -└──────────────────────────────────────────────────────────────────┴─────────────┴────────────────────┘ - -10行のセットです。経過時間: 0.215秒。処理した行数: 223万行、サイズ: 1.48 GB (10.35百万行/秒、6.86 GB/秒) -``` - -この例では、[has](../../sql-reference/functions/array-functions.md#hasarr-elem) 関数を使用して、配列要素でフィルタリングし、指示の数でソートします。 - -126のステップが必要なウェディングケーキがあります!その指示を表示します: - -クエリ: - -```sql -SELECT arrayJoin(directions) -FROM recipes -WHERE title = 'Chocolate-Strawberry-Orange Wedding Cake' -``` - -結果: - -```text -┌─arrayJoin(directions)───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┐ -│ オーブンの中央に1つ、下部の1/3に1つのラックを配置し、350Fに予熱する。 │ -│ 直径5インチ、高さ2インチのケーキ型1つ、直径8インチ、高さ2インチのケーキ型1つ、直径12インチ、高さ2インチのケーキ型1つにバターを塗る。 │ -│ 型に小麦粉を振り入れ、底にクッキングシートを敷く。 │ -│ 1/3カップのオレンジジュースと2オンスの無糖チョコレートを重めの小鍋に入れる。 │ -│ 中火から弱火でチョコレートが溶けるまで混ぜる。 │ -│ 火から下ろす。 │ -│ 1 2/3カップのオレンジジュースを少しずつ加える。 │ -│ 3カップの小麦粉、2/3カップのココア、2 teaspoonsの重曹、1 teaspoonの塩、1/2 teaspoonのベーキングパウダーを中ボウルにふるい入れる。 │ -│ 電動ミキサーを使用して、大きなボウルで1カップ(2本)のバターと3カップの砂糖を混ぜる(混合物はざらついて見える)。 │ -│ 卵4個を1個ずつ加え、各卵を混ぜる。 │ -│ 1 tablespoonのオレンジの皮と1 tablespoonのバニラエッセンスを加える。 │ -│ 乾燥材料をオレンジジュースの混合物と交互に3回に分けて加え、各回後によく混ぜる。 │ -│ 1カップのチョコレートチップを加える。 │ -│ 準備した5インチの型には1カップと2 tablespoonsの生地、8インチの型には3カップの生地、12インチの型には残りの生地を(約6カップ)加える。 │ -│ 5インチと8インチの型をオーブンの中央のラックに置く。 │ -│ 12インチの型はオーブンの下段に置く。 │ -│ ケーキが中心に差し込んだテスターがきれいに出てくるまで約35分焼く。 │ -│ 型からケーキをラックに移し、完全に冷やす。 │ -│ 6インチの段ボールケーキの中央に直径4インチの円をマークする。 │ -│ マークした円を切り抜く。 │ -│ 8インチの段ボールケーキの中央に直径7インチの円をマークする。 │ -│ マークした円を切り抜く。 │ -│ 12インチの段ボールケーキの中央に直径11インチの円をマークする。 │ -│ マークした円を切り抜く。 │ -│ 5インチケーキの側面を切り離す。 │ -│ 4インチの段ボールを型の上に置く。 │ -│ 段ボールと型を一緒に持ち、ケーキを段ボールの上にひっくり返す。 │ -│ クッキングシートを剥がす。ケーキを段ボールごとアルミホイルで包む。 │ -│ 上記の手順を繰り返し、8インチケーキには7インチの段ボール、12インチケーキには11インチの段ボールを使用してひっくり返し、クッキングシートを剥がしてアルミホイルで包む。 │ -│ 残りの材料を使って、さらに1バッチのケーキ生地を作り、上記の手順で3つのケーキ層を焼く。 │ -│ 型でケーキを冷やす。 │ -│ 型のケーキをしっかりとアルミホイルで覆う。 │ -│ (事前に準備できる。 │ -│ 室温で最大1日放置するか、すべてのケーキ層を二重に包んで最大1週間冷凍する。 │ -│ 使用する前にケーキ層を室温に戻します。) │ -│ 最初の12インチのケーキをそのダンボールの上に作業台に置く。 │ -│ 2 3/4カップのガナッシュをケーキの上に均等に広げ、端まで伸ばす。 │ -│ 2/3カップのジャムをガナッシュの上に広げ、端に1/2インチのチョコレートの縁を残す。 │ -│ 1 3/4カップのホワイトチョコレートフロスティングをジャムの上にスプーンで落とす。 │ -│ フロスティングをジャムの上に優しく広げ、端に1/2インチのチョコレートの縁を残す。 │ -│ 2番目の12インチダンボールの上に少しココアパウダーをふる。 │ -│ 2番目の12インチケーキの側面を切り離す。 │ -│ ダンボールの上にココア側を下にして置く。 │ -│ ケーキをダンボールの上にひっくり返す。 │ -│ クッキングシートを剥がす。 │ -│ ケーキをダンボールから丁寧に滑らせ、最初の12インチケーキのフィリングの上に置く。 │ -│ 冷蔵する。 │ -│ 最初の8インチケーキをその段ボールの上に作業台に置く。 │ -│ 1カップのガナッシュをケーキの上に端まで広げる。 │ -│ 1/4カップのジャムを広げ、端に1/2インチのチョコレートの縁を残す。 │ -│ 1カップのホワイトチョコレートのフロスティングをジャムの上にスプーンで落とす。 │ -│ フロスティングをジャムの上に優しく広げ、端に1/2インチのチョコレートの縁を残す。 │ -│ 
2番目の8インチダンボールの上に少しココアをふる。 │ -│ 2番目の8インチケーキの側面を切り離す。 │ -│ ダンボールの上にココア側を下にして置く。 │ -│ ケーキをダンボールの上にひっくり返す。 │ -│ クッキングシートを剥がす。 │ -│ ケーキをダンボールから滑らせて、最初の8インチケーキのフィリングの上に置く。 │ -│ 冷蔵する。 │ -│ 最初の5インチケーキをその段ボールの上に作業台に置く。 │ -│ ケーキの上に1/2カップのガナッシュを端まで広げる。 │ -│ 2 tablespoonsのジャムを広げ、端に1/2インチのチョコレートの縁を残す。 │ -│ 1/3カップのホワイトチョコレートフロスティングをジャムの上にスプーンで落とす。 │ -│ フロスティングをジャムの上に優しく広げ、端に1/2インチのチョコレートの縁を残す。 │ -│ ココアを2番目の6インチダンボールの上にふる。 │ -│ 2番目の5インチケーキの側面を切り離す。 │ -│ ダンボールの上にココア側を下にして置く。 │ -│ ケーキをダンボールの上にひっくり返す。 │ -│ クッキングシートを剥がす。 │ -│ ケーキをダンボールから滑らせて、最初の5インチケーキのフィリングの上に置く。 │ -│ すべてのケーキを1時間冷やしてフィリングを固める。 │ -│ 12インチの多層ケーキをそのダンボールの上に回転するケーキスタンドに置く。 │ -│ 2 2/3カップのフロスティングをケーキの上と側面に第一コートとして広げる。 │ -│ ケーキを冷蔵する。 │ -│ 8インチの多層ケーキをそのダンボールの上にケーキスタンドに置く。 │ -│ 1 1/4カップのフロスティングをケーキの上と側面に第一コートとして広げる。 │ -│ ケーキを冷蔵する。 │ -│ 5インチの多層ケーキをその段ボールの上にケーキスタンドに置く。 │ -│ 3/4カップのフロスティングをケーキの上と側面に第一コートとして広げる。 │ -│ すべてのケーキがフロスティングの最初のコートが固まるまで、約1時間冷蔵する。 │ -│ (ケーキはこの時点まで最大1日前に作成できます。カバーして冷蔵庫に保管してください。) │ -│ フロスティングの2回目のバッチを準備し、残りのフロスティング材料を使用して最初のバッチの指示に従う。 │ -│ 小さな星の先端のついた絞り袋に2カップのフロスティングを入れる。 │ -│ 12インチケーキをその段ボールの上に大きな平皿に置く。 │ -│ 平皿をケーキスタンドの上に置く。 │ -│ アイススパチュラを使用して、ケーキの上と側面に2 1/2カップのフロスティングを広げ、上を平滑にする。 │ -│ 絞り袋を使ってケーキの上のエッジの周りに装飾的なボーダーを絞り出す。 │ -│ ケーキを皿の上で冷蔵する。 │ -│ 8インチケーキをその段ボールの上にケーキスタンドに置く。 │ -│ アイススパチュラを使用して、ケーキの上と側面に1 1/2カップのフロスティングを広げ、上を平滑にする。 │ -│ 絞り袋を使用してケーキの上のエッジの周りに装飾的なボーダーを絞り出す。 │ -│ ケーキをその段ボールの上で冷蔵する。 │ -│ 5インチケーキをその段ボールの上にケーキスタンドに置く。 │ -│ アイススパチュラを使用して、ケーキの上と側面に3/4カップのフロスティングを広げ、上を平滑にする。 │ -│ 絞り袋を使用してケーキの上のエッジの周りに装飾的なボーダーを絞り出し、必要に応じて袋にフロスティングを追加する。 │ -│ ケーキをその段ボールの上で冷蔵する。 │ -│ フロスティングが固まるまで、すべてのケーキを冷蔵しておく。約2時間。 │ -│ (最大2日前に準備できます。 │ -│ ゆるくカバーし、冷蔵庫で保管してください。) │ -│ 12インチケーキを作業台の皿の上に置く。 │ -│ 1本の木製のダウエルを真っ直ぐ下に押し込み、ケーキの中央を完全に貫通させる。 │ -│ ダウエルをフロスティングの上部から1/4インチの高さにマーク。 │ -│ ダウエルを取り外し、マークした位置で鋸歯状のナイフで切断する。 │ -│ 同じ長さに4本のダウエルを切る。 │ -│ 1本の切断したダウエルをケーキの中央に戻し、 │ -│ 残りの4本の切断したダウエルをケーキに押し込み、ケーキの端から3 1/2インチ内側に位置させ、均等に間隔をあける。 │ -│ 8インチケーキをその段ボールの上に作業台に置く。 │ -│ 1本のダウエルを真っ直ぐ下に押し込み、ケーキの中央を完全に貫通させる。 │ -│ ダウエルをフロスティングの上部から1/4インチの高さにマーク。 │ -│ ダウエルを取り外し、マークした位置で鋸歯状のナイフで切断する。 │ -│ 同じ長さに3本のダウエルを切る。 │ -│ 1本の切断したダウエルをケーキの中央に戻し、 │ -│ 残りの3本の切断したダウエルをケーキに押し込み、端から2 1/2インチ内側に位置させ、均等に間隔をあける。 │ -│ 大きな金属スパチュラを使って、8インチケーキを12インチケーキのダウエルの上に注意深く置き、中心を合わせます。 │ -│ 5インチケーキを8インチケーキのダウエルの上に注意深く置き、中心を合わせます。 │ -│ シトラスストリッパーを使用して、オレンジからオレンジの皮の長いストリップを切り取ります。 │ -│ ストリップを長いセグメントに切断します。 │ -│ オレンジの皮のコイルを作るには、皮のセグメントを木製のスプーンのハンドルに巻きつけます;皮がコイル状の形を保つように、ハンドルから優しく滑らせます。 │ -│ ケーキをオレンジの皮のコイル、アイビーやミントの枝、一部のベリーで飾ります。 │ -│ (組み立てたケーキは最大8時間前に作ることができます。 │ -│ 暖かい室温で放置します。) │ -│ 上部と中央のケーキの層を取り外します。 │ -│ ケーキからダウエルを取り外します。 │ -│ 上部と中央のケーキをスライスします。 │ -│ 12インチケーキを切るには:端から3インチ内側に始めてナイフを真っ直ぐ下に挿入し、上から下まで切り込み、中央に6インチの直径の円を作ります。 │ -│ ケーキの外側の部分をスライスし、内側の部分もスライスし、ストロベリーと一緒に提供します。 │ -└─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┘ - -126行のセットです。経過時間: 0.011秒。処理した行数: 8.19千行、サイズ: 5.34 MB (737.75千行/秒、480.59 MB/秒) -``` -### オンラインプレイグラウンド {#online-playground} - -データセットは、[オンラインプレイグラウンド](https://sql.clickhouse.com?query_id=HQXNQZE26Z1QWYP9KC76ML)でも利用可能です。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/recipes.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/recipes.md.hash deleted file mode 100644 index 94b55d85243..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/recipes.md.hash +++ /dev/null @@ -1 +0,0 @@ -53ab58cc63c863b7 diff --git 
a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/reddit-comments.md b/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/reddit-comments.md deleted file mode 100644 index 464b0de4d3a..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/reddit-comments.md +++ /dev/null @@ -1,728 +0,0 @@ ---- -description: 'Dataset containing publicly available comments on Reddit from December - 2005 to March 2023 with over 14B rows of data in JSON format' -sidebar_label: 'Reddit comments' -slug: '/getting-started/example-datasets/reddit-comments' -title: 'Reddit comments dataset' ---- - - - -このデータセットには、2005年12月から2023年3月までのReddit上の公開コメントが含まれており、14B行以上のデータがあります。生データは圧縮ファイルのJSON形式で、行は以下のようになります。 - -```json -{"controversiality":0,"body":"A look at Vietnam and Mexico exposes the myth of market liberalisation.","subreddit_id":"t5_6","link_id":"t3_17863","stickied":false,"subreddit":"reddit.com","score":2,"ups":2,"author_flair_css_class":null,"created_utc":1134365188,"author_flair_text":null,"author":"frjo","id":"c13","edited":false,"parent_id":"t3_17863","gilded":0,"distinguished":null,"retrieved_on":1473738411} -{"created_utc":1134365725,"author_flair_css_class":null,"score":1,"ups":1,"subreddit":"reddit.com","stickied":false,"link_id":"t3_17866","subreddit_id":"t5_6","controversiality":0,"body":"The site states \"What can I use it for? Meeting notes, Reports, technical specs Sign-up sheets, proposals and much more...\", just like any other new breeed of sites that want us to store everything we have on the web. And they even guarantee multiple levels of security and encryption etc. But what prevents these web site operators fom accessing and/or stealing Meeting notes, Reports, technical specs Sign-up sheets, proposals and much more, for competitive or personal gains...? I am pretty sure that most of them are honest, but what's there to prevent me from setting up a good useful site and stealing all your data? 
Call me paranoid - I am.","retrieved_on":1473738411,"distinguished":null,"gilded":0,"id":"c14","edited":false,"parent_id":"t3_17866","author":"zse7zse","author_flair_text":null} -{"gilded":0,"distinguished":null,"retrieved_on":1473738411,"author":"[deleted]","author_flair_text":null,"edited":false,"id":"c15","parent_id":"t3_17869","subreddit":"reddit.com","score":0,"ups":0,"created_utc":1134366848,"author_flair_css_class":null,"body":"Jython related topics by Frank Wierzbicki","controversiality":0,"subreddit_id":"t5_6","stickied":false,"link_id":"t3_17869"} -{"gilded":0,"retrieved_on":1473738411,"distinguished":null,"author_flair_text":null,"author":"[deleted]","edited":false,"parent_id":"t3_17870","id":"c16","subreddit":"reddit.com","created_utc":1134367660,"author_flair_css_class":null,"score":1,"ups":1,"body":"[deleted]","controversiality":0,"stickied":false,"link_id":"t3_17870","subreddit_id":"t5_6"} -{"gilded":0,"retrieved_on":1473738411,"distinguished":null,"author_flair_text":null,"author":"rjoseph","edited":false,"id":"c17","parent_id":"t3_17817","subreddit":"reddit.com","author_flair_css_class":null,"created_utc":1134367754,"score":1,"ups":1,"body":"Saft is by far the best extension you could tak onto your Safari","controversiality":0,"link_id":"t3_17817","stickied":false,"subreddit_id":"t5_6"} -``` - -Perconaへの感謝を込めて、このデータセットを取り込む動機に関しては、こちらを参照してください [motivation behind ingesting this dataset](https://www.percona.com/blog/big-data-set-reddit-comments-analyzing-clickhouse/)。このデータはダウンロードされ、S3バケットに保存されています。 -## テーブルの作成 {#creating-a-table} - -:::note -以下のコマンドは、最小メモリが720GBに設定されたClickHouse CloudのProductionインスタンスで実行されました。自分のクラスタでこれを実行するには、`s3Cluster`関数の呼び出し内の`default`を自分のクラスタの名前に置き換えてください。クラスタを持っていない場合は、`s3Cluster`関数を`s3`関数に置き換えてください。 -::: - -1. Redditデータ用のテーブルを作成しましょう: - -```sql -CREATE TABLE reddit -( - subreddit LowCardinality(String), - subreddit_id LowCardinality(String), - subreddit_type Enum('public' = 1, 'restricted' = 2, 'user' = 3, 'archived' = 4, 'gold_restricted' = 5, 'private' = 6), - author LowCardinality(String), - body String CODEC(ZSTD(6)), - created_date Date DEFAULT toDate(created_utc), - created_utc DateTime, - retrieved_on DateTime, - id String, - parent_id String, - link_id String, - score Int32, - total_awards_received UInt16, - controversiality UInt8, - gilded UInt8, - collapsed_because_crowd_control UInt8, - collapsed_reason Enum('' = 0, 'comment score below threshold' = 1, 'may be sensitive content' = 2, 'potentially toxic' = 3, 'potentially toxic content' = 4), - distinguished Enum('' = 0, 'moderator' = 1, 'admin' = 2, 'special' = 3), - removal_reason Enum('' = 0, 'legal' = 1), - author_created_utc DateTime, - author_fullname LowCardinality(String), - author_patreon_flair UInt8, - author_premium UInt8, - can_gild UInt8, - can_mod_post UInt8, - collapsed UInt8, - is_submitter UInt8, - _edited String, - locked UInt8, - quarantined UInt8, - no_follow UInt8, - send_replies UInt8, - stickied UInt8, - author_flair_text LowCardinality(String) -) -ENGINE = MergeTree -ORDER BY (subreddit, created_date, author); -``` - -:::note -S3内のファイルの名前は`RC_YYYY-MM`で始まり、`YYYY-MM`は`2005-12`から`2023-02`まで変わります。ただし、圧縮形式は何度か変更されるため、ファイルの拡張子は一貫していません。例えば: - -- ファイル名は最初は`RC_2005-12.bz2`から`RC_2017-11.bz2`です。 -- 次に、`RC_2017-12.xz`から`RC_2018-09.xz`のようになります。 -- 最後に、`RC_2018-10.zst`から`RC_2023-02.zst`となります。 -::: -## データの読み込み {#load-data} - -2. 
一か月分のデータから始めますが、すべての行を単に挿入したい場合は、以下のステップ8に進んでください。次のファイルには、2017年12月からの860万件のレコードがあります: - -```sql -INSERT INTO reddit - SELECT * - FROM s3( - 'https://clickhouse-public-datasets.s3.eu-central-1.amazonaws.com/reddit/original/RC_2017-12.xz', - 'JSONEachRow' - ); - -``` - -3. リソースに応じて時間がかかりますが、完了したら正常に動作したことを確認してください: - -```sql -SELECT formatReadableQuantity(count()) -FROM reddit; -``` - -```response -┌─formatReadableQuantity(count())─┐ -│ 85.97 million │ -└─────────────────────────────────┘ -``` - -4. 2017年12月にいくつのユニークなsubredditがあったか見てみましょう: - -```sql -SELECT uniqExact(subreddit) -FROM reddit; -``` - -```response -┌─uniqExact(subreddit)─┐ -│ 91613 │ -└──────────────────────┘ - -1行セット。経過時間: 1.572秒。85.97百万行、367.43 MBを処理しました。(54.71百万行/s、 233.80 MB/s) -``` -## 例クエリ {#example-queries} - -5. このクエリは、コメント数の観点からトップ10のsubredditを返します: - -```sql -SELECT - subreddit, - count() AS c -FROM reddit -GROUP BY subreddit -ORDER BY c DESC -LIMIT 20; -``` - -```response -┌─subreddit───────┬───────c─┐ -│ AskReddit │ 5245881 │ -│ politics │ 1753120 │ -│ nfl │ 1220266 │ -│ nba │ 960388 │ -│ The_Donald │ 931857 │ -│ news │ 796617 │ -│ worldnews │ 765709 │ -│ CFB │ 710360 │ -│ gaming │ 602761 │ -│ movies │ 601966 │ -│ soccer │ 590628 │ -│ Bitcoin │ 583783 │ -│ pics │ 563408 │ -│ StarWars │ 562514 │ -│ funny │ 547563 │ -│ leagueoflegends │ 517213 │ -│ teenagers │ 492020 │ -│ DestinyTheGame │ 477377 │ -│ todayilearned │ 472650 │ -│ videos │ 450581 │ -└─────────────────┴─────────┘ - -20行セット。経過時間: 0.368秒。85.97百万行、367.43 MBを処理しました。(233.34百万行/s、 997.25 MB/s) -``` - -6. 2017年12月のコメント数の観点からのトップ10の著者は以下の通りです: - -```sql -SELECT - author, - count() AS c -FROM reddit -GROUP BY author -ORDER BY c DESC -LIMIT 10; -``` - -```response -┌─author──────────┬───────c─┐ -│ [deleted] │ 5913324 │ -│ AutoModerator │ 784886 │ -│ ImagesOfNetwork │ 83241 │ -│ BitcoinAllBot │ 54484 │ -│ imguralbumbot │ 45822 │ -│ RPBot │ 29337 │ -│ WikiTextBot │ 25982 │ -│ Concise_AMA_Bot │ 19974 │ -│ MTGCardFetcher │ 19103 │ -│ TotesMessenger │ 19057 │ -└─────────────────┴─────────┘ - -10行セット。経過時間: 8.143秒。85.97百万行、711.05 MBを処理しました。(10.56百万行/s、 87.32 MB/s) -``` -## データセット全体の読み込み {#loading-the-entire-dataset} - -7. 既にいくつかのデータを挿入しましたが、最初からやり直します: - -```sql -TRUNCATE TABLE reddit; -``` - -8. このデータセットは楽しそうで、素晴らしい情報が見つかるようです。ですので、2005年から2023年までの全データセットを挿入しましょう。実用的な理由から、データを年ごとに挿入するのがうまくいきます。最初は... - -```sql -INSERT INTO reddit - SELECT * - FROM s3Cluster( - 'default', - 'https://clickhouse-public-datasets.s3.eu-central-1.amazonaws.com/reddit/original/RC_2005*', - 'JSONEachRow' - ) - SETTINGS zstd_window_log_max = 31; -``` - -...そして最後は: - -```sql -INSERT INTO reddit -SELECT * -FROM s3Cluster( - 'default', - 'https://clickhouse-public-datasets.s3.amazonaws.com/reddit/original/RC_2023*', - 'JSONEachRow' - ) -SETTINGS zstd_window_log_max = 31; -``` - -クラスタを持っていない場合は、`s3Cluster`の代わりに`s3`を使用してください: - -```sql -INSERT INTO reddit -SELECT * -FROM s3( - 'https://clickhouse-public-datasets.s3.amazonaws.com/reddit/original/RC_2005*', - 'JSONEachRow' - ) -SETTINGS zstd_window_log_max = 31; -``` - -8. 
正常に動作したか確認するために、年ごとの行数を確認します(2023年2月時点): - -```sql -SELECT - toYear(created_utc) AS year, - formatReadableQuantity(count()) -FROM reddit -GROUP BY year; -``` - -```response - -┌─year─┬─formatReadableQuantity(count())─┐ -│ 2005 │ 1.07 thousand │ -│ 2006 │ 417.18 thousand │ -│ 2007 │ 2.46 million │ -│ 2008 │ 7.24 million │ -│ 2009 │ 18.86 million │ -│ 2010 │ 42.93 million │ -│ 2011 │ 28.91 million │ -│ 2012 │ 260.31 million │ -│ 2013 │ 402.21 million │ -│ 2014 │ 531.80 million │ -│ 2015 │ 667.76 million │ -│ 2016 │ 799.90 million │ -│ 2017 │ 972.86 million │ -│ 2018 │ 1.24 billion │ -│ 2019 │ 1.66 billion │ -│ 2020 │ 2.16 billion │ -│ 2021 │ 2.59 billion │ -│ 2022 │ 2.82 billion │ -│ 2023 │ 474.86 million │ -└──────┴─────────────────────────────────┘ -``` - -9. 挿入された行数と、テーブルが使用しているディスクスペースを確認しましょう: - -```sql -SELECT - sum(rows) AS count, - formatReadableQuantity(count), - formatReadableSize(sum(bytes)) AS disk_size, - formatReadableSize(sum(data_uncompressed_bytes)) AS uncompressed_size -FROM system.parts -WHERE (table = 'reddit') AND active; -``` - -ディスクストレージの圧縮は、非圧縮サイズの約1/3であることに注意してください: - -```response -┌───────count─┬─formatReadableQuantity(sum(rows))─┬─disk_size─┬─uncompressed_size─┐ -│ 14688534662 │ 14.69 billion │ 1.03 TiB │ 3.26 TiB │ -└─────────────┴───────────────────────────────────┴───────────┴───────────────────┘ - -1行セット。経過時間: 0.005秒。 -``` -## 例のクエリ - コメント、著者、サブレディット毎月の数 {#example-query-comments} - -10. 次のクエリは、各月のコメント、著者、サブレディットの数を示しています: - -```sql -SELECT - toStartOfMonth(created_utc) AS firstOfMonth, - count() AS c, - bar(c, 0, 50000000, 25) AS bar_count, - uniq(author) AS authors, - bar(authors, 0, 5000000, 25) AS bar_authors, - uniq(subreddit) AS subreddits, - bar(subreddits, 0, 100000, 25) AS bar_subreddits -FROM reddit -GROUP BY firstOfMonth -ORDER BY firstOfMonth ASC; -``` - -これは14.69億行すべてを処理しなければならない大規模なクエリですが、印象的な応答時間(約48秒)を得ることができます: - -```response -┌─firstOfMonth─┬─────────c─┬─bar_count─────────────────┬──authors─┬─bar_authors───────────────┬─subreddits─┬─bar_subreddits────────────┐ -│ 2005-12-01 │ 1075 │ │ 394 │ │ 1 │ │ -│ 2006-01-01 │ 3666 │ │ 791 │ │ 2 │ │ -│ 2006-02-01 │ 9095 │ │ 1464 │ │ 18 │ │ -│ 2006-03-01 │ 13859 │ │ 1958 │ │ 15 │ │ -│ 2006-04-01 │ 19090 │ │ 2334 │ │ 21 │ │ -│ 2006-05-01 │ 26859 │ │ 2698 │ │ 21 │ │ -│ 2006-06-01 │ 29163 │ │ 3043 │ │ 19 │ │ -│ 2006-07-01 │ 37031 │ │ 3532 │ │ 22 │ │ -│ 2006-08-01 │ 50559 │ │ 4750 │ │ 24 │ │ -│ 2006-09-01 │ 50675 │ │ 4908 │ │ 21 │ │ -│ 2006-10-01 │ 54148 │ │ 5654 │ │ 31 │ │ -│ 2006-11-01 │ 62021 │ │ 6490 │ │ 23 │ │ -│ 2006-12-01 │ 61018 │ │ 6707 │ │ 24 │ │ -│ 2007-01-01 │ 81341 │ │ 7931 │ │ 23 │ │ -│ 2007-02-01 │ 95634 │ │ 9020 │ │ 21 │ │ -│ 2007-03-01 │ 112444 │ │ 10842 │ │ 23 │ │ -│ 2007-04-01 │ 126773 │ │ 10701 │ │ 26 │ │ -│ 2007-05-01 │ 170097 │ │ 11365 │ │ 25 │ │ -│ 2007-06-01 │ 178800 │ │ 11267 │ │ 22 │ │ -│ 2007-07-01 │ 203319 │ │ 12482 │ │ 25 │ │ -│ 2007-08-01 │ 225111 │ │ 14124 │ │ 30 │ │ -│ 2007-09-01 │ 259497 │ ▏ │ 15416 │ │ 33 │ │ -│ 2007-10-01 │ 274170 │ ▏ │ 15302 │ │ 36 │ │ -│ 2007-11-01 │ 372983 │ ▏ │ 15134 │ │ 43 │ │ -│ 2007-12-01 │ 363390 │ ▏ │ 15915 │ │ 31 │ │ -│ 2008-01-01 │ 452990 │ ▏ │ 18857 │ │ 126 │ │ -│ 2008-02-01 │ 441768 │ ▏ │ 18266 │ │ 173 │ │ -│ 2008-03-01 │ 463728 │ ▏ │ 18947 │ │ 292 │ │ -│ 2008-04-01 │ 468317 │ ▏ │ 18590 │ │ 323 │ │ -│ 2008-05-01 │ 536380 │ ▎ │ 20861 │ │ 375 │ │ -│ 2008-06-01 │ 577684 │ ▎ │ 22557 │ │ 575 │ ▏ │ -│ 2008-07-01 │ 592610 │ ▎ │ 23123 │ │ 657 │ ▏ │ -│ 2008-08-01 │ 595959 │ ▎ │ 23729 │ │ 707 │ ▏ │ -│ 2008-09-01 │ 680892 │ ▎ │ 26374 │ ▏ │ 801 │ ▏ │ -│ 
2008-10-01 │ 789874 │ ▍ │ 28970 │ ▏ │ 893 │ ▏ │ -│ 2008-11-01 │ 792310 │ ▍ │ 30272 │ ▏ │ 1024 │ ▎ │ -│ 2008-12-01 │ 850359 │ ▍ │ 34073 │ ▏ │ 1103 │ ▎ │ -│ 2009-01-01 │ 1051649 │ ▌ │ 38978 │ ▏ │ 1316 │ ▎ │ -│ 2009-02-01 │ 944711 │ ▍ │ 43390 │ ▏ │ 1132 │ ▎ │ -│ 2009-03-01 │ 1048643 │ ▌ │ 46516 │ ▏ │ 1203 │ ▎ │ -│ 2009-04-01 │ 1094599 │ ▌ │ 48284 │ ▏ │ 1334 │ ▎ │ -│ 2009-05-01 │ 1201257 │ ▌ │ 52512 │ ▎ │ 1395 │ ▎ │ -│ 2009-06-01 │ 1258750 │ ▋ │ 57728 │ ▎ │ 1473 │ ▎ │ -│ 2009-07-01 │ 1470290 │ ▋ │ 60098 │ ▎ │ 1686 │ ▍ │ -│ 2009-08-01 │ 1750688 │ ▉ │ 67347 │ ▎ │ 1777 │ ▍ │ -│ 2009-09-01 │ 2032276 │ █ │ 78051 │ ▍ │ 1784 │ ▍ │ -│ 2009-10-01 │ 2242017 │ █ │ 93409 │ ▍ │ 2071 │ ▌ │ -│ 2009-11-01 │ 2207444 │ █ │ 95940 │ ▍ │ 2141 │ ▌ │ -│ 2009-12-01 │ 2560510 │ █▎ │ 104239 │ ▌ │ 2141 │ ▌ │ -│ 2010-01-01 │ 2884096 │ █▍ │ 114314 │ ▌ │ 2313 │ ▌ │ -│ 2010-02-01 │ 2687779 │ █▎ │ 115683 │ ▌ │ 2522 │ ▋ │ -│ 2010-03-01 │ 3228254 │ █▌ │ 125775 │ ▋ │ 2890 │ ▋ │ -│ 2010-04-01 │ 3209898 │ █▌ │ 128936 │ ▋ │ 3170 │ ▊ │ -│ 2010-05-01 │ 3267363 │ █▋ │ 131851 │ ▋ │ 3166 │ ▊ │ -│ 2010-06-01 │ 3532867 │ █▊ │ 139522 │ ▋ │ 3301 │ ▊ │ -│ 2010-07-01 │ 806612 │ ▍ │ 76486 │ ▍ │ 1955 │ ▍ │ -│ 2010-08-01 │ 4247982 │ ██ │ 164071 │ ▊ │ 3653 │ ▉ │ -│ 2010-09-01 │ 4704069 │ ██▎ │ 186613 │ ▉ │ 4009 │ █ │ -│ 2010-10-01 │ 5032368 │ ██▌ │ 203800 │ █ │ 4154 │ █ │ -│ 2010-11-01 │ 5689002 │ ██▊ │ 226134 │ █▏ │ 4383 │ █ │ -│ 2010-12-01 │ 3642690 │ █▊ │ 196847 │ ▉ │ 3914 │ ▉ │ -│ 2011-01-01 │ 3924540 │ █▉ │ 215057 │ █ │ 4240 │ █ │ -│ 2011-02-01 │ 3859131 │ █▉ │ 223485 │ █ │ 4371 │ █ │ -│ 2011-03-01 │ 2877996 │ █▍ │ 208607 │ █ │ 3870 │ ▉ │ -│ 2011-04-01 │ 3859131 │ █▉ │ 248931 │ █▏ │ 4881 │ █▏ │ -│ 2011-06-01 │ 3859131 │ █▉ │ 267197 │ █▎ │ 5255 │ █▎ │ -│ 2011-08-01 │ 2943405 │ █▍ │ 259428 │ █▎ │ 5806 │ █▍ │ -│ 2011-10-01 │ 3859131 │ █▉ │ 327342 │ █▋ │ 6958 │ █▋ │ -│ 2011-12-01 │ 3728313 │ █▊ │ 354817 │ █▊ │ 7713 │ █▉ │ -│ 2012-01-01 │ 16350205 │ ████████▏ │ 696110 │ ███▍ │ 14281 │ ███▌ │ -│ 2012-02-01 │ 16015695 │ ████████ │ 722892 │ ███▌ │ 14949 │ ███▋ │ -│ 2012-03-01 │ 17881943 │ ████████▉ │ 789664 │ ███▉ │ 15795 │ ███▉ │ -│ 2012-04-01 │ 19044534 │ █████████▌ │ 842491 │ ████▏ │ 16440 │ ████ │ -│ 2012-05-01 │ 20388260 │ ██████████▏ │ 886176 │ ████▍ │ 16974 │ ████▏ │ -│ 2012-06-01 │ 21897913 │ ██████████▉ │ 946798 │ ████▋ │ 17952 │ ████▍ │ -│ 2012-07-01 │ 24087517 │ ████████████ │ 1018636 │ █████ │ 19069 │ ████▊ │ -│ 2012-08-01 │ 25703326 │ ████████████▊ │ 1094445 │ █████▍ │ 20553 │ █████▏ │ -│ 2012-09-01 │ 23419524 │ ███████████▋ │ 1088491 │ █████▍ │ 20831 │ █████▏ │ -│ 2012-10-01 │ 24788236 │ ████████████▍ │ 1131885 │ █████▋ │ 21868 │ █████▍ │ -│ 2012-11-01 │ 24648302 │ ████████████▎ │ 1167608 │ █████▊ │ 21791 │ █████▍ │ -│ 2012-12-01 │ 26080276 │ █████████████ │ 1218402 │ ██████ │ 22622 │ █████▋ │ -│ 2013-01-01 │ 30365867 │ ███████████████▏ │ 1341703 │ ██████▋ │ 24696 │ ██████▏ │ -│ 2013-02-01 │ 27213960 │ █████████████▌ │ 1304756 │ ██████▌ │ 24514 │ ██████▏ │ -│ 2013-03-01 │ 30771274 │ ███████████████▍ │ 1391703 │ ██████▉ │ 25730 │ ██████▍ │ -│ 2013-04-01 │ 33259557 │ ████████████████▋ │ 1485971 │ ███████▍ │ 27294 │ ██████▊ │ -│ 2013-05-01 │ 33126225 │ ████████████████▌ │ 1506473 │ ███████▌ │ 27299 │ ██████▊ │ -│ 2013-06-01 │ 32648247 │ ████████████████▎ │ 1506650 │ ███████▌ │ 27450 │ ██████▊ │ -│ 2013-07-01 │ 34922133 │ █████████████████▍ │ 1561771 │ ███████▊ │ 28294 │ ███████ │ -│ 2013-08-01 │ 34766579 │ █████████████████▍ │ 1589781 │ ███████▉ │ 28943 │ ███████▏ │ -│ 2013-09-01 │ 31990369 │ ███████████████▉ │ 1570342 │ ███████▊ │ 29408 
│ ███████▎ │ -│ 2013-10-01 │ 35940040 │ █████████████████▉ │ 1683770 │ ████████▍ │ 30273 │ ███████▌ │ -│ 2013-11-01 │ 37396497 │ ██████████████████▋ │ 1757467 │ ████████▊ │ 31173 │ ███████▊ │ -│ 2013-12-01 │ 39810216 │ ███████████████████▉ │ 1846204 │ █████████▏ │ 32326 │ ████████ │ -│ 2014-01-01 │ 42420655 │ █████████████████████▏ │ 1927229 │ █████████▋ │ 35603 │ ████████▉ │ -│ 2014-02-01 │ 38703362 │ ███████████████████▎ │ 1874067 │ █████████▎ │ 37007 │ █████████▎ │ -│ 2014-03-01 │ 42459956 │ █████████████████████▏ │ 1959888 │ █████████▊ │ 37948 │ █████████▍ │ -│ 2014-04-01 │ 42440735 │ █████████████████████▏ │ 1951369 │ █████████▊ │ 38362 │ █████████▌ │ -│ 2014-05-01 │ 42514094 │ █████████████████████▎ │ 1970197 │ █████████▊ │ 39078 │ █████████▊ │ -│ 2014-06-01 │ 41990650 │ ████████████████████▉ │ 1943850 │ █████████▋ │ 38268 │ █████████▌ │ -│ 2014-07-01 │ 46868899 │ ███████████████████████▍ │ 2059346 │ ██████████▎ │ 40634 │ ██████████▏ │ -│ 2014-08-01 │ 46990813 │ ███████████████████████▍ │ 2117335 │ ██████████▌ │ 41764 │ ██████████▍ │ -│ 2014-09-01 │ 44992201 │ ██████████████████████▍ │ 2124708 │ ██████████▌ │ 41890 │ ██████████▍ │ -│ 2014-10-01 │ 47497520 │ ███████████████████████▋ │ 2206535 │ ███████████ │ 43109 │ ██████████▊ │ -│ 2014-11-01 │ 46118074 │ ███████████████████████ │ 2239747 │ ███████████▏ │ 43718 │ ██████████▉ │ -│ 2014-12-01 │ 48807699 │ ████████████████████████▍ │ 2372945 │ ███████████▊ │ 43823 │ ██████████▉ │ -│ 2015-01-01 │ 53851542 │ █████████████████████████ │ 2499536 │ ████████████▍ │ 47172 │ ███████████▊ │ -│ 2015-02-01 │ 48342747 │ ████████████████████████▏ │ 2448496 │ ████████████▏ │ 47229 │ ███████████▊ │ -│ 2015-03-01 │ 54564441 │ █████████████████████████ │ 2550534 │ ████████████▊ │ 48156 │ ████████████ │ -│ 2015-04-01 │ 55005780 │ █████████████████████████ │ 2609443 │ █████████████ │ 49865 │ ████████████▍ │ -│ 2015-05-01 │ 54504410 │ █████████████████████████ │ 2585535 │ ████████████▉ │ 50137 │ ████████████▌ │ -│ 2015-06-01 │ 54258492 │ █████████████████████████ │ 2595129 │ ████████████▉ │ 49598 │ ████████████▍ │ -│ 2015-07-01 │ 58451788 │ █████████████████████████ │ 2720026 │ █████████████▌ │ 55022 │ █████████████▊ │ -│ 2015-08-01 │ 58075327 │ █████████████████████████ │ 2743994 │ █████████████▋ │ 55302 │ █████████████▊ │ -│ 2015-09-01 │ 55574825 │ █████████████████████████ │ 2672793 │ █████████████▎ │ 53960 │ █████████████▍ │ -│ 2015-10-01 │ 59494045 │ █████████████████████████ │ 2816426 │ ██████████████ │ 70210 │ █████████████████▌ │ -│ 2015-11-01 │ 57117500 │ █████████████████████████ │ 2847146 │ ██████████████▏ │ 71363 │ █████████████████▊ │ -│ 2015-12-01 │ 58523312 │ █████████████████████████ │ 2854840 │ ██████████████▎ │ 94559 │ ███████████████████████▋ │ -│ 2016-01-01 │ 61991732 │ █████████████████████████ │ 2920366 │ ██████████████▌ │ 108438 │ █████████████████████████ │ -│ 2016-02-01 │ 59189875 │ █████████████████████████ │ 2854683 │ ██████████████▎ │ 109916 │ █████████████████████████ │ -│ 2016-03-01 │ 63918864 │ █████████████████████████ │ 2969542 │ ██████████████▊ │ 84787 │ █████████████████████▏ │ -│ 2016-04-01 │ 64271256 │ █████████████████████████ │ 2999086 │ ██████████████▉ │ 61647 │ ███████████████▍ │ -│ 2016-05-01 │ 65212004 │ █████████████████████████ │ 3034674 │ ███████████████▏ │ 67465 │ ████████████████▊ │ -│ 2016-06-01 │ 65867743 │ █████████████████████████ │ 3057604 │ ███████████████▎ │ 75170 │ ██████████████████▊ │ -│ 2016-07-01 │ 66974735 │ █████████████████████████ │ 3199374 │ ███████████████▉ │ 77732 │ ███████████████████▍ │ 
-│ 2016-08-01 │ 69654819 │ █████████████████████████ │ 3239957 │ ████████████████▏ │ 63080 │ ███████████████▊ │ -│ 2016-09-01 │ 67024973 │ █████████████████████████ │ 3190864 │ ███████████████▉ │ 62324 │ ███████████████▌ │ -│ 2016-10-01 │ 71826553 │ █████████████████████████ │ 3284340 │ ████████████████▍ │ 62549 │ ███████████████▋ │ -│ 2016-11-01 │ 71022319 │ █████████████████████████ │ 3300822 │ ████████████████▌ │ 69718 │ █████████████████▍ │ -│ 2016-12-01 │ 72942967 │ █████████████████████████ │ 3430324 │ █████████████████▏ │ 71705 │ █████████████████▉ │ -│ 2017-01-01 │ 78946585 │ █████████████████████████ │ 3572093 │ █████████████████▊ │ 78198 │ ███████████████████▌ │ -│ 2017-02-01 │ 70609487 │ █████████████████████████ │ 3421115 │ █████████████████ │ 69823 │ █████████████████▍ │ -│ 2017-03-01 │ 79723106 │ █████████████████████████ │ 3638122 │ ██████████████████▏ │ 73865 │ ██████████████████▍ │ -│ 2017-04-01 │ 77478009 │ █████████████████████████ │ 3620591 │ ██████████████████ │ 74387 │ ██████████████████▌ │ -│ 2017-05-01 │ 79810360 │ █████████████████████████ │ 3650820 │ ██████████████████▎ │ 74356 │ ██████████████████▌ │ -│ 2017-06-01 │ 79901711 │ █████████████████████████ │ 3737614 │ ██████████████████▋ │ 72114 │ ██████████████████ │ -│ 2017-07-01 │ 81798725 │ █████████████████████████ │ 3872330 │ ███████████████████▎ │ 76052 │ ███████████████████ │ -│ 2017-08-01 │ 84658503 │ █████████████████████████ │ 3960093 │ ███████████████████▊ │ 77798 │ ███████████████████▍ │ -│ 2017-09-01 │ 83165192 │ █████████████████████████ │ 3880501 │ ███████████████████▍ │ 78402 │ ███████████████████▌ │ -│ 2017-10-01 │ 85828912 │ █████████████████████████ │ 3980335 │ ███████████████████▉ │ 80685 │ ████████████████████▏ │ -│ 2017-11-01 │ 84965681 │ █████████████████████████ │ 4026749 │ ████████████████████▏ │ 82659 │ ████████████████████▋ │ -│ 2017-12-01 │ 85973810 │ █████████████████████████ │ 4196354 │ ████████████████████▉ │ 91984 │ ██████████████████████▉ │ -│ 2018-01-01 │ 91558594 │ █████████████████████████ │ 4364443 │ █████████████████████▊ │ 102577 │ █████████████████████████ │ -│ 2018-02-01 │ 86467179 │ █████████████████████████ │ 4277899 │ █████████████████████▍ │ 104610 │ █████████████████████████ │ -│ 2018-03-01 │ 96490262 │ █████████████████████████ │ 4422470 │ ██████████████████████ │ 112559 │ █████████████████████████ │ -│ 2018-04-01 │ 98101232 │ █████████████████████████ │ 4572434 │ ██████████████████████▊ │ 105284 │ █████████████████████████ │ -│ 2018-05-01 │ 100109100 │ █████████████████████████ │ 4698908 │ ███████████████████████▍ │ 103910 │ █████████████████████████ │ -│ 2018-06-01 │ 100009462 │ █████████████████████████ │ 4697426 │ ███████████████████████▍ │ 101107 │ █████████████████████████ │ -│ 2018-07-01 │ 108151359 │ █████████████████████████ │ 5099492 │ █████████████████████████ │ 106184 │ █████████████████████████ │ -│ 2018-08-01 │ 107330940 │ █████████████████████████ │ 5084082 │ █████████████████████████ │ 109985 │ █████████████████████████ │ -│ 2018-09-01 │ 104473929 │ █████████████████████████ │ 5011953 │ █████████████████████████ │ 109710 │ █████████████████████████ │ -│ 2018-10-01 │ 112346556 │ █████████████████████████ │ 5320405 │ █████████████████████████ │ 112533 │ █████████████████████████ │ -│ 2018-11-01 │ 112573001 │ █████████████████████████ │ 5353282 │ █████████████████████████ │ 112211 │ █████████████████████████ │ -│ 2018-12-01 │ 121953600 │ █████████████████████████ │ 5611543 │ █████████████████████████ │ 118291 │ █████████████████████████ │ -│ 2019-01-01 │ 
129386587 │ █████████████████████████ │ 6016687 │ █████████████████████████ │ 125725 │ █████████████████████████ │ -│ 2019-02-01 │ 120645639 │ █████████████████████████ │ 5974488 │ █████████████████████████ │ 125420 │ █████████████████████████ │ -│ 2019-03-01 │ 137650471 │ █████████████████████████ │ 6410197 │ █████████████████████████ │ 135924 │ █████████████████████████ │ -│ 2019-04-01 │ 138473643 │ █████████████████████████ │ 6416384 │ █████████████████████████ │ 139844 │ █████████████████████████ │ -│ 2019-05-01 │ 142463421 │ █████████████████████████ │ 6574836 │ █████████████████████████ │ 142012 │ █████████████████████████ │ -│ 2019-06-01 │ 134172939 │ █████████████████████████ │ 6601267 │ █████████████████████████ │ 140997 │ █████████████████████████ │ -│ 2019-07-01 │ 145965083 │ █████████████████████████ │ 6901822 │ █████████████████████████ │ 147802 │ █████████████████████████ │ -│ 2019-08-01 │ 146854393 │ █████████████████████████ │ 6993882 │ █████████████████████████ │ 151888 │ █████████████████████████ │ -│ 2019-09-01 │ 137540219 │ █████████████████████████ │ 7001362 │ █████████████████████████ │ 148839 │ █████████████████████████ │ -│ 2019-10-01 │ 145909884 │ █████████████████████████ │ 7160126 │ █████████████████████████ │ 152075 │ █████████████████████████ │ -│ 2019-11-01 │ 138512489 │ █████████████████████████ │ 7098723 │ █████████████████████████ │ 164597 │ █████████████████████████ │ -│ 2019-12-01 │ 146012313 │ █████████████████████████ │ 7438261 │ █████████████████████████ │ 166966 │ █████████████████████████ │ -│ 2020-01-01 │ 153498208 │ █████████████████████████ │ 7703548 │ █████████████████████████ │ 174390 │ █████████████████████████ │ -│ 2020-02-01 │ 148386817 │ █████████████████████████ │ 7582031 │ █████████████████████████ │ 170257 │ █████████████████████████ │ -│ 2020-03-01 │ 166266315 │ █████████████████████████ │ 8339049 │ █████████████████████████ │ 192460 │ █████████████████████████ │ -│ 2020-04-01 │ 178511581 │ █████████████████████████ │ 8991649 │ █████████████████████████ │ 202334 │ █████████████████████████ │ -│ 2020-05-01 │ 189993779 │ █████████████████████████ │ 9331358 │ █████████████████████████ │ 217357 │ █████████████████████████ │ -│ 2020-06-01 │ 187914434 │ █████████████████████████ │ 9085003 │ █████████████████████████ │ 223362 │ █████████████████████████ │ -│ 2020-07-01 │ 194244994 │ █████████████████████████ │ 9321706 │ █████████████████████████ │ 228222 │ █████████████████████████ │ -│ 2020-08-01 │ 196099301 │ █████████████████████████ │ 9368408 │ █████████████████████████ │ 230251 │ █████████████████████████ │ -│ 2020-09-01 │ 182549761 │ █████████████████████████ │ 9271571 │ █████████████████████████ │ 227889 │ █████████████████████████ │ -│ 2020-10-01 │ 186583890 │ █████████████████████████ │ 9396112 │ █████████████████████████ │ 233715 │ █████████████████████████ │ -│ 2020-11-01 │ 186083723 │ █████████████████████████ │ 9623053 │ █████████████████████████ │ 234963 │ █████████████████████████ │ -│ 2020-12-01 │ 191317162 │ █████████████████████████ │ 9898168 │ █████████████████████████ │ 249115 │ █████████████████████████ │ -│ 2021-01-01 │ 210496207 │ █████████████████████████ │ 10503943 │ █████████████████████████ │ 259805 │ █████████████████████████ │ -│ 2021-02-01 │ 193510365 │ █████████████████████████ │ 10215033 │ █████████████████████████ │ 253656 │ █████████████████████████ │ -│ 2021-03-01 │ 207454415 │ █████████████████████████ │ 10365629 │ █████████████████████████ │ 267263 │ █████████████████████████ │ -│ 2021-04-01 │ 204573086 │ 
█████████████████████████ │ 10391984 │ █████████████████████████ │ 270543 │ █████████████████████████ │ -│ 2021-05-01 │ 217655366 │ █████████████████████████ │ 10648130 │ █████████████████████████ │ 288555 │ █████████████████████████ │ -│ 2021-06-01 │ 208027069 │ █████████████████████████ │ 10397311 │ █████████████████████████ │ 291520 │ █████████████████████████ │ -│ 2021-07-01 │ 210955954 │ █████████████████████████ │ 10063967 │ █████████████████████████ │ 252061 │ █████████████████████████ │ -│ 2021-08-01 │ 225681244 │ █████████████████████████ │ 10383556 │ █████████████████████████ │ 254569 │ █████████████████████████ │ -│ 2021-09-01 │ 220086513 │ █████████████████████████ │ 10298344 │ █████████████████████████ │ 256826 │ █████████████████████████ │ -│ 2021-10-01 │ 227527379 │ █████████████████████████ │ 10729882 │ █████████████████████████ │ 283328 │ █████████████████████████ │ -│ 2021-11-01 │ 228289963 │ █████████████████████████ │ 10995197 │ █████████████████████████ │ 302386 │ █████████████████████████ │ -│ 2021-12-01 │ 235807471 │ █████████████████████████ │ 11312798 │ █████████████████████████ │ 313876 │ █████████████████████████ │ -│ 2022-01-01 │ 256766679 │ █████████████████████████ │ 12074520 │ █████████████████████████ │ 340407 │ █████████████████████████ │ -│ 2022-02-01 │ 219927645 │ █████████████████████████ │ 10846045 │ █████████████████████████ │ 293236 │ █████████████████████████ │ -│ 2022-03-01 │ 236554668 │ █████████████████████████ │ 11330285 │ █████████████████████████ │ 302387 │ █████████████████████████ │ -│ 2022-04-01 │ 231188077 │ █████████████████████████ │ 11697995 │ █████████████████████████ │ 316303 │ █████████████████████████ │ -│ 2022-05-01 │ 230492108 │ █████████████████████████ │ 11448584 │ █████████████████████████ │ 323725 │ █████████████████████████ │ -│ 2022-06-01 │ 218842949 │ █████████████████████████ │ 11400399 │ █████████████████████████ │ 324846 │ █████████████████████████ │ -│ 2022-07-01 │ 242504279 │ █████████████████████████ │ 12049204 │ █████████████████████████ │ 335621 │ █████████████████████████ │ -│ 2022-08-01 │ 247215325 │ █████████████████████████ │ 12189276 │ █████████████████████████ │ 337873 │ █████████████████████████ │ -│ 2022-09-01 │ 234131223 │ █████████████████████████ │ 11674079 │ █████████████████████████ │ 326325 │ █████████████████████████ │ -│ 2022-10-01 │ 237365072 │ █████████████████████████ │ 11804508 │ █████████████████████████ │ 336063 │ █████████████████████████ │ -│ 2022-11-01 │ 229478878 │ █████████████████████████ │ 11543020 │ █████████████████████████ │ 323122 │ █████████████████████████ │ -│ 2022-12-01 │ 238862690 │ █████████████████████████ │ 11967451 │ █████████████████████████ │ 331668 │ █████████████████████████ │ -│ 2023-01-01 │ 253577512 │ █████████████████████████ │ 12264087 │ █████████████████████████ │ 332711 │ █████████████████████████ │ -│ 2023-02-01 │ 221285501 │ █████████████████████████ │ 11537091 │ █████████████████████████ │ 317879 │ █████████████████████████ │ -└──────────────┴───────────┴───────────────────────────┴──────────┴───────────────────────────┴────────────┴───────────────────────────┘ - -203 行がセットされました。経過時間:48.492 秒。14.69億行、213.35 GBを処理しました(302.91万行/秒、4.40 GB/秒)。 -``` -## More queries {#more-queries} - -11. 
こちらは2022年のトップ10サブレディットです: - -```sql -SELECT - subreddit, - count() AS count -FROM reddit -WHERE toYear(created_utc) = 2022 -GROUP BY subreddit -ORDER BY count DESC -LIMIT 10; -``` - -```response -┌─subreddit──────┬────count─┐ -│ AskReddit │ 72312060 │ -│ AmItheAsshole │ 25323210 │ -│ teenagers │ 22355960 │ -│ worldnews │ 17797707 │ -│ FreeKarma4U │ 15652274 │ -│ FreeKarma4You │ 14929055 │ -│ wallstreetbets │ 14235271 │ -│ politics │ 12511136 │ -│ memes │ 11610792 │ -│ nba │ 11586571 │ -└────────────────┴──────────┘ - -10 rows in set. Elapsed: 5.956 sec. Processed 14.69 billion rows, 126.19 GB (2.47 billion rows/s., 21.19 GB/s.) -``` - -12. 2018年から2019年までのコメント数の増加が最も大きかったサブレディットを見てみましょう: - -```sql -SELECT - subreddit, - newcount - oldcount AS diff -FROM -( - SELECT - subreddit, - count(*) AS newcount - FROM reddit - WHERE toYear(created_utc) = 2019 - GROUP BY subreddit -) -ALL INNER JOIN -( - SELECT - subreddit, - count(*) AS oldcount - FROM reddit - WHERE toYear(created_utc) = 2018 - GROUP BY subreddit -) USING (subreddit) -ORDER BY diff DESC -LIMIT 50 -SETTINGS joined_subquery_requires_alias = 0; -``` - -2019年には「memes」と「teenagers」がRedditで活発でした: - -```response -┌─subreddit────────────┬─────diff─┐ -│ AskReddit │ 18765909 │ -│ memes │ 16496996 │ -│ teenagers │ 13071715 │ -│ AmItheAsshole │ 12312663 │ -│ dankmemes │ 12016716 │ -│ unpopularopinion │ 6809935 │ -│ PewdiepieSubmissions │ 6330844 │ -│ Market76 │ 5213690 │ -│ relationship_advice │ 4060717 │ -│ Minecraft │ 3328659 │ -│ freefolk │ 3227970 │ -│ classicwow │ 3063133 │ -│ Animemes │ 2866876 │ -│ gonewild │ 2457680 │ -│ PublicFreakout │ 2452288 │ -│ gameofthrones │ 2411661 │ -│ RoastMe │ 2378781 │ -│ ShitPostCrusaders │ 2345414 │ -│ AnthemTheGame │ 1813152 │ -│ nfl │ 1804407 │ -│ Showerthoughts │ 1797968 │ -│ Cringetopia │ 1764034 │ -│ pokemon │ 1763269 │ -│ entitledparents │ 1744852 │ -│ HistoryMemes │ 1721645 │ -│ MortalKombat │ 1718184 │ -│ trashy │ 1684357 │ -│ ChapoTrapHouse │ 1675363 │ -│ Brawlstars │ 1663763 │ -│ iamatotalpieceofshit │ 1647381 │ -│ ukpolitics │ 1599204 │ -│ cursedcomments │ 1590781 │ -│ Pikabu │ 1578597 │ -│ wallstreetbets │ 1535225 │ -│ AskOuija │ 1533214 │ -│ interestingasfuck │ 1528910 │ -│ aww │ 1439008 │ -│ wholesomememes │ 1436566 │ -│ SquaredCircle │ 1432172 │ -│ insanepeoplefacebook │ 1290686 │ -│ borderlands3 │ 1274462 │ -│ FreeKarma4U │ 1217769 │ -│ YangForPresidentHQ │ 1186918 │ -│ FortniteCompetitive │ 1184508 │ -│ AskMen │ 1180820 │ -│ EpicSeven │ 1172061 │ -│ MurderedByWords │ 1112476 │ -│ politics │ 1084087 │ -│ barstoolsports │ 1068020 │ -│ BattlefieldV │ 1053878 │ -└──────────────────────┴──────────┘ - -50 rows in set. Elapsed: 10.680 sec. Processed 29.38 billion rows, 198.67 GB (2.75 billion rows/s., 18.60 GB/s.) -``` -## Other queries {#other-queries} - -13. 
もう一つのクエリ: ClickHouseの言及をSnowflakeやPostgresなどの他の技術と比較しましょう。このクエリは、サブストリングのサーチのために147億のコメントを3回検索する必要があるため大きなものですが、実際のパフォーマンスはかなり印象的です。(残念ながらClickHouseのユーザーはまだRedditではあまり活発ではありません): - -```sql -SELECT - toStartOfQuarter(created_utc) AS quarter, - sum(if(positionCaseInsensitive(body, 'clickhouse') > 0, 1, 0)) AS clickhouse, - sum(if(positionCaseInsensitive(body, 'snowflake') > 0, 1, 0)) AS snowflake, - sum(if(positionCaseInsensitive(body, 'postgres') > 0, 1, 0)) AS postgres -FROM reddit -GROUP BY quarter -ORDER BY quarter ASC; -``` - -```response -┌────quarter─┬─clickhouse─┬─snowflake─┬─postgres─┐ -│ 2005-10-01 │ 0 │ 0 │ 0 │ -│ 2006-01-01 │ 0 │ 2 │ 23 │ -│ 2006-04-01 │ 0 │ 2 │ 24 │ -│ 2006-07-01 │ 0 │ 4 │ 13 │ -│ 2006-10-01 │ 0 │ 23 │ 73 │ -│ 2007-01-01 │ 0 │ 14 │ 91 │ -│ 2007-04-01 │ 0 │ 10 │ 59 │ -│ 2007-07-01 │ 0 │ 39 │ 116 │ -│ 2007-10-01 │ 0 │ 45 │ 125 │ -│ 2008-01-01 │ 0 │ 53 │ 234 │ -│ 2008-04-01 │ 0 │ 79 │ 303 │ -│ 2008-07-01 │ 0 │ 102 │ 174 │ -│ 2008-10-01 │ 0 │ 156 │ 323 │ -│ 2009-01-01 │ 0 │ 206 │ 208 │ -│ 2009-04-01 │ 0 │ 178 │ 417 │ -│ 2009-07-01 │ 0 │ 300 │ 295 │ -│ 2009-10-01 │ 0 │ 633 │ 589 │ -│ 2010-01-01 │ 0 │ 555 │ 501 │ -│ 2010-04-01 │ 0 │ 587 │ 469 │ -│ 2010-07-01 │ 0 │ 601 │ 696 │ -│ 2010-10-01 │ 0 │ 1246 │ 505 │ -│ 2011-01-01 │ 0 │ 758 │ 247 │ -│ 2011-04-01 │ 0 │ 537 │ 113 │ -│ 2011-07-01 │ 0 │ 173 │ 64 │ -│ 2011-10-01 │ 0 │ 649 │ 96 │ -│ 2012-01-01 │ 0 │ 4621 │ 662 │ -│ 2012-04-01 │ 0 │ 5737 │ 785 │ -│ 2012-07-01 │ 0 │ 6097 │ 1127 │ -│ 2012-10-01 │ 0 │ 7986 │ 600 │ -│ 2013-01-01 │ 0 │ 9704 │ 839 │ -│ 2013-04-01 │ 0 │ 8161 │ 853 │ -│ 2013-07-01 │ 0 │ 9704 │ 1028 │ -│ 2013-10-01 │ 0 │ 12879 │ 1404 │ -│ 2014-01-01 │ 0 │ 12317 │ 1548 │ -│ 2014-04-01 │ 0 │ 13181 │ 1577 │ -│ 2014-07-01 │ 0 │ 15640 │ 1710 │ -│ 2014-10-01 │ 0 │ 19479 │ 1959 │ -│ 2015-01-01 │ 0 │ 20411 │ 2104 │ -│ 2015-04-01 │ 1 │ 20309 │ 9112 │ -│ 2015-07-01 │ 0 │ 20325 │ 4771 │ -│ 2015-10-01 │ 0 │ 25087 │ 3030 │ -│ 2016-01-01 │ 0 │ 23462 │ 3126 │ -│ 2016-04-01 │ 3 │ 25496 │ 2757 │ -│ 2016-07-01 │ 4 │ 28233 │ 2928 │ -│ 2016-10-01 │ 2 │ 45445 │ 2449 │ -│ 2017-01-01 │ 9 │ 76019 │ 2808 │ -│ 2017-04-01 │ 9 │ 67919 │ 2803 │ -│ 2017-07-01 │ 13 │ 68974 │ 2771 │ -│ 2017-10-01 │ 12 │ 69730 │ 2906 │ -│ 2018-01-01 │ 17 │ 67476 │ 3152 │ -│ 2018-04-01 │ 3 │ 67139 │ 3986 │ -│ 2018-07-01 │ 14 │ 67979 │ 3609 │ -│ 2018-10-01 │ 28 │ 74147 │ 3850 │ -│ 2019-01-01 │ 14 │ 80250 │ 4305 │ -│ 2019-04-01 │ 30 │ 70307 │ 3872 │ -│ 2019-07-01 │ 33 │ 77149 │ 4164 │ -│ 2019-10-01 │ 22 │ 113011 │ 4369 │ -│ 2020-01-01 │ 34 │ 238273 │ 5133 │ -│ 2020-04-01 │ 52 │ 454467 │ 6100 │ -│ 2020-07-01 │ 37 │ 406623 │ 5507 │ -│ 2020-10-01 │ 49 │ 212143 │ 5385 │ -│ 2021-01-01 │ 56 │ 151262 │ 5749 │ -│ 2021-04-01 │ 71 │ 119928 │ 6039 │ -│ 2021-07-01 │ 53 │ 110342 │ 5765 │ -│ 2021-10-01 │ 92 │ 121144 │ 6401 │ -│ 2022-01-01 │ 93 │ 107512 │ 6772 │ -│ 2022-04-01 │ 120 │ 91560 │ 6687 │ -│ 2022-07-01 │ 183 │ 99764 │ 7377 │ -│ 2022-10-01 │ 123 │ 99447 │ 7052 │ -│ 2023-01-01 │ 126 │ 58733 │ 4891 │ -└────────────┴────────────┴───────────┴──────────┘ - -70 rows in set. Elapsed: 325.835 sec. Processed 14.69 billion rows, 2.57 TB (45.08 million rows/s., 7.87 GB/s.) 
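```

The three `positionCaseInsensitive` probes above can also be collapsed into a single multi-needle search per comment. Below is a minimal, untested sketch using `multiSearchAllPositionsCaseInsensitive`; it should return the same counts as the query above, though whether it is actually faster has not been measured here:

```sql
WITH multiSearchAllPositionsCaseInsensitive(body, ['clickhouse', 'snowflake', 'postgres']) AS positions
SELECT
    toStartOfQuarter(created_utc) AS quarter,
    sum(positions[1] > 0) AS clickhouse,
    sum(positions[2] > 0) AS snowflake,
    sum(positions[3] > 0) AS postgres
FROM reddit
GROUP BY quarter
ORDER BY quarter ASC;
```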
diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/reddit-comments.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/reddit-comments.md.hash deleted file mode 100644 index 1a96613f647..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/reddit-comments.md.hash +++ /dev/null @@ -1 +0,0 @@ -f1fa4132c7f8449c diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/stackoverflow.md b/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/stackoverflow.md deleted file mode 100644 index fc360af2fcd..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/stackoverflow.md +++ /dev/null @@ -1,395 +0,0 @@ ---- -description: 'Analyzing Stack Overflow data with ClickHouse' -sidebar_label: 'Stack Overflow' -sidebar_position: 1 -slug: '/getting-started/example-datasets/stackoverflow' -title: 'Analyzing Stack Overflow data with ClickHouse' ---- - -import Image from '@theme/IdealImage'; -import stackoverflow from '@site/static/images/getting-started/example-datasets/stackoverflow.png' - -このデータセットには、Stack Overflowで発生したすべての `Posts`, `Users`, `Votes`, `Comments`, `Badges`, `PostHistory`, 及び `PostLinks` が含まれています。 - -ユーザーは、2024年4月までのすべての投稿を含む事前準備されたParquetバージョンをダウンロードするか、最新のデータをXML形式でダウンロードしてロードすることができます。Stack Overflowはこのデータを定期的に更新しており、歴史的には3か月ごとに提供しています。 - -以下の図は、Parquet形式の利用可能なテーブルのスキーマを示しています。 - -Stack Overflow スキーマ - -このデータのスキーマの説明は[こちら](https://meta.stackexchange.com/questions/2677/database-schema-documentation-for-the-public-data-dump-and-sede)で見つけることができます。 - -## 事前準備されたデータ {#pre-prepared-data} - -2024年4月時点の最新のParquet形式のデータのコピーを提供しています。行数(6000万件の投稿)に関してはClickHouseには小さいですが、このデータセットは重要なテキストの量と大きなStringカラムを含んでいます。 - -```sql -CREATE DATABASE stackoverflow -``` - -以下の時間は、`eu-west-2`にある96 GiB、24 vCPUのClickHouse Cloudクラスタのものです。データセットは`eu-west-3`に位置しています。 - -### 投稿 {#posts} - -```sql -CREATE TABLE stackoverflow.posts -( - `Id` Int32 CODEC(Delta(4), ZSTD(1)), - `PostTypeId` Enum8('Question' = 1, 'Answer' = 2, 'Wiki' = 3, 'TagWikiExcerpt' = 4, 'TagWiki' = 5, 'ModeratorNomination' = 6, 'WikiPlaceholder' = 7, 'PrivilegeWiki' = 8), - `AcceptedAnswerId` UInt32, - `CreationDate` DateTime64(3, 'UTC'), - `Score` Int32, - `ViewCount` UInt32 CODEC(Delta(4), ZSTD(1)), - `Body` String, - `OwnerUserId` Int32, - `OwnerDisplayName` String, - `LastEditorUserId` Int32, - `LastEditorDisplayName` String, - `LastEditDate` DateTime64(3, 'UTC') CODEC(Delta(8), ZSTD(1)), - `LastActivityDate` DateTime64(3, 'UTC'), - `Title` String, - `Tags` String, - `AnswerCount` UInt16 CODEC(Delta(2), ZSTD(1)), - `CommentCount` UInt8, - `FavoriteCount` UInt8, - `ContentLicense` LowCardinality(String), - `ParentId` String, - `CommunityOwnedDate` DateTime64(3, 'UTC'), - `ClosedDate` DateTime64(3, 'UTC') -) -ENGINE = MergeTree -PARTITION BY toYear(CreationDate) -ORDER BY (PostTypeId, toDate(CreationDate), CreationDate) - -INSERT INTO stackoverflow.posts SELECT * FROM s3('https://datasets-documentation.s3.eu-west-3.amazonaws.com/stackoverflow/parquet/posts/*.parquet') - -0 rows in set. Elapsed: 265.466 sec. Processed 59.82 million rows, 38.07 GB (225.34 thousand rows/s., 143.42 MB/s.) 
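-- Optional sanity check (an illustrative addition, not part of the original guide):
-- the April 2024 snapshot loaded above should contain roughly 59.82 million rows,
-- matching the INSERT summary printed above.
SELECT formatReadableQuantity(count()) AS total_posts FROM stackoverflow.posts;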
-``` - -投稿は年ごとにも利用でき、例えば [https://datasets-documentation.s3.eu-west-3.amazonaws.com/stackoverflow/parquet/posts/2020.parquet](https://datasets-documentation.s3.eu-west-3.amazonaws.com/stackoverflow/parquet/posts/2020.parquet) で確認できます。 - -### 投票 {#votes} - -```sql -CREATE TABLE stackoverflow.votes -( - `Id` UInt32, - `PostId` Int32, - `VoteTypeId` UInt8, - `CreationDate` DateTime64(3, 'UTC'), - `UserId` Int32, - `BountyAmount` UInt8 -) -ENGINE = MergeTree -ORDER BY (VoteTypeId, CreationDate, PostId, UserId) - -INSERT INTO stackoverflow.votes SELECT * FROM s3('https://datasets-documentation.s3.eu-west-3.amazonaws.com/stackoverflow/parquet/votes/*.parquet') - -0 rows in set. Elapsed: 21.605 sec. Processed 238.98 million rows, 2.13 GB (11.06 million rows/s., 98.46 MB/s.) -``` - -投票は年ごとにも利用でき、例えば [https://datasets-documentation.s3.eu-west-3.amazonaws.com/stackoverflow/parquet/votes/2020.parquet](https://datasets-documentation.s3.eu-west-3.amazonaws.com/stackoverflow/parquet/votes/2020.parquet) で確認できます。 - -### コメント {#comments} - -```sql -CREATE TABLE stackoverflow.comments -( - `Id` UInt32, - `PostId` UInt32, - `Score` UInt16, - `Text` String, - `CreationDate` DateTime64(3, 'UTC'), - `UserId` Int32, - `UserDisplayName` LowCardinality(String) -) -ENGINE = MergeTree -ORDER BY CreationDate - -INSERT INTO stackoverflow.comments SELECT * FROM s3('https://datasets-documentation.s3.eu-west-3.amazonaws.com/stackoverflow/parquet/comments/*.parquet') - -0 rows in set. Elapsed: 56.593 sec. Processed 90.38 million rows, 11.14 GB (1.60 million rows/s., 196.78 MB/s.) -``` - -コメントは年ごとにも利用でき、例えば [https://datasets-documentation.s3.eu-west-3.amazonaws.com/stackoverflow/parquet/comments/2020.parquet](https://datasets-documentation.s3.eu-west-3.amazonaws.com/stackoverflow/parquet/comments/2020.parquet) で確認できます。 - -### ユーザー {#users} - -```sql -CREATE TABLE stackoverflow.users -( - `Id` Int32, - `Reputation` LowCardinality(String), - `CreationDate` DateTime64(3, 'UTC') CODEC(Delta(8), ZSTD(1)), - `DisplayName` String, - `LastAccessDate` DateTime64(3, 'UTC'), - `AboutMe` String, - `Views` UInt32, - `UpVotes` UInt32, - `DownVotes` UInt32, - `WebsiteUrl` String, - `Location` LowCardinality(String), - `AccountId` Int32 -) -ENGINE = MergeTree -ORDER BY (Id, CreationDate) - -INSERT INTO stackoverflow.users SELECT * FROM s3('https://datasets-documentation.s3.eu-west-3.amazonaws.com/stackoverflow/parquet/users.parquet') - -0 rows in set. Elapsed: 10.988 sec. Processed 22.48 million rows, 1.36 GB (2.05 million rows/s., 124.10 MB/s.) -``` - -### バッジ {#badges} - -```sql -CREATE TABLE stackoverflow.badges -( - `Id` UInt32, - `UserId` Int32, - `Name` LowCardinality(String), - `Date` DateTime64(3, 'UTC'), - `Class` Enum8('Gold' = 1, 'Silver' = 2, 'Bronze' = 3), - `TagBased` Bool -) -ENGINE = MergeTree -ORDER BY UserId - -INSERT INTO stackoverflow.badges SELECT * FROM s3('https://datasets-documentation.s3.eu-west-3.amazonaws.com/stackoverflow/parquet/badges.parquet') - -0 rows in set. Elapsed: 6.635 sec. Processed 51.29 million rows, 797.05 MB (7.73 million rows/s., 120.13 MB/s.) 
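-- Optional check (an illustrative addition, not part of the original guide): compare the
-- row counts of the tables created so far with the figures reported by each INSERT above.
-- total_rows comes from system.tables and is populated for MergeTree tables.
SELECT name, total_rows
FROM system.tables
WHERE database = 'stackoverflow'
ORDER BY total_rows DESC;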
-``` - -### PostLinks {#postlinks} - -```sql -CREATE TABLE stackoverflow.postlinks -( - `Id` UInt64, - `CreationDate` DateTime64(3, 'UTC'), - `PostId` Int32, - `RelatedPostId` Int32, - `LinkTypeId` Enum8('Linked' = 1, 'Duplicate' = 3) -) -ENGINE = MergeTree -ORDER BY (PostId, RelatedPostId) - -INSERT INTO stackoverflow.postlinks SELECT * FROM s3('https://datasets-documentation.s3.eu-west-3.amazonaws.com/stackoverflow/parquet/postlinks.parquet') - -0 rows in set. Elapsed: 1.534 sec. Processed 6.55 million rows, 129.70 MB (4.27 million rows/s., 84.57 MB/s.) -``` - -### PostHistory {#posthistory} - -```sql -CREATE TABLE stackoverflow.posthistory -( - `Id` UInt64, - `PostHistoryTypeId` UInt8, - `PostId` Int32, - `RevisionGUID` String, - `CreationDate` DateTime64(3, 'UTC'), - `UserId` Int32, - `Text` String, - `ContentLicense` LowCardinality(String), - `Comment` String, - `UserDisplayName` String -) -ENGINE = MergeTree -ORDER BY (CreationDate, PostId) - -INSERT INTO stackoverflow.posthistory SELECT * FROM s3('https://datasets-documentation.s3.eu-west-3.amazonaws.com/stackoverflow/parquet/posthistory/*.parquet') - -0 rows in set. Elapsed: 422.795 sec. Processed 160.79 million rows, 67.08 GB (380.30 thousand rows/s., 158.67 MB/s.) -``` - -## オリジナルデータセット {#original-dataset} - -オリジナルデータセットは、[https://archive.org/download/stackexchange](https://archive.org/download/stackexchange) で圧縮(7zip)されたXML形式で利用可能で、ファイルのプレフィックスは `stackoverflow.com*` です。 - -### ダウンロード {#download} - -```bash -wget https://archive.org/download/stackexchange/stackoverflow.com-Badges.7z -wget https://archive.org/download/stackexchange/stackoverflow.com-Comments.7z -wget https://archive.org/download/stackexchange/stackoverflow.com-PostHistory.7z -wget https://archive.org/download/stackexchange/stackoverflow.com-PostLinks.7z -wget https://archive.org/download/stackexchange/stackoverflow.com-Posts.7z -wget https://archive.org/download/stackexchange/stackoverflow.com-Users.7z -wget https://archive.org/download/stackexchange/stackoverflow.com-Votes.7z -``` - -これらのファイルは最大で35GBあり、インターネット接続によってはダウンロードに約30分かかることがあります- ダウンロードサーバーは約20MB/secで制限しています。 - -### JSONへの変換 {#convert-to-json} - -執筆時点で、ClickHouseはXMLを入力形式としてネイティブにサポートしていません。データをClickHouseにロードするには、まずNDJSONに変換します。 - -XMLをJSONに変換するには、[`xq`](https://github.com/kislyuk/yq)というLinuxツールをお勧めします。これはXMLドキュメント用のシンプルな`jq`ラッパーです。 - -xqとjqをインストールします: - -```bash -sudo apt install jq -pip install yq -``` - -上記のファイルには次の手順が適用されます。`stackoverflow.com-Posts.7z`ファイルを例として使用します。必要に応じて変更してください。 - -[ p7zip](https://p7zip.sourceforge.net/)を使用してファイルを抽出します。これにより、単一のxmlファイル(この場合は`Posts.xml`)が生成されます。 - -> ファイルは約4.5倍圧縮されています。圧縮時22GBの投稿ファイルは、約97GBの展開されたサイズが必要です。 - -```bash -p7zip -d stackoverflow.com-Posts.7z -``` - -次に、xmlファイルを10000行ずつ分割して新しいファイルを作成します。 - -```bash -mkdir posts -cd posts - -# 次のコマンドは、入力xmlファイルを10000行のサブファイルに分割します。 -tail +3 ../Posts.xml | head -n -1 | split -l 10000 --filter='{ printf "\n"; cat - ; printf "\n"; } > $FILE' - -``` - -上記を実行した後、各ファイルに10000行が含まれる一連のファイルが作成されます。これにより、次のコマンドのメモリオーバーヘッドが過度になることがないようにします(xmlからJSONへの変換はメモリ内で行われます)。 - -```bash -find . 
-maxdepth 1 -type f -exec xq -c '.rows.row[]' {} \; | sed -e 's:"@:":g' > posts_v2.json -``` - -上記のコマンドを実行すると、単一の`posts.json`ファイルが生成されます。 - -次のコマンドを使用してClickHouseにロードします。スキーマは`posts.json`ファイルのために指定されています。これはターゲットテーブルに合わせてデータ型ごとに調整する必要があります。 - -```bash -clickhouse local --query "SELECT * FROM file('posts.json', JSONEachRow, 'Id Int32, PostTypeId UInt8, AcceptedAnswerId UInt32, CreationDate DateTime64(3, \'UTC\'), Score Int32, ViewCount UInt32, Body String, OwnerUserId Int32, OwnerDisplayName String, LastEditorUserId Int32, LastEditorDisplayName String, LastEditDate DateTime64(3, \'UTC\'), LastActivityDate DateTime64(3, \'UTC\'), Title String, Tags String, AnswerCount UInt16, CommentCount UInt8, FavoriteCount UInt8, ContentLicense String, ParentId String, CommunityOwnedDate DateTime64(3, \'UTC\'), ClosedDate DateTime64(3, \'UTC\')') FORMAT Native" | clickhouse client --host --secure --password --query "INSERT INTO stackoverflow.posts_v2 FORMAT Native" -``` - -## 例のクエリ {#example-queries} - -いくつかの簡単な質問で始めましょう。 - -### Stack Overflowで最も人気のあるタグ {#most-popular-tags-on-stack-overflow} - -```sql - -SELECT - arrayJoin(arrayFilter(t -> (t != ''), splitByChar('|', Tags))) AS Tags, - count() AS c -FROM stackoverflow.posts -GROUP BY Tags -ORDER BY c DESC -LIMIT 10 - -┌─Tags───────┬───────c─┐ -│ javascript │ 2527130 │ -│ python │ 2189638 │ -│ java │ 1916156 │ -│ c# │ 1614236 │ -│ php │ 1463901 │ -│ android │ 1416442 │ -│ html │ 1186567 │ -│ jquery │ 1034621 │ -│ c++ │ 806202 │ -│ css │ 803755 │ -└────────────┴─────────┘ - -10 rows in set. Elapsed: 1.013 sec. Processed 59.82 million rows, 1.21 GB (59.07 million rows/s., 1.19 GB/s.) -Peak memory usage: 224.03 MiB. -``` - -### 最も回答数が多いユーザー (アクティブアカウント) {#user-with-the-most-answers-active-accounts} - -アカウントには`UserId`が必要です。 - -```sql -SELECT - any(OwnerUserId) UserId, - OwnerDisplayName, - count() AS c -FROM stackoverflow.posts WHERE OwnerDisplayName != '' AND PostTypeId='Answer' AND OwnerUserId != 0 -GROUP BY OwnerDisplayName -ORDER BY c DESC -LIMIT 5 - -┌─UserId─┬─OwnerDisplayName─┬────c─┐ -│ 22656 │ Jon Skeet │ 2727 │ -│ 23354 │ Marc Gravell │ 2150 │ -│ 12950 │ tvanfosson │ 1530 │ -│ 3043 │ Joel Coehoorn │ 1438 │ -│ 10661 │ S.Lott │ 1087 │ -└────────┴──────────────────┴──────┘ - -5 rows in set. Elapsed: 0.154 sec. Processed 35.83 million rows, 193.39 MB (232.33 million rows/s., 1.25 GB/s.) -Peak memory usage: 206.45 MiB. -``` - -### ClickHouse関連の投稿で最も閲覧数が多いもの {#clickhouse-related-posts-with-the-most-views} - -```sql -SELECT - Id, - Title, - ViewCount, - AnswerCount -FROM stackoverflow.posts -WHERE Title ILIKE '%ClickHouse%' -ORDER BY ViewCount DESC -LIMIT 10 - -┌───────Id─┬─Title────────────────────────────────────────────────────────────────────────────┬─ViewCount─┬─AnswerCount─┐ -│ 52355143 │ ClickHouseテーブルから古いレコードを削除することは可能ですか? │ 41462 │ 3 │ -│ 37954203 │ Clickhouseデータインポート │ 38735 │ 3 │ -│ 37901642 │ Clickhouseでデータを更新する │ 36236 │ 6 │ -│ 58422110 │ Pandas: Clickhouseにデータフレームを挿入する方法 │ 29731 │ 4 │ -│ 63621318 │ DBeaver - Clickhouse - SQLエラー [159] .. 読み取りタイムアウト │ 27350 │ 1 │ -│ 47591813 │ Clickhouseテーブルの配列カラムの内容でフィルターをかける方法 │ 27078 │ 2 │ -│ 58728436 │ Clickhouseデータベースでクエリにおいてケースインセンシティブで文字列を検索する方法 │ 26567 │ 3 │ -│ 65316905 │ Clickhouse: DB::Exception: メモリ制限 (クエリ用) を超えました │ 24899 │ 2 │ -│ 49944865 │ Clickhouseにカラムを追加する方法 │ 24424 │ 1 │ -│ 59712399 │ ClickHouseで日付文字列をDateTime形式に拡張パースする方法 │ 22620 │ 1 │ -└──────────┴──────────────────────────────────────────────────────────────────────────────────┴───────────┴─────────────┘ - -10 rows in set. Elapsed: 0.472 sec. 
Processed 59.82 million rows, 1.91 GB (126.63 million rows/s., 4.03 GB/s.) -Peak memory usage: 240.01 MiB。 -``` - -### 最も物議を醸した投稿 {#most-controversial-posts} - -```sql -SELECT - Id, - Title, - UpVotes, - DownVotes, - abs(UpVotes - DownVotes) AS Controversial_ratio -FROM stackoverflow.posts -INNER JOIN -( - SELECT - PostId, - countIf(VoteTypeId = 2) AS UpVotes, - countIf(VoteTypeId = 3) AS DownVotes - FROM stackoverflow.votes - GROUP BY PostId - HAVING (UpVotes > 10) AND (DownVotes > 10) -) AS votes ON posts.Id = votes.PostId -WHERE Title != '' -ORDER BY Controversial_ratio ASC -LIMIT 3 - -┌───────Id─┬─Title─────────────────────────────────────────────┬─UpVotes─┬─DownVotes─┬─Controversial_ratio─┐ -│ 583177 │ VB.NET無限フォーループ │ 12 │ 12 │ 0 │ -│ 9756797 │ コンソール入力を列挙可能として読み込む - 1文で? │ 16 │ 16 │ 0 │ -│ 13329132 │ RubyのARGVの目的は何ですか? │ 22 │ 22 │ 0 │ -└──────────┴───────────────────────────────────────────────────┴─────────┴───────────┴─────────────────────┘ - -3 rows in set. Elapsed: 4.779 sec. Processed 298.80 million rows, 3.16 GB (62.52 million rows/s., 661.05 MB/s.) -Peak memory usage: 6.05 GiB. -``` - -## 著作権表示 {#attribution} - -Stack Overflowが提供しているこのデータに感謝し、`cc-by-sa 4.0`ライセンスの下でその努力と元データの出所である[https://archive.org/details/stackexchange](https://archive.org/details/stackexchange)を認識します。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/stackoverflow.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/stackoverflow.md.hash deleted file mode 100644 index c190abd986a..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/stackoverflow.md.hash +++ /dev/null @@ -1 +0,0 @@ -35c21b12cf68d5ee diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/star-schema.md b/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/star-schema.md deleted file mode 100644 index 2aa4d4455d8..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/star-schema.md +++ /dev/null @@ -1,771 +0,0 @@ ---- -description: 'The Star Schema Benchmark (SSB) data set and queries' -sidebar_label: 'Star Schema Benchmark' -slug: '/getting-started/example-datasets/star-schema' -title: 'Star Schema Benchmark (SSB, 2009)' ---- - - - -The Star Schema Benchmark is roughly based on the [TPC-H](tpch.md)'s tables and queries but unlike TPC-H, it uses a star schema layout. -The bulk of the data sits in a gigantic fact table which is surrounded by multiple small dimension tables. -The queries joined the fact table with one or more dimension tables to apply filter criteria, e.g. `MONTH = 'JANUARY'`. - -References: -- [Star Schema Benchmark](https://cs.umb.edu/~poneil/StarSchemaB.pdf) (O'Neil et. al), 2009 -- [Variations of the Star Schema Benchmark to Test the Effects of Data Skew on Query Performance](https://doi.org/10.1145/2479871.2479927) (Rabl. et. al.), 2013 - -First, checkout the star schema benchmark repository and compile the data generator: - -```bash -git clone https://github.com/vadimtk/ssb-dbgen.git -cd ssb-dbgen -make -``` - -Then, generate the data. Parameter `-s` specifies the scale factor. For example, with `-s 100`, 600 million rows are generated. 
- -```bash -./dbgen -s 1000 -T c -./dbgen -s 1000 -T l -./dbgen -s 1000 -T p -./dbgen -s 1000 -T s -./dbgen -s 1000 -T d -``` - -Now create tables in ClickHouse: - -```sql -CREATE TABLE customer -( - C_CUSTKEY UInt32, - C_NAME String, - C_ADDRESS String, - C_CITY LowCardinality(String), - C_NATION LowCardinality(String), - C_REGION LowCardinality(String), - C_PHONE String, - C_MKTSEGMENT LowCardinality(String) -) -ENGINE = MergeTree ORDER BY (C_CUSTKEY); - -CREATE TABLE lineorder -( - LO_ORDERKEY UInt32, - LO_LINENUMBER UInt8, - LO_CUSTKEY UInt32, - LO_PARTKEY UInt32, - LO_SUPPKEY UInt32, - LO_ORDERDATE Date, - LO_ORDERPRIORITY LowCardinality(String), - LO_SHIPPRIORITY UInt8, - LO_QUANTITY UInt8, - LO_EXTENDEDPRICE UInt32, - LO_ORDTOTALPRICE UInt32, - LO_DISCOUNT UInt8, - LO_REVENUE UInt32, - LO_SUPPLYCOST UInt32, - LO_TAX UInt8, - LO_COMMITDATE Date, - LO_SHIPMODE LowCardinality(String) -) -ENGINE = MergeTree PARTITION BY toYear(LO_ORDERDATE) ORDER BY (LO_ORDERDATE, LO_ORDERKEY); - -CREATE TABLE part -( - P_PARTKEY UInt32, - P_NAME String, - P_MFGR LowCardinality(String), - P_CATEGORY LowCardinality(String), - P_BRAND LowCardinality(String), - P_COLOR LowCardinality(String), - P_TYPE LowCardinality(String), - P_SIZE UInt8, - P_CONTAINER LowCardinality(String) -) -ENGINE = MergeTree ORDER BY P_PARTKEY; - -CREATE TABLE supplier -( - S_SUPPKEY UInt32, - S_NAME String, - S_ADDRESS String, - S_CITY LowCardinality(String), - S_NATION LowCardinality(String), - S_REGION LowCardinality(String), - S_PHONE String -) -ENGINE = MergeTree ORDER BY S_SUPPKEY; - -CREATE TABLE date -( - D_DATEKEY Date, - D_DATE FixedString(18), - D_DAYOFWEEK LowCardinality(String), - D_MONTH LowCardinality(String), - D_YEAR UInt16, - D_YEARMONTHNUM UInt32, - D_YEARMONTH LowCardinality(FixedString(7)), - D_DAYNUMINWEEK UInt8, - D_DAYNUMINMONTH UInt8, - D_DAYNUMINYEAR UInt16, - D_MONTHNUMINYEAR UInt8, - D_WEEKNUMINYEAR UInt8, - D_SELLINGSEASON String, - D_LASTDAYINWEEKFL UInt8, - D_LASTDAYINMONTHFL UInt8, - D_HOLIDAYFL UInt8, - D_WEEKDAYFL UInt8 -) -ENGINE = MergeTree ORDER BY D_DATEKEY; -``` - -The data can be imported as follows: - -```bash -clickhouse-client --query "INSERT INTO customer FORMAT CSV" < customer.tbl -clickhouse-client --query "INSERT INTO part FORMAT CSV" < part.tbl -clickhouse-client --query "INSERT INTO supplier FORMAT CSV" < supplier.tbl -clickhouse-client --query "INSERT INTO lineorder FORMAT CSV" < lineorder.tbl -clickhouse-client --query "INSERT INTO date FORMAT CSV" < date.tbl -``` - -In many use cases of ClickHouse, multiple tables are converted into a single denormalized flat table. -This step is optional, below queries are listed in their original form and in a format rewritten for the denormalized table. 
- -```sql -SET max_memory_usage = 20000000000; - -CREATE TABLE lineorder_flat -ENGINE = MergeTree ORDER BY (LO_ORDERDATE, LO_ORDERKEY) -AS SELECT - l.LO_ORDERKEY AS LO_ORDERKEY, - l.LO_LINENUMBER AS LO_LINENUMBER, - l.LO_CUSTKEY AS LO_CUSTKEY, - l.LO_PARTKEY AS LO_PARTKEY, - l.LO_SUPPKEY AS LO_SUPPKEY, - l.LO_ORDERDATE AS LO_ORDERDATE, - l.LO_ORDERPRIORITY AS LO_ORDERPRIORITY, - l.LO_SHIPPRIORITY AS LO_SHIPPRIORITY, - l.LO_QUANTITY AS LO_QUANTITY, - l.LO_EXTENDEDPRICE AS LO_EXTENDEDPRICE, - l.LO_ORDTOTALPRICE AS LO_ORDTOTALPRICE, - l.LO_DISCOUNT AS LO_DISCOUNT, - l.LO_REVENUE AS LO_REVENUE, - l.LO_SUPPLYCOST AS LO_SUPPLYCOST, - l.LO_TAX AS LO_TAX, - l.LO_COMMITDATE AS LO_COMMITDATE, - l.LO_SHIPMODE AS LO_SHIPMODE, - c.C_NAME AS C_NAME, - c.C_ADDRESS AS C_ADDRESS, - c.C_CITY AS C_CITY, - c.C_NATION AS C_NATION, - c.C_REGION AS C_REGION, - c.C_PHONE AS C_PHONE, - c.C_MKTSEGMENT AS C_MKTSEGMENT, - s.S_NAME AS S_NAME, - s.S_ADDRESS AS S_ADDRESS, - s.S_CITY AS S_CITY, - s.S_NATION AS S_NATION, - s.S_REGION AS S_REGION, - s.S_PHONE AS S_PHONE, - p.P_NAME AS P_NAME, - p.P_MFGR AS P_MFGR, - p.P_CATEGORY AS P_CATEGORY, - p.P_BRAND AS P_BRAND, - p.P_COLOR AS P_COLOR, - p.P_TYPE AS P_TYPE, - p.P_SIZE AS P_SIZE, - p.P_CONTAINER AS P_CONTAINER -FROM lineorder AS l -INNER JOIN customer AS c ON c.C_CUSTKEY = l.LO_CUSTKEY -INNER JOIN supplier AS s ON s.S_SUPPKEY = l.LO_SUPPKEY -INNER JOIN part AS p ON p.P_PARTKEY = l.LO_PARTKEY; -``` - -The queries are generated by `./qgen -s `. Example queries for `s = 100`: - -Q1.1 - -```sql -SELECT - sum(LO_EXTENDEDPRICE * LO_DISCOUNT) AS REVENUE -FROM - lineorder, - date -WHERE - LO_ORDERDATE = D_DATEKEY - AND D_YEAR = 1993 - AND LO_DISCOUNT BETWEEN 1 AND 3 - AND LO_QUANTITY < 25; -``` - -Denormalized table: - -```sql -SELECT - sum(LO_EXTENDEDPRICE * LO_DISCOUNT) AS revenue -FROM - lineorder_flat -WHERE - toYear(LO_ORDERDATE) = 1993 - AND LO_DISCOUNT BETWEEN 1 AND 3 - AND LO_QUANTITY < 25; -``` - -Q1.2 - -```sql -SELECT - sum(LO_EXTENDEDPRICE * LO_DISCOUNT) AS REVENUE -FROM - lineorder, - date -WHERE - LO_ORDERDATE = D_DATEKEY - AND D_YEARMONTHNUM = 199401 - AND LO_DISCOUNT BETWEEN 4 AND 6 - AND LO_QUANTITY BETWEEN 26 AND 35; -``` - -Denormalized table: - -```sql -SELECT - sum(LO_EXTENDEDPRICE * LO_DISCOUNT) AS revenue -FROM - lineorder_flat -WHERE - toYYYYMM(LO_ORDERDATE) = 199401 - AND LO_DISCOUNT BETWEEN 4 AND 6 - AND LO_QUANTITY BETWEEN 26 AND 35; -``` - -Q1.3 - -```sql -SELECT - sum(LO_EXTENDEDPRICE*LO_DISCOUNT) AS REVENUE -FROM - lineorder, - date -WHERE - LO_ORDERDATE = D_DATEKEY - AND D_WEEKNUMINYEAR = 6 - AND D_YEAR = 1994 - AND LO_DISCOUNT BETWEEN 5 AND 7 - AND LO_QUANTITY BETWEEN 26 AND 35; -``` - -Denormalized table: - -```sql -SELECT - sum(LO_EXTENDEDPRICE * LO_DISCOUNT) AS revenue -FROM - lineorder_flat -WHERE - toISOWeek(LO_ORDERDATE) = 6 - AND toYear(LO_ORDERDATE) = 1994 - AND LO_DISCOUNT BETWEEN 5 AND 7 - AND LO_QUANTITY BETWEEN 26 AND 35; -``` - -Q2.1 - -```sql -SELECT - sum(LO_REVENUE), - D_YEAR, - P_BRAND -FROM - lineorder, - date, - part, - supplier -WHERE - LO_ORDERDATE = D_DATEKEY - AND LO_PARTKEY = P_PARTKEY - AND LO_SUPPKEY = S_SUPPKEY - AND P_CATEGORY = 'MFGR#12' - AND S_REGION = 'AMERICA' -GROUP BY - D_YEAR, - P_BRAND -ORDER BY - D_YEAR, - P_BRAND; -``` - -Denormalized table: - -```sql -SELECT - sum(LO_REVENUE), - toYear(LO_ORDERDATE) AS year, - P_BRAND -FROM lineorder_flat -WHERE - P_CATEGORY = 'MFGR#12' - AND S_REGION = 'AMERICA' -GROUP BY - year, - P_BRAND -ORDER BY - year, - P_BRAND; -``` - -Q2.2 - -```sql -SELECT - sum(LO_REVENUE), - 
D_YEAR, - P_BRAND -FROM - lineorder, - date, - part, - supplier -WHERE - LO_ORDERDATE = D_DATEKEY - AND LO_PARTKEY = P_PARTKEY - AND LO_SUPPKEY = S_SUPPKEY - AND P_BRAND BETWEEN - 'MFGR#2221' AND 'MFGR#2228' - AND S_REGION = 'ASIA' -GROUP BY - D_YEAR, - P_BRAND -ORDER BY - D_YEAR, - P_BRAND; -``` - -Denormalized table: - -```sql -SELECT - sum(LO_REVENUE), - toYear(LO_ORDERDATE) AS year, - P_BRAND -FROM lineorder_flat -WHERE P_BRAND >= 'MFGR#2221' AND P_BRAND <= 'MFGR#2228' AND S_REGION = 'ASIA' -GROUP BY - year, - P_BRAND -ORDER BY - year, - P_BRAND; -``` - -Q2.3 - -```sql -SELECT - sum(LO_REVENUE), - D_YEAR, - P_BRAND -FROM - lineorder, - date, - part, - supplier -WHERE - LO_ORDERDATE = D_DATEKEY - AND LO_PARTKEY = P_PARTKEY - AND LO_SUPPKEY = S_SUPPKEY - AND P_BRAND = 'MFGR#2221' - AND S_REGION = 'EUROPE' -GROUP BY - D_YEAR, - P_BRAND -ORDER BY - D_YEAR, - P_BRAND; -``` - -Denormalized table: - -```sql -SELECT - sum(LO_REVENUE), - toYear(LO_ORDERDATE) AS year, - P_BRAND -FROM lineorder_flat -WHERE P_BRAND = 'MFGR#2239' AND S_REGION = 'EUROPE' -GROUP BY - year, - P_BRAND -ORDER BY - year, - P_BRAND; -``` - -Q3.1 - -```sql -SELECT - C_NATION, - S_NATION, - D_YEAR, - sum(LO_REVENUE) AS REVENUE -FROM - customer, - lineorder, - supplier, - date -WHERE - LO_CUSTKEY = C_CUSTKEY - AND LO_SUPPKEY = S_SUPPKEY - AND LO_ORDERDATE = D_DATEKEY - AND C_REGION = 'ASIA' AND S_REGION = 'ASIA' - AND D_YEAR >= 1992 AND D_YEAR <= 1997 -GROUP BY - C_NATION, - S_NATION, - D_YEAR -ORDER BY - D_YEAR ASC, - REVENUE DESC; -``` - -Denormalized table: - -```sql -SELECT - C_NATION, - S_NATION, - toYear(LO_ORDERDATE) AS year, - sum(LO_REVENUE) AS revenue -FROM lineorder_flat -WHERE - C_REGION = 'ASIA' - AND S_REGION = 'ASIA' - AND year >= 1992 - AND year <= 1997 -GROUP BY - C_NATION, - S_NATION, - year -ORDER BY - year ASC, - revenue DESC; -``` - -Q3.2 - -```sql -SELECT - C_CITY, - S_CITY, - D_YEAR, - sum(LO_REVENUE) AS REVENUE -FROM - customer, - lineorder, - supplier, - date -WHERE - LO_CUSTKEY = C_CUSTKEY - AND LO_SUPPKEY = S_SUPPKEY - AND LO_ORDERDATE = D_DATEKEY - AND C_NATION = 'UNITED STATES' - AND S_NATION = 'UNITED STATES' - AND D_YEAR >= 1992 AND D_YEAR <= 1997 -GROUP BY - C_CITY, - S_CITY, - D_YEAR -ORDER BY - D_YEAR ASC, - REVENUE DESC; -``` - -Denormalized table: - -```sql -SELECT - C_CITY, - S_CITY, - toYear(LO_ORDERDATE) AS year, - sum(LO_REVENUE) AS revenue -FROM lineorder_flat -WHERE - C_NATION = 'UNITED STATES' - AND S_NATION = 'UNITED STATES' - AND year >= 1992 - AND year <= 1997 -GROUP BY - C_CITY, - S_CITY, - year -ORDER BY - year ASC, - revenue DESC; -``` - -Q3.3 - -```sql -SELECT - C_CITY, - S_CITY, - D_YEAR, - sum(LO_REVENUE) AS revenue -FROM - customer, - lineorder, - supplier, - date -WHERE - LO_CUSTKEY = C_CUSTKEY - AND LO_SUPPKEY = S_SUPPKEY - AND LO_ORDERDATE = D_DATEKEY - AND (C_CITY = 'UNITED KI1' OR C_CITY = 'UNITED KI5') - AND (S_CITY = 'UNITED KI1' OR S_CITY = 'UNITED KI5') - AND D_YEAR >= 1992 - AND D_YEAR <= 1997 -GROUP BY - C_CITY, - S_CITY, - D_YEAR -ORDER BY - D_YEAR ASC, - revenue DESC; -``` - -Denormalized table: - -```sql -SELECT - C_CITY, - S_CITY, - toYear(LO_ORDERDATE) AS year, - sum(LO_REVENUE) AS revenue -FROM lineorder_flat -WHERE - (C_CITY = 'UNITED KI1' OR C_CITY = 'UNITED KI5') - AND (S_CITY = 'UNITED KI1' OR S_CITY = 'UNITED KI5') - AND year >= 1992 - AND year <= 1997 -GROUP BY - C_CITY, - S_CITY, - year -ORDER BY - year ASC, - revenue DESC; -``` - -Q3.4 - -```sql -SELECT - C_CITY, - S_CITY, - D_YEAR, - sum(LO_REVENUE) AS revenue -FROM - customer, - lineorder, - 
supplier, - date -WHERE - LO_CUSTKEY = C_CUSTKEY - AND LO_SUPPKEY = S_SUPPKEY - AND LO_ORDERDATE = D_DATEKEY - AND (C_CITY='UNITED KI1' OR C_CITY='UNITED KI5') - AND (S_CITY='UNITED KI1' OR S_CITY='UNITED KI5') - AND D_YEARMONTH = 'Dec1997' -GROUP BY - C_CITY, - S_CITY, - D_YEAR -ORDER BY - D_YEAR ASC, - revenue DESC; -``` - -Denormalized table: - -```sql -SELECT - C_CITY, - S_CITY, - toYear(LO_ORDERDATE) AS year, - sum(LO_REVENUE) AS revenue -FROM lineorder_flat -WHERE - (C_CITY = 'UNITED KI1' OR C_CITY = 'UNITED KI5') - AND (S_CITY = 'UNITED KI1' OR S_CITY = 'UNITED KI5') - AND toYYYYMM(LO_ORDERDATE) = 199712 -GROUP BY - C_CITY, - S_CITY, - year -ORDER BY - year ASC, - revenue DESC; -``` - -Q4.1 - -```sql -SELECT - D_YEAR, - C_NATION, - sum(LO_REVENUE - LO_SUPPLYCOST) AS PROFIT -FROM - date, - customer, - supplier, - part, - lineorder -WHERE - LO_CUSTKEY = C_CUSTKEY - AND LO_SUPPKEY = S_SUPPKEY - AND LO_PARTKEY = P_PARTKEY - AND LO_ORDERDATE = D_DATEKEY - AND C_REGION = 'AMERICA' - AND S_REGION = 'AMERICA' - AND (P_MFGR = 'MFGR#1' OR P_MFGR = 'MFGR#2') -GROUP BY - D_YEAR, - C_NATION -ORDER BY - D_YEAR, - C_NATION -``` - -Denormalized table: - -```sql -SELECT - toYear(LO_ORDERDATE) AS year, - C_NATION, - sum(LO_REVENUE - LO_SUPPLYCOST) AS profit -FROM lineorder_flat -WHERE C_REGION = 'AMERICA' AND S_REGION = 'AMERICA' AND (P_MFGR = 'MFGR#1' OR P_MFGR = 'MFGR#2') -GROUP BY - year, - C_NATION -ORDER BY - year ASC, - C_NATION ASC; -``` - -Q4.2 - -```sql -SELECT - D_YEAR, - S_NATION, - P_CATEGORY, - sum(LO_REVENUE - LO_SUPPLYCOST) AS profit -FROM - date, - customer, - supplier, - part, - lineorder -WHERE - LO_CUSTKEY = C_CUSTKEY - AND LO_SUPPKEY = S_SUPPKEY - AND LO_PARTKEY = P_PARTKEY - AND LO_ORDERDATE = D_DATEKEY - AND C_REGION = 'AMERICA' - AND S_REGION = 'AMERICA' - AND (D_YEAR = 1997 OR D_YEAR = 1998) - AND (P_MFGR = 'MFGR#1' OR P_MFGR = 'MFGR#2') -GROUP BY - D_YEAR, - S_NATION, - P_CATEGORY -ORDER BY - D_YEAR, - S_NATION, - P_CATEGORY -``` - -Denormalized table: - -```sql -SELECT - toYear(LO_ORDERDATE) AS year, - S_NATION, - P_CATEGORY, - sum(LO_REVENUE - LO_SUPPLYCOST) AS profit -FROM lineorder_flat -WHERE - C_REGION = 'AMERICA' - AND S_REGION = 'AMERICA' - AND (year = 1997 OR year = 1998) - AND (P_MFGR = 'MFGR#1' OR P_MFGR = 'MFGR#2') -GROUP BY - year, - S_NATION, - P_CATEGORY -ORDER BY - year ASC, - S_NATION ASC, - P_CATEGORY ASC; -``` - -Q4.3 - -```sql -SELECT - D_YEAR, - S_CITY, - P_BRAND, - sum(LO_REVENUE - LO_SUPPLYCOST) AS profit -FROM - date, - customer, - supplier, - part, - lineorder -WHERE - LO_CUSTKEY = C_CUSTKEY - AND LO_SUPPKEY = S_SUPPKEY - AND LO_PARTKEY = P_PARTKEY - AND LO_ORDERDATE = D_DATEKEY - AND C_REGION = 'AMERICA' - AND S_NATION = 'UNITED STATES' - AND (D_YEAR = 1997 OR D_YEAR = 1998) - AND P_CATEGORY = 'MFGR#14' -GROUP BY - D_YEAR, - S_CITY, - P_BRAND -ORDER BY - D_YEAR, - S_CITY, - P_BRAND -``` - -Denormalized table: - -```sql -SELECT - toYear(LO_ORDERDATE) AS year, - S_CITY, - P_BRAND, - sum(LO_REVENUE - LO_SUPPLYCOST) AS profit -FROM - lineorder_flat -WHERE - S_NATION = 'UNITED STATES' - AND (year = 1997 OR year = 1998) - AND P_CATEGORY = 'MFGR#14' -GROUP BY - year, - S_CITY, - P_BRAND -ORDER BY - year ASC, - S_CITY ASC, - P_BRAND ASC; -``` diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/star-schema.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/star-schema.md.hash deleted file mode 100644 index 654eebe1dd5..00000000000 --- 
a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/star-schema.md.hash +++ /dev/null @@ -1 +0,0 @@ -ae628edb1a80b92b diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/tpcds.md b/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/tpcds.md deleted file mode 100644 index 4d71bfc0bdf..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/tpcds.md +++ /dev/null @@ -1,601 +0,0 @@ ---- -description: 'The TPC-DS benchmark data set and queries.' -sidebar_label: 'TPC-DS' -slug: '/getting-started/example-datasets/tpcds' -title: 'TPC-DS (2012)' ---- - - - -[Star Schema Benchmark (SSB)](star-schema.md)と同様に、TPC-DSは[TPC-H](tpch.md)に基づいていますが、逆のアプローチを取り、つまり複雑なスノーフレークスキーマにデータを保存することによって、必要な結合の数を8から24に拡張しました。 -データ分布は歪んでいます(たとえば、正規分布とポアソン分布)。 -ランダムな置き換えを含む99のレポーティングおよびアドホッククエリを含みます。 - -### 参考文献 -- [TPC-DSの制作](https://dl.acm.org/doi/10.5555/1182635.1164217) (Nambiar)、2006 - -最初に、TPC-DSリポジトリをチェックアウトし、データ生成器をコンパイルします: - -```bash -git clone https://github.com/gregrahn/tpcds-kit.git -cd tpcds-kit/tools -make -``` - -次に、データを生成します。パラメーター`-scale`はスケールファクターを指定します。 - -```bash -./dsdgen -scale 1 -``` - -次に、クエリを生成します(同じスケールファクターを使用): - -```bash -./dsqgen -DIRECTORY ../query_templates/ -INPUT ../query_templates/templates.lst -SCALE 1 # out/query_0.sqlに99クエリを生成 -``` - -次に、ClickHouseでテーブルを作成します。 -元のテーブル定義(tools/tpcds.sql)を使用するか、適切に定義された主キーインデックスとLowCardinality型カラム定義を備えた「調節された」テーブル定義を使用することができます。 - -```sql -CREATE TABLE call_center( - cc_call_center_sk Int64, - cc_call_center_id LowCardinality(String), - cc_rec_start_date Nullable(Date), - cc_rec_end_date Nullable(Date), - cc_closed_date_sk Nullable(UInt32), - cc_open_date_sk Nullable(UInt32), - cc_name LowCardinality(String), - cc_class LowCardinality(String), - cc_employees Int32, - cc_sq_ft Int32, - cc_hours LowCardinality(String), - cc_manager LowCardinality(String), - cc_mkt_id Int32, - cc_mkt_class LowCardinality(String), - cc_mkt_desc LowCardinality(String), - cc_market_manager LowCardinality(String), - cc_division Int32, - cc_division_name LowCardinality(String), - cc_company Int32, - cc_company_name LowCardinality(String), - cc_street_number LowCardinality(String), - cc_street_name LowCardinality(String), - cc_street_type LowCardinality(String), - cc_suite_number LowCardinality(String), - cc_city LowCardinality(String), - cc_county LowCardinality(String), - cc_state LowCardinality(String), - cc_zip LowCardinality(String), - cc_country LowCardinality(String), - cc_gmt_offset Decimal(7,2), - cc_tax_percentage Decimal(7,2), - PRIMARY KEY (cc_call_center_sk) -); - -CREATE TABLE catalog_page( - cp_catalog_page_sk Int64, - cp_catalog_page_id LowCardinality(String), - cp_start_date_sk Nullable(UInt32), - cp_end_date_sk Nullable(UInt32), - cp_department LowCardinality(Nullable(String)), - cp_catalog_number Nullable(Int32), - cp_catalog_page_number Nullable(Int32), - cp_description LowCardinality(Nullable(String)), - cp_type LowCardinality(Nullable(String)), - PRIMARY KEY (cp_catalog_page_sk) -); - -CREATE TABLE catalog_returns( - cr_returned_date_sk Int32, - cr_returned_time_sk Int64, - cr_item_sk Int64, - cr_refunded_customer_sk Nullable(Int64), - cr_refunded_cdemo_sk Nullable(Int64), - cr_refunded_hdemo_sk Nullable(Int64), - cr_refunded_addr_sk Nullable(Int64), - cr_returning_customer_sk Nullable(Int64), - cr_returning_cdemo_sk Nullable(Int64), - cr_returning_hdemo_sk Nullable(Int64), -
cr_returning_addr_sk Nullable(Int64), - cr_call_center_sk Nullable(Int64), - cr_catalog_page_sk Nullable(Int64), - cr_ship_mode_sk Nullable(Int64), - cr_warehouse_sk Nullable(Int64), - cr_reason_sk Nullable(Int64), - cr_order_number Int64, - cr_return_quantity Nullable(Int32), - cr_return_amount Nullable(Decimal(7,2)), - cr_return_tax Nullable(Decimal(7,2)), - cr_return_amt_inc_tax Nullable(Decimal(7,2)), - cr_fee Nullable(Decimal(7,2)), - cr_return_ship_cost Nullable(Decimal(7,2)), - cr_refunded_cash Nullable(Decimal(7,2)), - cr_reversed_charge Nullable(Decimal(7,2)), - cr_store_credit Nullable(Decimal(7,2)), - cr_net_loss Nullable(Decimal(7,2)), - PRIMARY KEY (cr_item_sk, cr_order_number) -); - -CREATE TABLE catalog_sales ( - cs_sold_date_sk Nullable(UInt32), - cs_sold_time_sk Nullable(Int64), - cs_ship_date_sk Nullable(UInt32), - cs_bill_customer_sk Nullable(Int64), - cs_bill_cdemo_sk Nullable(Int64), - cs_bill_hdemo_sk Nullable(Int64), - cs_bill_addr_sk Nullable(Int64), - cs_ship_customer_sk Nullable(Int64), - cs_ship_cdemo_sk Nullable(Int64), - cs_ship_hdemo_sk Nullable(Int64), - cs_ship_addr_sk Nullable(Int64), - cs_call_center_sk Nullable(Int64), - cs_catalog_page_sk Nullable(Int64), - cs_ship_mode_sk Nullable(Int64), - cs_warehouse_sk Nullable(Int64), - cs_item_sk Int64, - cs_promo_sk Nullable(Int64), - cs_order_number Int64, - cs_quantity Nullable(Int32), - cs_wholesale_cost Nullable(Decimal(7,2)), - cs_list_price Nullable(Decimal(7,2)), - cs_sales_price Nullable(Decimal(7,2)), - cs_ext_discount_amt Nullable(Decimal(7,2)), - cs_ext_sales_price Nullable(Decimal(7,2)), - cs_ext_wholesale_cost Nullable(Decimal(7,2)), - cs_ext_list_price Nullable(Decimal(7,2)), - cs_ext_tax Nullable(Decimal(7,2)), - cs_coupon_amt Nullable(Decimal(7,2)), - cs_ext_ship_cost Nullable(Decimal(7,2)), - cs_net_paid Nullable(Decimal(7,2)), - cs_net_paid_inc_tax Nullable(Decimal(7,2)), - cs_net_paid_inc_ship Nullable(Decimal(7,2)), - cs_net_paid_inc_ship_tax Nullable(Decimal(7,2)), - cs_net_profit Decimal(7,2), - PRIMARY KEY (cs_item_sk, cs_order_number) -); - -CREATE TABLE customer_address ( - ca_address_sk Int64, - ca_address_id LowCardinality(String), - ca_street_number LowCardinality(Nullable(String)), - ca_street_name LowCardinality(Nullable(String)), - ca_street_type LowCardinality(Nullable(String)), - ca_suite_number LowCardinality(Nullable(String)), - ca_city LowCardinality(Nullable(String)), - ca_county LowCardinality(Nullable(String)), - ca_state LowCardinality(Nullable(String)), - ca_zip LowCardinality(Nullable(String)), - ca_country LowCardinality(Nullable(String)), - ca_gmt_offset Nullable(Decimal(7,2)), - ca_location_type LowCardinality(Nullable(String)), - PRIMARY KEY (ca_address_sk) -); - -CREATE TABLE customer_demographics ( - cd_demo_sk Int64, - cd_gender LowCardinality(String), - cd_marital_status LowCardinality(String), - cd_education_status LowCardinality(String), - cd_purchase_estimate Int32, - cd_credit_rating LowCardinality(String), - cd_dep_count Int32, - cd_dep_employed_count Int32, - cd_dep_college_count Int32, - PRIMARY KEY (cd_demo_sk) -); - -CREATE TABLE customer ( - c_customer_sk Int64, - c_customer_id LowCardinality(String), - c_current_cdemo_sk Nullable(Int64), - c_current_hdemo_sk Nullable(Int64), - c_current_addr_sk Nullable(Int64), - c_first_shipto_date_sk Nullable(UInt32), - c_first_sales_date_sk Nullable(UInt32), - c_salutation LowCardinality(Nullable(String)), - c_first_name LowCardinality(Nullable(String)), - c_last_name LowCardinality(Nullable(String)), - 
c_preferred_cust_flag LowCardinality(Nullable(String)), - c_birth_day Nullable(Int32), - c_birth_month Nullable(Int32), - c_birth_year Nullable(Int32), - c_birth_country LowCardinality(Nullable(String)), - c_login LowCardinality(Nullable(String)), - c_email_address LowCardinality(Nullable(String)), - c_last_review_date LowCardinality(Nullable(String)), - PRIMARY KEY (c_customer_sk) -); - -CREATE TABLE date_dim ( - d_date_sk UInt32, - d_date_id LowCardinality(String), - d_date Date, - d_month_seq UInt16, - d_week_seq UInt16, - d_quarter_seq UInt16, - d_year UInt16, - d_dow UInt16, - d_moy UInt16, - d_dom UInt16, - d_qoy UInt16, - d_fy_year UInt16, - d_fy_quarter_seq UInt16, - d_fy_week_seq UInt16, - d_day_name LowCardinality(String), - d_quarter_name LowCardinality(String), - d_holiday LowCardinality(String), - d_weekend LowCardinality(String), - d_following_holiday LowCardinality(String), - d_first_dom Int32, - d_last_dom Int32, - d_same_day_ly Int32, - d_same_day_lq Int32, - d_current_day LowCardinality(String), - d_current_week LowCardinality(String), - d_current_month LowCardinality(String), - d_current_quarter LowCardinality(String), - d_current_year LowCardinality(String), - PRIMARY KEY (d_date_sk) -); - -CREATE TABLE household_demographics ( - hd_demo_sk Int64, - hd_income_band_sk Int64, - hd_buy_potential LowCardinality(String), - hd_dep_count Int32, - hd_vehicle_count Int32, - PRIMARY KEY (hd_demo_sk) -); - -CREATE TABLE income_band( - ib_income_band_sk Int64, - ib_lower_bound Int32, - ib_upper_bound Int32, - PRIMARY KEY (ib_income_band_sk), -); - -CREATE TABLE inventory ( - inv_date_sk UInt32, - inv_item_sk Int64, - inv_warehouse_sk Int64, - inv_quantity_on_hand Nullable(Int32) - PRIMARY KEY (inv_date_sk, inv_item_sk, inv_warehouse_sk), -); - -CREATE TABLE item ( - i_item_sk Int64, - i_item_id LowCardinality(String), - i_rec_start_date LowCardinality(Nullable(String)), - i_rec_end_date LowCardinality(Nullable(String)), - i_item_desc LowCardinality(Nullable(String)), - i_current_price Nullable(Decimal(7,2)), - i_wholesale_cost Nullable(Decimal(7,2)), - i_brand_id Nullable(Int32), - i_brand LowCardinality(Nullable(String)), - i_class_id Nullable(Int32), - i_class LowCardinality(Nullable(String)), - i_category_id Nullable(Int32), - i_category LowCardinality(Nullable(String)), - i_manufact_id Nullable(Int32), - i_manufact LowCardinality(Nullable(String)), - i_size LowCardinality(Nullable(String)), - i_formulation LowCardinality(Nullable(String)), - i_color LowCardinality(Nullable(String)), - i_units LowCardinality(Nullable(String)), - i_container LowCardinality(Nullable(String)), - i_manager_id Nullable(Int32), - i_product_name LowCardinality(Nullable(String)), - PRIMARY KEY (i_item_sk) -); - -CREATE TABLE promotion ( - p_promo_sk Int64, - p_promo_id LowCardinality(String), - p_start_date_sk Nullable(UInt32), - p_end_date_sk Nullable(UInt32), - p_item_sk Nullable(Int64), - p_cost Nullable(Decimal(15,2)), - p_response_target Nullable(Int32), - p_promo_name LowCardinality(Nullable(String)), - p_channel_dmail LowCardinality(Nullable(String)), - p_channel_email LowCardinality(Nullable(String)), - p_channel_catalog LowCardinality(Nullable(String)), - p_channel_tv LowCardinality(Nullable(String)), - p_channel_radio LowCardinality(Nullable(String)), - p_channel_press LowCardinality(Nullable(String)), - p_channel_event LowCardinality(Nullable(String)), - p_channel_demo LowCardinality(Nullable(String)), - p_channel_details LowCardinality(Nullable(String)), - p_purpose 
LowCardinality(Nullable(String)), - p_discount_active LowCardinality(Nullable(String)), - PRIMARY KEY (p_promo_sk) -); - -CREATE TABLE reason( - r_reason_sk Int64, - r_reason_id LowCardinality(String), - r_reason_desc LowCardinality(String), - PRIMARY KEY (r_reason_sk) -); - -CREATE TABLE ship_mode( - sm_ship_mode_sk Int64, - sm_ship_mode_id LowCardinality(String), - sm_type LowCardinality(String), - sm_code LowCardinality(String), - sm_carrier LowCardinality(String), - sm_contract LowCardinality(String), - PRIMARY KEY (sm_ship_mode_sk) -); - -CREATE TABLE store_returns ( - sr_returned_date_sk Nullable(UInt32), - sr_return_time_sk Nullable(Int64), - sr_item_sk Int64, - sr_customer_sk Nullable(Int64), - sr_cdemo_sk Nullable(Int64), - sr_hdemo_sk Nullable(Int64), - sr_addr_sk Nullable(Int64), - sr_store_sk Nullable(Int64), - sr_reason_sk Nullable(Int64), - sr_ticket_number Int64, - sr_return_quantity Nullable(Int32), - sr_return_amt Nullable(Decimal(7,2)), - sr_return_tax Nullable(Decimal(7,2)), - sr_return_amt_inc_tax Nullable(Decimal(7,2)), - sr_fee Nullable(Decimal(7,2)), - sr_return_ship_cost Nullable(Decimal(7,2)), - sr_refunded_cash Nullable(Decimal(7,2)), - sr_reversed_charge Nullable(Decimal(7,2)), - sr_store_credit Nullable(Decimal(7,2)), - sr_net_loss Nullable(Decimal(7,2)), - PRIMARY KEY (sr_item_sk, sr_ticket_number) -); - -CREATE TABLE store_sales ( - ss_sold_date_sk Nullable(UInt32), - ss_sold_time_sk Nullable(Int64), - ss_item_sk Int64, - ss_customer_sk Nullable(Int64), - ss_cdemo_sk Nullable(Int64), - ss_hdemo_sk Nullable(Int64), - ss_addr_sk Nullable(Int64), - ss_store_sk Nullable(Int64), - ss_promo_sk Nullable(Int64), - ss_ticket_number Int64, - ss_quantity Nullable(Int32), - ss_wholesale_cost Nullable(Decimal(7,2)), - ss_list_price Nullable(Decimal(7,2)), - ss_sales_price Nullable(Decimal(7,2)), - ss_ext_discount_amt Nullable(Decimal(7,2)), - ss_ext_sales_price Nullable(Decimal(7,2)), - ss_ext_wholesale_cost Nullable(Decimal(7,2)), - ss_ext_list_price Nullable(Decimal(7,2)), - ss_ext_tax Nullable(Decimal(7,2)), - ss_coupon_amt Nullable(Decimal(7,2)), - ss_net_paid Nullable(Decimal(7,2)), - ss_net_paid_inc_tax Nullable(Decimal(7,2)), - ss_net_profit Nullable(Decimal(7,2)), - PRIMARY KEY (ss_item_sk, ss_ticket_number) -); - -CREATE TABLE store ( - s_store_sk Int64, - s_store_id LowCardinality(String), - s_rec_start_date LowCardinality(Nullable(String)), - s_rec_end_date LowCardinality(Nullable(String)), - s_closed_date_sk Nullable(UInt32), - s_store_name LowCardinality(Nullable(String)), - s_number_employees Nullable(Int32), - s_floor_space Nullable(Int32), - s_hours LowCardinality(Nullable(String)), - s_manager LowCardinality(Nullable(String)), - s_market_id Nullable(Int32), - s_geography_class LowCardinality(Nullable(String)), - s_market_desc LowCardinality(Nullable(String)), - s_market_manager LowCardinality(Nullable(String)), - s_division_id Nullable(Int32), - s_division_name LowCardinality(Nullable(String)), - s_company_id Nullable(Int32), - s_company_name LowCardinality(Nullable(String)), - s_street_number LowCardinality(Nullable(String)), - s_street_name LowCardinality(Nullable(String)), - s_street_type LowCardinality(Nullable(String)), - s_suite_number LowCardinality(Nullable(String)), - s_city LowCardinality(Nullable(String)), - s_county LowCardinality(Nullable(String)), - s_state LowCardinality(Nullable(String)), - s_zip LowCardinality(Nullable(String)), - s_country LowCardinality(Nullable(String)), - s_gmt_offset Nullable(Decimal(7,2)), - s_tax_precentage 
Nullable(Decimal(7,2)), - PRIMARY KEY (s_store_sk) -); - -CREATE TABLE time_dim ( - t_time_sk UInt32, - t_time_id LowCardinality(String), - t_time UInt32, - t_hour UInt8, - t_minute UInt8, - t_second UInt8, - t_am_pm LowCardinality(String), - t_shift LowCardinality(String), - t_sub_shift LowCardinality(String), - t_meal_time LowCardinality(Nullable(String)), - PRIMARY KEY (t_time_sk) -); - -CREATE TABLE warehouse( - w_warehouse_sk Int64, - w_warehouse_id LowCardinality(String), - w_warehouse_name LowCardinality(Nullable(String)), - w_warehouse_sq_ft Nullable(Int32), - w_street_number LowCardinality(Nullable(String)), - w_street_name LowCardinality(Nullable(String)), - w_street_type LowCardinality(Nullable(String)), - w_suite_number LowCardinality(Nullable(String)), - w_city LowCardinality(Nullable(String)), - w_county LowCardinality(Nullable(String)), - w_state LowCardinality(Nullable(String)), - w_zip LowCardinality(Nullable(String)), - w_country LowCardinality(Nullable(String)), - w_gmt_offset Decimal(7,2), - PRIMARY KEY (w_warehouse_sk) -); - -CREATE TABLE web_page( - wp_web_page_sk Int64, - wp_web_page_id LowCardinality(String), - wp_rec_start_date LowCardinality(Nullable(String)), - wp_rec_end_date LowCardinality(Nullable(String)), - wp_creation_date_sk Nullable(UInt32), - wp_access_date_sk Nullable(UInt32), - wp_autogen_flag LowCardinality(Nullable(String)), - wp_customer_sk Nullable(Int64), - wp_url LowCardinality(Nullable(String)), - wp_type LowCardinality(Nullable(String)), - wp_char_count Nullable(Int32), - wp_link_count Nullable(Int32), - wp_image_count Nullable(Int32), - wp_max_ad_count Nullable(Int32), - PRIMARY KEY (wp_web_page_sk) -); - -CREATE TABLE web_returns ( - wr_returned_date_sk Nullable(UInt32), - wr_returned_time_sk Nullable(Int64), - wr_item_sk Int64, - wr_refunded_customer_sk Nullable(Int64), - wr_refunded_cdemo_sk Nullable(Int64), - wr_refunded_hdemo_sk Nullable(Int64), - wr_refunded_addr_sk Nullable(Int64), - wr_returning_customer_sk Nullable(Int64), - wr_returning_cdemo_sk Nullable(Int64), - wr_returning_hdemo_sk Nullable(Int64), - wr_returning_addr_sk Nullable(Int64), - wr_web_page_sk Nullable(Int64), - wr_reason_sk Nullable(Int64), - wr_order_number Int64, - wr_return_quantity Nullable(Int32), - wr_return_amt Nullable(Decimal(7,2)), - wr_return_tax Nullable(Decimal(7,2)), - wr_return_amt_inc_tax Nullable(Decimal(7,2)), - wr_fee Nullable(Decimal(7,2)), - wr_return_ship_cost Nullable(Decimal(7,2)), - wr_refunded_cash Nullable(Decimal(7,2)), - wr_reversed_charge Nullable(Decimal(7,2)), - wr_account_credit Nullable(Decimal(7,2)), - wr_net_loss Nullable(Decimal(7,2)), - PRIMARY KEY (wr_item_sk, wr_order_number) -); - -CREATE TABLE web_sales ( - ws_sold_date_sk Nullable(UInt32), - ws_sold_time_sk Nullable(Int64), - ws_ship_date_sk Nullable(UInt32), - ws_item_sk Int64, - ws_bill_customer_sk Nullable(Int64), - ws_bill_cdemo_sk Nullable(Int64), - ws_bill_hdemo_sk Nullable(Int64), - ws_bill_addr_sk Nullable(Int64), - ws_ship_customer_sk Nullable(Int64), - ws_ship_cdemo_sk Nullable(Int64), - ws_ship_hdemo_sk Nullable(Int64), - ws_ship_addr_sk Nullable(Int64), - ws_web_page_sk Nullable(Int64), - ws_web_site_sk Nullable(Int64), - ws_ship_mode_sk Nullable(Int64), - ws_warehouse_sk Nullable(Int64), - ws_promo_sk Nullable(Int64), - ws_order_number Int64, - ws_quantity Nullable(Int32), - ws_wholesale_cost Nullable(Decimal(7,2)), - ws_list_price Nullable(Decimal(7,2)), - ws_sales_price Nullable(Decimal(7,2)), - ws_ext_discount_amt Nullable(Decimal(7,2)), - ws_ext_sales_price 
Nullable(Decimal(7,2)), - ws_ext_wholesale_cost Nullable(Decimal(7,2)), - ws_ext_list_price Nullable(Decimal(7,2)), - ws_ext_tax Nullable(Decimal(7,2)), - ws_coupon_amt Nullable(Decimal(7,2)), - ws_ext_ship_cost Nullable(Decimal(7,2)), - ws_net_paid Nullable(Decimal(7,2)), - ws_net_paid_inc_tax Nullable(Decimal(7,2)), - ws_net_paid_inc_ship Decimal(7,2), - ws_net_paid_inc_ship_tax Decimal(7,2), - ws_net_profit Decimal(7,2), - PRIMARY KEY (ws_item_sk, ws_order_number) -); - -CREATE TABLE web_site ( - web_site_sk Int64, - web_site_id LowCardinality(String), - web_rec_start_date LowCardinality(String), - web_rec_end_date LowCardinality(Nullable(String)), - web_name LowCardinality(String), - web_open_date_sk UInt32, - web_close_date_sk Nullable(UInt32), - web_class LowCardinality(String), - web_manager LowCardinality(String), - web_mkt_id Int32, - web_mkt_class LowCardinality(String), - web_mkt_desc LowCardinality(String), - web_market_manager LowCardinality(String), - web_company_id Int32, - web_company_name LowCardinality(String), - web_street_number LowCardinality(String), - web_street_name LowCardinality(String), - web_street_type LowCardinality(String), - web_suite_number LowCardinality(String), - web_city LowCardinality(String), - web_county LowCardinality(String), - web_state LowCardinality(String), - web_zip LowCardinality(String), - web_country LowCardinality(String), - web_gmt_offset Decimal(7,2), - web_tax_percentage Decimal(7,2), - PRIMARY KEY (web_site_sk) -); -``` - -データは以下のようにインポートできます: - -```bash -clickhouse-client --format_csv_delimiter '|' --query "INSERT INTO call_center FORMAT CSV" < call_center.tbl -clickhouse-client --format_csv_delimiter '|' --query "INSERT INTO catalog_page FORMAT CSV" < catalog_page.tbl -clickhouse-client --format_csv_delimiter '|' --query "INSERT INTO catalog_returns FORMAT CSV" < catalog_returns.tbl -clickhouse-client --format_csv_delimiter '|' --query "INSERT INTO catalog_sales FORMAT CSV" < catalog_sales.tbl -clickhouse-client --format_csv_delimiter '|' --query "INSERT INTO customer FORMAT CSV" < customer.tbl -clickhouse-client --format_csv_delimiter '|' --query "INSERT INTO customer_address FORMAT CSV" < customer_address.tbl -clickhouse-client --format_csv_delimiter '|' --query "INSERT INTO customer_demographics FORMAT CSV" < customer_demographics.tbl -clickhouse-client --format_csv_delimiter '|' --query "INSERT INTO date_dim FORMAT CSV" < date_dim.tbl -clickhouse-client --format_csv_delimiter '|' --query "INSERT INTO household_demographics FORMAT CSV" < household_demographics.tbl -clickhouse-client --format_csv_delimiter '|' --query "INSERT INTO income_band FORMAT CSV" < income_band.tbl -clickhouse-client --format_csv_delimiter '|' --query "INSERT INTO inventory FORMAT CSV" < inventory.tbl -clickhouse-client --format_csv_delimiter '|' --query "INSERT INTO item FORMAT CSV" < item.tbl -clickhouse-client --format_csv_delimiter '|' --query "INSERT INTO promotion FORMAT CSV" < promotion.tbl -clickhouse-client --format_csv_delimiter '|' --query "INSERT INTO reason FORMAT CSV" < reason.tbl -clickhouse-client --format_csv_delimiter '|' --query "INSERT INTO ship_mode FORMAT CSV" < ship_mode.tbl -clickhouse-client --format_csv_delimiter '|' --query "INSERT INTO store FORMAT CSV" < store.tbl -clickhouse-client --format_csv_delimiter '|' --query "INSERT INTO store_returns FORMAT CSV" < store_returns.tbl -clickhouse-client --format_csv_delimiter '|' --query "INSERT INTO store_sales FORMAT CSV" < store_sales.tbl -clickhouse-client --format_csv_delimiter '|' 
--query "INSERT INTO time_dim FORMAT CSV" < time_dim.tbl -clickhouse-client --format_csv_delimiter '|' --query "INSERT INTO warehouse FORMAT CSV" < warehouse.tbl -clickhouse-client --format_csv_delimiter '|' --query "INSERT INTO web_page FORMAT CSV" < web_page.tbl -clickhouse-client --format_csv_delimiter '|' --query "INSERT INTO web_returns FORMAT CSV" < web_returns.tbl -clickhouse-client --format_csv_delimiter '|' --query "INSERT INTO web_sales FORMAT CSV" < web_sales.tbl -clickhouse-client --format_csv_delimiter '|' --query "INSERT INTO web_site FORMAT CSV" < web_site.tbl -``` - -次に、生成されたクエリを実行します。 - -::::warning -TPC-DSは、執筆時点(2024年9月)のClickHouseではサポートされていない相関サブクエリを多用します([issue #6697](https://github.com/ClickHouse/ClickHouse/issues/6697))。 -その結果、上記のベンチマーククエリの多くがエラーで失敗します。 -:::: diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/tpcds.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/tpcds.md.hash deleted file mode 100644 index 12313a5af7c..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/tpcds.md.hash +++ /dev/null @@ -1 +0,0 @@ -2e040044c29e0fee diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/tpch.md b/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/tpch.md deleted file mode 100644 index 598a56fc967..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/tpch.md +++ /dev/null @@ -1,1100 +0,0 @@ ---- -description: 'The TPC-H benchmark data set and queries.' -sidebar_label: 'TPC-H' -slug: '/getting-started/example-datasets/tpch' -title: 'TPC-H (1999)' ---- - - - -A popular benchmark which models the internal data warehouse of a wholesale supplier. -データは3rd正規形の表現で保存され、多くのジョインがクエリ実行時に必要です。 -その古さとデータが均一かつ独立して分布しているという非現実的な前提にもかかわらず、TPC-Hは現在まで最も人気のあるOLAPベンチマークです。 - -**References** - -- [TPC-H](https://www.tpc.org/tpc_documents_current_versions/current_specifications5.asp) -- [New TPC Benchmarks for Decision Support and Web Commerce](https://doi.org/10.1145/369275.369291) (Poess et. al., 2000) -- [TPC-H Analyzed: Hidden Messages and Lessons Learned from an Influential Benchmark](https://doi.org/10.1007/978-3-319-04936-6_5) (Boncz et. al.), 2013 -- [Quantifying TPC-H Choke Points and Their Optimizations](https://doi.org/10.14778/3389133.3389138) (Dresseler et. 
al.), 2020 - -## Data Generation and Import {#data-generation-and-import} - -まず、TPC-Hリポジトリをチェックアウトし、データジェネレーターをコンパイルします。 - -```bash -git clone https://github.com/gregrahn/tpch-kit.git -cd tpch-kit/dbgen -make -``` - -次に、データを生成します。パラメータ `-s` はスケールファクターを指定します。例えば、`-s 100` を指定すると、'lineitem' テーブルに対して6億行が生成されます。 - -```bash -./dbgen -s 100 -``` - -スケールファクター100の詳細なテーブルサイズ: - -| Table | size (in rows) | size (compressed in ClickHouse) | -|----------|----------------|---------------------------------| -| nation | 25 | 2 kB | -| region | 5 | 1 kB | -| part | 20.000.000 | 895 MB | -| supplier | 1.000.000 | 75 MB | -| partsupp | 80.000.000 | 4.37 GB | -| customer | 15.000.000 | 1.19 GB | -| orders | 150.000.000 | 6.15 GB | -| lineitem | 600.00.00 | 26.69 GB | - -(ClickHouseの圧縮サイズは `system.tables.total_bytes` から取得され、以下のテーブル定義に基づいています。) - -次に、ClickHouseにテーブルを作成します。 - -私たちはTPC-H仕様のルールにできるだけ近く従います: -- 主キーは、仕様のセクション1.4.2.2に記載されたカラムに対してのみ作成します。 -- 置換パラメータは、仕様のセクション2.1.x.4のクエリ検証の値に置き換えました。 -- 仕様のセクション1.4.2.1に従い、テーブル定義ではオプションの `NOT NULL` 制約を使用しておらず、たとえ `dbgen` がデフォルトで生成してもそうです。 - ClickHouseでの `SELECT` クエリのパフォーマンスは、 `NOT NULL` 制約の存在または欠如に影響されません。 -- 仕様のセクション1.3.1に従い、クリックハウスのネイティブデータ型(例: `Int32`, `String`)を使用して、仕様に記載されている抽象データ型(例: `Identifier`, `Variable text, size N`)を実装しています。これにより可読性が向上します。`dbgen` によって生成されるSQL-92データ型(例: `INTEGER`, `VARCHAR(40)`)もClickHouseで使用することができます。 - -```sql -CREATE TABLE nation ( - n_nationkey Int32, - n_name String, - n_regionkey Int32, - n_comment String) -ORDER BY (n_nationkey); - -CREATE TABLE region ( - r_regionkey Int32, - r_name String, - r_comment String) -ORDER BY (r_regionkey); - -CREATE TABLE part ( - p_partkey Int32, - p_name String, - p_mfgr String, - p_brand String, - p_type String, - p_size Int32, - p_container String, - p_retailprice Decimal(15,2), - p_comment String) -ORDER BY (p_partkey); - -CREATE TABLE supplier ( - s_suppkey Int32, - s_name String, - s_address String, - s_nationkey Int32, - s_phone String, - s_acctbal Decimal(15,2), - s_comment String) -ORDER BY (s_suppkey); - -CREATE TABLE partsupp ( - ps_partkey Int32, - ps_suppkey Int32, - ps_availqty Int32, - ps_supplycost Decimal(15,2), - ps_comment String) -ORDER BY (ps_partkey, ps_suppkey); - -CREATE TABLE customer ( - c_custkey Int32, - c_name String, - c_address String, - c_nationkey Int32, - c_phone String, - c_acctbal Decimal(15,2), - c_mktsegment String, - c_comment String) -ORDER BY (c_custkey); - -CREATE TABLE orders ( - o_orderkey Int32, - o_custkey Int32, - o_orderstatus String, - o_totalprice Decimal(15,2), - o_orderdate Date, - o_orderpriority String, - o_clerk String, - o_shippriority Int32, - o_comment String) -ORDER BY (o_orderkey); --- 以下は公式のTPC-Hルールに準拠していない代替のオーダーキーですが、 --- 「Quantifying TPC-H Choke Points and Their Optimizations」のセクション4.5で推奨されています: --- ORDER BY (o_orderdate, o_orderkey); - -CREATE TABLE lineitem ( - l_orderkey Int32, - l_partkey Int32, - l_suppkey Int32, - l_linenumber Int32, - l_quantity Decimal(15,2), - l_extendedprice Decimal(15,2), - l_discount Decimal(15,2), - l_tax Decimal(15,2), - l_returnflag String, - l_linestatus String, - l_shipdate Date, - l_commitdate Date, - l_receiptdate Date, - l_shipinstruct String, - l_shipmode String, - l_comment String) -ORDER BY (l_orderkey, l_linenumber); --- 以下は公式のTPC-Hルールに準拠していない代替のオーダーキーですが、 --- 「Quantifying TPC-H Choke Points and Their Optimizations」のセクション4.5で推奨されています: --- ORDER BY (l_shipdate, l_orderkey, l_linenumber); -``` - -データは以下のようにインポートできます: - -```bash -clickhouse-client --format_csv_delimiter '|' --query "INSERT INTO nation 
FORMAT CSV" < nation.tbl -clickhouse-client --format_csv_delimiter '|' --query "INSERT INTO region FORMAT CSV" < region.tbl -clickhouse-client --format_csv_delimiter '|' --query "INSERT INTO part FORMAT CSV" < part.tbl -clickhouse-client --format_csv_delimiter '|' --query "INSERT INTO supplier FORMAT CSV" < supplier.tbl -clickhouse-client --format_csv_delimiter '|' --query "INSERT INTO partsupp FORMAT CSV" < partsupp.tbl -clickhouse-client --format_csv_delimiter '|' --query "INSERT INTO customer FORMAT CSV" < customer.tbl -clickhouse-client --format_csv_delimiter '|' --query "INSERT INTO orders FORMAT CSV" < orders.tbl -clickhouse-client --format_csv_delimiter '|' --query "INSERT INTO lineitem FORMAT CSV" < lineitem.tbl -``` - -:::note -tpch-kitを使用してテーブルを自分で生成する代わりに、公開されたS3バケットからデータをインポートすることもできます。 -最初に上記の `CREATE` ステートメントを使用して空のテーブルを作成することを確認してください。 - -```sql --- スケールファクター1 -INSERT INTO nation SELECT * FROM s3('https://clickhouse-datasets.s3.amazonaws.com/h/1/nation.tbl', NOSIGN, CSV) SETTINGS format_csv_delimiter = '|', input_format_defaults_for_omitted_fields = 1, input_format_csv_empty_as_default = 1; -INSERT INTO region SELECT * FROM s3('https://clickhouse-datasets.s3.amazonaws.com/h/1/region.tbl', NOSIGN, CSV) SETTINGS format_csv_delimiter = '|', input_format_defaults_for_omitted_fields = 1, input_format_csv_empty_as_default = 1; -INSERT INTO part SELECT * FROM s3('https://clickhouse-datasets.s3.amazonaws.com/h/1/part.tbl', NOSIGN, CSV) SETTINGS format_csv_delimiter = '|', input_format_defaults_for_omitted_fields = 1, input_format_csv_empty_as_default = 1; -INSERT INTO supplier SELECT * FROM s3('https://clickhouse-datasets.s3.amazonaws.com/h/1/supplier.tbl', NOSIGN, CSV) SETTINGS format_csv_delimiter = '|', input_format_defaults_for_omitted_fields = 1, input_format_csv_empty_as_default = 1; -INSERT INTO partsupp SELECT * FROM s3('https://clickhouse-datasets.s3.amazonaws.com/h/1/partsupp.tbl', NOSIGN, CSV) SETTINGS format_csv_delimiter = '|', input_format_defaults_for_omitted_fields = 1, input_format_csv_empty_as_default = 1; -INSERT INTO customer SELECT * FROM s3('https://clickhouse-datasets.s3.amazonaws.com/h/1/customer.tbl', NOSIGN, CSV) SETTINGS format_csv_delimiter = '|', input_format_defaults_for_omitted_fields = 1, input_format_csv_empty_as_default = 1; -INSERT INTO orders SELECT * FROM s3('https://clickhouse-datasets.s3.amazonaws.com/h/1/orders.tbl', NOSIGN, CSV) SETTINGS format_csv_delimiter = '|', input_format_defaults_for_omitted_fields = 1, input_format_csv_empty_as_default = 1; -INSERT INTO lineitem SELECT * FROM s3('https://clickhouse-datasets.s3.amazonaws.com/h/1/lineitem.tbl', NOSIGN, CSV) SETTINGS format_csv_delimiter = '|', input_format_defaults_for_omitted_fields = 1, input_format_csv_empty_as_default = 1; - --- スケールファクター100 -INSERT INTO nation SELECT * FROM s3('https://clickhouse-datasets.s3.amazonaws.com/h/100/nation.tbl.gz', NOSIGN, CSV) SETTINGS format_csv_delimiter = '|', input_format_defaults_for_omitted_fields = 1, input_format_csv_empty_as_default = 1; -INSERT INTO region SELECT * FROM s3('https://clickhouse-datasets.s3.amazonaws.com/h/100/region.tbl.gz', NOSIGN, CSV) SETTINGS format_csv_delimiter = '|', input_format_defaults_for_omitted_fields = 1, input_format_csv_empty_as_default = 1; -INSERT INTO part SELECT * FROM s3('https://clickhouse-datasets.s3.amazonaws.com/h/100/part.tbl.gz', NOSIGN, CSV) SETTINGS format_csv_delimiter = '|', input_format_defaults_for_omitted_fields = 1, input_format_csv_empty_as_default = 1; -INSERT INTO supplier SELECT * FROM 
s3('https://clickhouse-datasets.s3.amazonaws.com/h/100/supplier.tbl.gz', NOSIGN, CSV) SETTINGS format_csv_delimiter = '|', input_format_defaults_for_omitted_fields = 1, input_format_csv_empty_as_default = 1; -INSERT INTO partsupp SELECT * FROM s3('https://clickhouse-datasets.s3.amazonaws.com/h/100/partsupp.tbl.gz', NOSIGN, CSV) SETTINGS format_csv_delimiter = '|', input_format_defaults_for_omitted_fields = 1, input_format_csv_empty_as_default = 1; -INSERT INTO customer SELECT * FROM s3('https://clickhouse-datasets.s3.amazonaws.com/h/100/customer.tbl.gz', NOSIGN, CSV) SETTINGS format_csv_delimiter = '|', input_format_defaults_for_omitted_fields = 1, input_format_csv_empty_as_default = 1; -INSERT INTO orders SELECT * FROM s3('https://clickhouse-datasets.s3.amazonaws.com/h/100/orders.tbl.gz', NOSIGN, CSV) SETTINGS format_csv_delimiter = '|', input_format_defaults_for_omitted_fields = 1, input_format_csv_empty_as_default = 1; -INSERT INTO lineitem SELECT * FROM s3('https://clickhouse-datasets.s3.amazonaws.com/h/100/lineitem.tbl.gz', NOSIGN, CSV) SETTINGS format_csv_delimiter = '|', input_format_defaults_for_omitted_fields = 1, input_format_csv_empty_as_default = 1; -``` -::: - -## Queries {#queries} - -:::note -正しい結果を生成するために [`join_use_nulls`](../../operations/settings/settings.md#join_use_nulls) を有効にする必要があります。 -::: - -クエリは `./qgen -s ` によって生成されます。スケールファクター `s = 100` の例のクエリ: - -**Correctness** - -クエリの結果は、特に記載がない限り、公式の結果と一致します。確認するためには、スケールファクター = 1 (`dbgen`、上記参照) でTPC-Hデータベースを生成し、[tpch-kitの期待される結果](https://github.com/gregrahn/tpch-kit/tree/master/dbgen/answers)と比較してください。 - -**Q1** - -```sql -SELECT - l_returnflag, - l_linestatus, - sum(l_quantity) AS sum_qty, - sum(l_extendedprice) AS sum_base_price, - sum(l_extendedprice * (1 - l_discount)) AS sum_disc_price, - sum(l_extendedprice * (1 - l_discount) * (1 + l_tax)) AS sum_charge, - avg(l_quantity) AS avg_qty, - avg(l_extendedprice) AS avg_price, - avg(l_discount) AS avg_disc, - count(*) AS count_order -FROM - lineitem -WHERE - l_shipdate <= DATE '1998-12-01' - INTERVAL '90' DAY -GROUP BY - l_returnflag, - l_linestatus -ORDER BY - l_returnflag, - l_linestatus; -``` - -**Q2** - -```sql -SET allow_experimental_correlated_subqueries = 1; -- since v25.5 - -SELECT - s_acctbal, - s_name, - n_name, - p_partkey, - p_mfgr, - s_address, - s_phone, - s_comment -FROM - part, - supplier, - partsupp, - nation, - region -WHERE - p_partkey = ps_partkey - AND s_suppkey = ps_suppkey - AND p_size = 15 - AND p_type LIKE '%BRASS' - AND s_nationkey = n_nationkey - AND n_regionkey = r_regionkey - AND r_name = 'EUROPE' - AND ps_supplycost = ( - SELECT - min(ps_supplycost) - FROM - partsupp, - supplier, - nation, - region - WHERE - p_partkey = ps_partkey - AND s_suppkey = ps_suppkey - AND s_nationkey = n_nationkey - AND n_regionkey = r_regionkey - AND r_name = 'EUROPE' - ) -ORDER BY - s_acctbal DESC, - n_name, - s_name, - p_partkey; -``` - -::::note -v25.5まで、クエリは相関サブクエリのため、すぐに動作しない場合があります。対応する問題: https://github.com/ClickHouse/ClickHouse/issues/6697 - -この代替のフォームは動作し、参照結果を返すことが確認されています。 - -```sql -WITH MinSupplyCost AS ( - SELECT - ps_partkey, - MIN(ps_supplycost) AS min_supplycost - FROM - partsupp ps - JOIN - supplier s ON ps.ps_suppkey = s.s_suppkey - JOIN - nation n ON s.s_nationkey = n.n_nationkey - JOIN - region r ON n.n_regionkey = r.r_regionkey - WHERE - r.r_name = 'EUROPE' - GROUP BY - ps_partkey -) -SELECT - s.s_acctbal, - s.s_name, - n.n_name, - p.p_partkey, - p.p_mfgr, - s.s_address, - s.s_phone, - s.s_comment -FROM - part p -JOIN - partsupp ps ON p.p_partkey 
= ps.ps_partkey -JOIN - supplier s ON s.s_suppkey = ps.ps_suppkey -JOIN - nation n ON s.s_nationkey = n.n_nationkey -JOIN - region r ON n.n_regionkey = r.r_regionkey -JOIN - MinSupplyCost msc ON ps.ps_partkey = msc.ps_partkey AND ps.ps_supplycost = msc.min_supplycost -WHERE - p.p_size = 15 - AND p.p_type LIKE '%BRASS' - AND r.r_name = 'EUROPE' -ORDER BY - s.s_acctbal DESC, - n.n_name, - s.s_name, - p.p_partkey; -``` -:::: - -**Q3** - -```sql -SELECT - l_orderkey, - sum(l_extendedprice * (1 - l_discount)) AS revenue, - o_orderdate, - o_shippriority -FROM - customer, - orders, - lineitem -WHERE - c_mktsegment = 'BUILDING' - AND c_custkey = o_custkey - AND l_orderkey = o_orderkey - AND o_orderdate < DATE '1995-03-15' - AND l_shipdate > DATE '1995-03-15' -GROUP BY - l_orderkey, - o_orderdate, - o_shippriority -ORDER BY - revenue DESC, - o_orderdate; -``` - -**Q4** - -```sql -SET allow_experimental_correlated_subqueries = 1; -- since v25.5 - -SELECT - o_orderpriority, - count(*) AS order_count -FROM - orders -WHERE - o_orderdate >= DATE '1993-07-01' - AND o_orderdate < DATE '1993-07-01' + INTERVAL '3' MONTH - AND EXISTS ( - SELECT - * - FROM - lineitem - WHERE - l_orderkey = o_orderkey - AND l_commitdate < l_receiptdate - ) -GROUP BY - o_orderpriority -ORDER BY - o_orderpriority; -``` - -::::note -v25.5まで、クエリは相関サブクエリのため、すぐに動作しない場合があります。対応する問題: https://github.com/ClickHouse/ClickHouse/issues/6697 - -この代替のフォームは動作し、参照結果を返すことが確認されています。 - -```sql -WITH ValidLineItems AS ( - SELECT - l_orderkey - FROM - lineitem - WHERE - l_commitdate < l_receiptdate - GROUP BY - l_orderkey -) -SELECT - o.o_orderpriority, - COUNT(*) AS order_count -FROM - orders o -JOIN - ValidLineItems vli ON o.o_orderkey = vli.l_orderkey -WHERE - o.o_orderdate >= DATE '1993-07-01' - AND o.o_orderdate < DATE '1993-07-01' + INTERVAL '3' MONTH -GROUP BY - o.o_orderpriority -ORDER BY - o.o_orderpriority; -``` -:::: - -**Q5** - -```sql -SELECT - n_name, - sum(l_extendedprice * (1 - l_discount)) AS revenue -FROM - customer, - orders, - lineitem, - supplier, - nation, - region -WHERE - c_custkey = o_custkey - AND l_orderkey = o_orderkey - AND l_suppkey = s_suppkey - AND c_nationkey = s_nationkey - AND s_nationkey = n_nationkey - AND n_regionkey = r.regionkey - AND r_name = 'ASIA' - AND o_orderdate >= DATE '1994-01-01' - AND o_orderdate < DATE '1994-01-01' + INTERVAL '1' year -GROUP BY - n_name -ORDER BY - revenue DESC; -``` - -**Q6** - -```sql -SELECT - sum(l_extendedprice * l_discount) AS revenue -FROM - lineitem -WHERE - l_shipdate >= DATE '1994-01-01' - AND l_shipdate < DATE '1994-01-01' + INTERVAL '1' year - AND l_discount BETWEEN 0.06 - 0.01 AND 0.06 + 0.01 - AND l_quantity < 24; -``` - -::::note -2025年2月現在、このクエリはDecimalの加算のバグのため、すぐに動作しません。対応する問題: https://github.com/ClickHouse/ClickHouse/issues/70136 - -この代替のフォームは動作し、参照結果を返すことが確認されています。 - -```sql -SELECT - sum(l_extendedprice * l_discount) AS revenue -FROM - lineitem -WHERE - l_shipdate >= DATE '1994-01-01' - AND l_shipdate < DATE '1994-01-01' + INTERVAL '1' year - AND l_discount BETWEEN 0.05 AND 0.07 - AND l_quantity < 24; -``` -:::: - -**Q7** - -```sql -SELECT - supp_nation, - cust_nation, - l_year, - sum(volume) AS revenue -FROM ( - SELECT - n1.n_name AS supp_nation, - n2.n_name AS cust_nation, - extract(year FROM l_shipdate) AS l_year, - l_extendedprice * (1 - l_discount) AS volume - FROM - supplier, - lineitem, - orders, - customer, - nation n1, - nation n2 - WHERE - s_suppkey = l_suppkey - AND o_orderkey = l_orderkey - AND c_custkey = o_custkey - AND s_nationkey = 
n1.n_nationkey - AND c_nationkey = n2.n_nationkey - AND ( - (n1.n_name = 'FRANCE' AND n2.n_name = 'GERMANY') - OR (n1.n_name = 'GERMANY' AND n2.n_name = 'FRANCE') - ) - AND l_shipdate BETWEEN DATE '1995-01-01' AND DATE '1996-12-31' - ) AS shipping -GROUP BY - supp_nation, - cust_nation, - l_year -ORDER BY - supp_nation, - cust_nation, - l_year; -``` - -**Q8** - -```sql -SELECT - o_year, - sum(CASE - WHEN nation = 'BRAZIL' - THEN volume - ELSE 0 - END) / sum(volume) AS mkt_share -FROM ( - SELECT - extract(year FROM o_orderdate) AS o_year, - l_extendedprice * (1 - l_discount) AS volume, - n2.n_name AS nation - FROM - part, - supplier, - lineitem, - orders, - customer, - nation n1, - nation n2, - region - WHERE - p_partkey = l_partkey - AND s_suppkey = l_suppkey - AND l_orderkey = o_orderkey - AND o_custkey = c_custkey - AND c_nationkey = n1.n_nationkey - AND n1.n_regionkey = r.r_regionkey - AND r_name = 'AMERICA' - AND s_nationkey = n2.n_nationkey - AND o_orderdate BETWEEN DATE '1995-01-01' AND DATE '1996-12-31' - AND p_type = 'ECONOMY ANODIZED STEEL' - ) AS all_nations -GROUP BY - o_year -ORDER BY - o_year; -``` - -**Q9** - -```sql -SELECT - nation, - o_year, - sum(amount) AS sum_profit -FROM ( - SELECT - n_name AS nation, - extract(year FROM o_orderdate) AS o_year, - l_extendedprice * (1 - l_discount) - ps_supplycost * l_quantity AS amount - FROM - part, - supplier, - lineitem, - partsupp, - orders, - nation - WHERE - s_suppkey = l_suppkey - AND ps_suppkey = l_suppkey - AND ps_partkey = l_partkey - AND p_partkey = l_partkey - AND o_orderkey = l_orderkey - AND s_nationkey = n_nationkey - AND p_name LIKE '%green%' - ) AS profit -GROUP BY - nation, - o_year -ORDER BY - nation, - o_year DESC; -``` - -**Q10** - -```sql -SELECT - c_custkey, - c_name, - sum(l_extendedprice * (1 - l_discount)) AS revenue, - c_acctbal, - n_name, - c_address, - c_phone, - c_comment -FROM - customer, - orders, - lineitem, - nation -WHERE - c_custkey = o_custkey - AND l_orderkey = o_orderkey - AND o_orderdate >= DATE '1993-10-01' - AND o_orderdate < DATE '1993-10-01' + INTERVAL '3' MONTH - AND l_returnflag = 'R' - AND c_nationkey = n_nationkey -GROUP BY - c_custkey, - c_name, - c_acctbal, - c_phone, - n_name, - c_address, - c_comment -ORDER BY - revenue DESC; -``` - -**Q11** - -```sql -SELECT - ps_partkey, - sum(ps_supplycost * ps_availqty) AS value -FROM - partsupp, - supplier, - nation -WHERE - ps_suppkey = s_suppkey - AND s_nationkey = n_nationkey - AND n_name = 'GERMANY' -GROUP BY - ps_partkey -HAVING - sum(ps_supplycost * ps_availqty) > ( - SELECT - sum(ps_supplycost * ps_availqty) * 0.0001 - FROM - partsupp, - supplier, - nation - WHERE - ps_suppkey = s_suppkey - AND s_nationkey = n_nationkey - AND n_name = 'GERMANY' - ) -ORDER BY - value DESC; -``` - -**Q12** - -```sql -SELECT - l_shipmode, - sum(CASE - WHEN o_orderpriority = '1-URGENT' - OR o_orderpriority = '2-HIGH' - THEN 1 - ELSE 0 - END) AS high_line_count, - sum(CASE - WHEN o_orderpriority <> '1-URGENT' - AND o_orderpriority <> '2-HIGH' - THEN 1 - ELSE 0 - END) AS low_line_count -FROM - orders, - lineitem -WHERE - o_orderkey = l_orderkey - AND l_shipmode in ('MAIL', 'SHIP') - AND l_commitdate < l_receiptdate - AND l_shipdate < l_commitdate - AND l_receiptdate >= DATE '1994-01-01' - AND l_receiptdate < DATE '1994-01-01' + INTERVAL '1' year -GROUP BY - l_shipmode -ORDER BY - l_shipmode; -``` - -**Q13** - -```sql -SELECT - c_count, - count(*) AS custdist -FROM ( - SELECT - c_custkey, - count(o_orderkey) as c_count - FROM - customer LEFT OUTER JOIN orders ON 
- c_custkey = o_custkey - AND o_comment NOT LIKE '%special%requests%' - GROUP BY - c_custkey - ) AS c_orders -GROUP BY - c_count -ORDER BY - custdist DESC, - c_count DESC; -``` - -**Q14** - -```sql -SELECT - 100.00 * sum(CASE - WHEN p_type LIKE 'PROMO%' - THEN l_extendedprice * (1 - l_discount) - ELSE 0 - END) / sum(l_extendedprice * (1 - l_discount)) AS promo_revenue -FROM - lineitem, - part -WHERE - l_partkey = p_partkey - AND l_shipdate >= DATE '1995-09-01' - AND l_shipdate < DATE '1995-09-01' + INTERVAL '1' MONTH; -``` - -**Q15** - -```sql -CREATE VIEW revenue0 (supplier_no, total_revenue) AS - SELECT - l_suppkey, - sum(l_extendedprice * (1 - l_discount)) - FROM - lineitem - WHERE - l_shipdate >= DATE '1996-01-01' - AND l_shipdate < DATE '1996-01-01' + INTERVAL '3' MONTH - GROUP BY - l_suppkey; - -SELECT - s_suppkey, - s_name, - s_address, - s_phone, - total_revenue -FROM - supplier, - revenue0 -WHERE - s_suppkey = supplier_no - AND total_revenue = ( - SELECT - max(total_revenue) - FROM - revenue0 - ) -ORDER BY - s_suppkey; - -DROP VIEW revenue0; -``` - -**Q16** - -```sql -SELECT - p_brand, - p_type, - p_size, - count(distinct ps_suppkey) AS supplier_cnt -FROM - partsupp, - part -WHERE - p_partkey = ps_partkey - AND p_brand <> 'Brand#45' - AND p_type NOT LIKE 'MEDIUM POLISHED%' - AND p_size in (49, 14, 23, 45, 19, 3, 36, 9) - AND ps_suppkey NOT in ( - SELECT - s_suppkey - FROM - supplier - WHERE - s_comment LIKE '%Customer%Complaints%' - ) -GROUP BY - p_brand, - p_type, - p_size -ORDER BY - supplier_cnt DESC, - p_brand, - p_type, - p_size; -``` - -**Q17** - -```sql -SET allow_experimental_correlated_subqueries = 1; -- since v25.5 - -SELECT - sum(l_extendedprice) / 7.0 AS avg_yearly -FROM - lineitem, - part -WHERE - p_partkey = l_partkey - AND p_brand = 'Brand#23' - AND p_container = 'MED BOX' - AND l_quantity < ( - SELECT - 0.2 * avg(l_quantity) - FROM - lineitem - WHERE - l_partkey = p_partkey - ); -``` - -::::note -v25.5まで、クエリは相関サブクエリのため、すぐに動作しない場合があります。対応する問題: https://github.com/ClickHouse/ClickHouse/issues/6697 - -この代替のフォームは動作し、参照結果を返すことが確認されています。 - -```sql -WITH AvgQuantity AS ( - SELECT - l_partkey, - AVG(l_quantity) * 0.2 AS avg_quantity - FROM - lineitem - GROUP BY - l_partkey -) -SELECT - SUM(l.l_extendedprice) / 7.0 AS avg_yearly -FROM - lineitem l -JOIN - part p ON p.p_partkey = l.l_partkey -JOIN - AvgQuantity aq ON l.l_partkey = aq.l_partkey -WHERE - p.p_brand = 'Brand#23' - AND p.p_container = 'MED BOX' - AND l.l_quantity < aq.avg_quantity; - -``` -:::: - -**Q18** - -```sql -SELECT - c_name, - c_custkey, - o_orderkey, - o_orderdate, - o_totalprice, - sum(l_quantity) -FROM - customer, - orders, - lineitem -WHERE - o_orderkey in ( - SELECT - l_orderkey - FROM - lineitem - GROUP BY - l_orderkey - HAVING - sum(l_quantity) > 300 - ) - AND c_custkey = o_custkey - AND o_orderkey = l_orderkey -GROUP BY - c_name, - c_custkey, - o_orderkey, - o_orderdate, - o_totalprice -ORDER BY - o_totalprice DESC, - o_orderdate; -``` - -**Q19** - -```sql -SELECT - sum(l_extendedprice * (1 - l_discount)) AS revenue -FROM - lineitem, - part -WHERE - ( - p_partkey = l_partkey - AND p_brand = 'Brand#12' - AND p_container in ('SM CASE', 'SM BOX', 'SM PACK', 'SM PKG') - AND l_quantity >= 1 AND l_quantity <= 1 + 10 - AND p_size BETWEEN 1 AND 5 - AND l_shipmode in ('AIR', 'AIR REG') - AND l_shipinstruct = 'DELIVER IN PERSON' - ) - OR - ( - p_partkey = l_partkey - AND p_brand = 'Brand#23' - AND p_container in ('MED BAG', 'MED BOX', 'MED PKG', 'MED PACK') - AND l_quantity >= 10 AND l_quantity <= 10 + 10 - 
AND p_size BETWEEN 1 AND 10 - AND l_shipmode in ('AIR', 'AIR REG') - AND l_shipinstruct = 'DELIVER IN PERSON' - ) - OR - ( - p_partkey = l_partkey - AND p_brand = 'Brand#34' - AND p_container in ('LG CASE', 'LG BOX', 'LG PACK', 'LG PKG') - AND l_quantity >= 20 AND l_quantity <= 20 + 10 - AND p_size BETWEEN 1 AND 15 - AND l_shipmode in ('AIR', 'AIR REG') - AND l_shipinstruct = 'DELIVER IN PERSON' - ); -``` - -**Q20** - -```sql -SET allow_experimental_correlated_subqueries = 1; -- since v25.5 - -SELECT - s_name, - s_address -FROM - supplier, - nation -WHERE - s_suppkey in ( - SELECT - ps_suppkey - FROM - partsupp - WHERE - ps_partkey in ( - SELECT - p_partkey - FROM - part - WHERE - p_name LIKE 'forest%' - ) - AND ps_availqty > ( - SELECT - 0.5 * sum(l_quantity) - FROM - lineitem - WHERE - l_partkey = ps_partkey - AND l_suppkey = ps_suppkey - AND l_shipdate >= DATE '1994-01-01' - AND l_shipdate < DATE '1994-01-01' + INTERVAL '1' year - ) - ) - AND s_nationkey = n_nationkey - AND n_name = 'CANADA' -ORDER BY - s_name; -``` - -::::note -v25.5まで、クエリは相関サブクエリのため、すぐに動作しない場合があります。対応する問題: https://github.com/ClickHouse/ClickHouse/issues/6697 -:::: - -**Q21** - -```sql -SET allow_experimental_correlated_subqueries = 1; -- since v25.5 - -SELECT - s_name, - count(*) AS numwait -FROM - supplier, - lineitem l1, - orders, - nation -WHERE - s_suppkey = l1.l_suppkey - AND o_orderkey = l1.l_orderkey - AND o_orderstatus = 'F' - AND l1.l_receiptdate > l1.l_commitdate - AND EXISTS ( - SELECT - * - FROM - lineitem l2 - WHERE - l2.l_orderkey = l1.l_orderkey - AND l2.l_suppkey <> l1.l_suppkey - ) - AND NOT EXISTS ( - SELECT - * - FROM - lineitem l3 - WHERE - l3.l_orderkey = l1.l_orderkey - AND l3.l_suppkey <> l1.l_suppkey - AND l3.l_receiptdate > l3.l_commitdate - ) - AND s_nationkey = n_nationkey - AND n_name = 'SAUDI ARABIA' -GROUP BY - s_name -ORDER BY - numwait DESC, - s_name; -``` -::::note -v25.5まで、クエリは相関サブクエリのため、すぐに動作しない場合があります。対応する問題: https://github.com/ClickHouse/ClickHouse/issues/6697 -:::: - -**Q22** - -```sql -SET allow_experimental_correlated_subqueries = 1; -- since v25.5 - -SELECT - cntrycode, - count(*) AS numcust, - sum(c_acctbal) AS totacctbal -FROM ( - SELECT - substring(c_phone FROM 1 for 2) AS cntrycode, - c_acctbal - FROM - customer - WHERE - substring(c_phone FROM 1 for 2) in - ('13', '31', '23', '29', '30', '18', '17') - AND c_acctbal > ( - SELECT - avg(c_acctbal) - FROM - customer - WHERE - c_acctbal > 0.00 - AND substring(c_phone FROM 1 for 2) in - ('13', '31', '23', '29', '30', '18', '17') - ) - AND NOT EXISTS ( - SELECT - * - FROM - orders - WHERE - o_custkey = c_custkey - ) - ) AS custsale -GROUP BY - cntrycode -ORDER BY - cntrycode; -``` - -::::note -v25.5まで、クエリは相関サブクエリのため、すぐに動作しない場合があります。対応する問題: https://github.com/ClickHouse/ClickHouse/issues/6697 -:::: diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/tpch.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/tpch.md.hash deleted file mode 100644 index e9f3c02c8b4..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/tpch.md.hash +++ /dev/null @@ -1 +0,0 @@ -908f2197364dd38d diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/tw-weather.md b/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/tw-weather.md deleted file mode 100644 index f9711d7a471..00000000000 --- 
a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/tw-weather.md +++ /dev/null @@ -1,303 +0,0 @@ ---- -description: '過去128年間の天候観測データ131百万行' -sidebar_label: '台湾の歴史的天候データセット' -sidebar_position: 1 -slug: '/getting-started/example-datasets/tw-weather' -title: '台湾の歴史的天候データセット' ---- - - - -このデータセットは、過去128年間の歴史的気象観測測定値を含んでいます。各行は、特定の日付と時間および気象観測所での測定を示しています。 - -このデータセットの起源は[こちら](https://github.com/Raingel/historical_weather)で入手可能で、気象観測所の番号のリストは[こちら](https://github.com/Raingel/weather_station_list)で確認できます。 - -> 気象データセットのソースには、中央気象局が設置した気象観測所(ステーショコードはC0、C1、または4で始まる)と、農業委員会に属する農業気象観測所(上記以外のステーショコード)が含まれます: - - - StationId - - MeasuredDate、観測時間 - - StnPres、観測所の気圧 - - SeaPres、海面気圧 - - Td、露点温度 - - RH、相対湿度 - - 利用可能なその他の要素 - -## データのダウンロード {#downloading-the-data} - -- ClickHouse用に前処理された[バージョン](#pre-processed-data)のデータで、清掃され、再構成され、強化されています。このデータセットは1896年から2023年までの期間をカバーしています。 -- [元の生データをダウンロード](#original-raw-data)し、ClickHouseが要求する形式に変換してください。独自のカラムを追加したいユーザーは、自分のアプローチを探求または完成させることをお勧めします。 - -### 前処理されたデータ {#pre-processed-data} - -データセットは、行ごとの測定から、気象観測所IDと測定日ごとの行に再構成されています。すなわち、 - -```csv -StationId,MeasuredDate,StnPres,Tx,RH,WS,WD,WSGust,WDGust,Precp,GloblRad,TxSoil0cm,TxSoil5cm,TxSoil20cm,TxSoil50cm,TxSoil100cm,SeaPres,Td,PrecpHour,SunShine,TxSoil10cm,EvapA,Visb,UVI,Cloud Amount,TxSoil30cm,TxSoil200cm,TxSoil300cm,TxSoil500cm,VaporPressure -C0X100,2016-01-01 01:00:00,1022.1,16.1,72,1.1,8.0,,,,,,,,,,,,,,,,,,,,,,, -C0X100,2016-01-01 02:00:00,1021.6,16.0,73,1.2,358.0,,,,,,,,,,,,,,,,,,,,,,, -C0X100,2016-01-01 03:00:00,1021.3,15.8,74,1.5,353.0,,,,,,,,,,,,,,,,,,,,,,, -C0X100,2016-01-01 04:00:00,1021.2,15.8,74,1.7,8.0,,,,,,,,,,,,,,,,,,,,,,, -``` - -クエリが簡単に実行でき、結果のテーブルはスパースが少なく、一部の要素はこの気象観測所では測定できないためにnullになる可能性があります。 - -このデータセットは、以下のGoogle CloudStorageの場所で利用可能です。データセットをローカルファイルシステムにダウンロード(そしてClickHouseクライアントで挿入)するか、ClickHouseに直接挿入してください([URLからの挿入](#inserting-from-url)を参照)。 - -ダウンロードするには: - -```bash -wget https://storage.googleapis.com/taiwan-weather-observaiton-datasets/preprocessed_weather_daily_1896_2023.tar.gz - - -# オプション: チェックサムを検証 -md5sum preprocessed_weather_daily_1896_2023.tar.gz - -# チェックサムは次と等しいはずです: 11b484f5bd9ddafec5cfb131eb2dd008 - -tar -xzvf preprocessed_weather_daily_1896_2023.tar.gz -daily_weather_preprocessed_1896_2023.csv - - -# オプション: チェックサムを検証 -md5sum daily_weather_preprocessed_1896_2023.csv - -# チェックサムは次と等しいはずです: 1132248c78195c43d93f843753881754 -``` - -### 元の生データ {#original-raw-data} - -以下は、元の生データをダウンロードし、変換・編集する手順についての詳細です。 - -#### ダウンロード {#download} - -元の生データをダウンロードするには: - -```bash -mkdir tw_raw_weather_data && cd tw_raw_weather_data - -wget https://storage.googleapis.com/taiwan-weather-observaiton-datasets/raw_data_weather_daily_1896_2023.tar.gz - - -# オプション: チェックサムを検証 -md5sum raw_data_weather_daily_1896_2023.tar.gz - -# チェックサムは次と等しいはずです: b66b9f137217454d655e3004d7d1b51a - -tar -xzvf raw_data_weather_daily_1896_2023.tar.gz -466920_1928.csv -466920_1929.csv -466920_1930.csv -466920_1931.csv -... 
- - -# オプション: チェックサムを検証 -cat *.csv | md5sum - -# チェックサムは次と等しいはずです: b26db404bf84d4063fac42e576464ce1 -``` - -#### 台湾の気象観測所を取得 {#retrieve-the-taiwan-weather-stations} - -```bash -wget -O weather_sta_list.csv https://github.com/Raingel/weather_station_list/raw/main/data/weather_sta_list.csv - - -# オプション: UTF-8-BOMをUTF-8エンコーディングに変換 -sed -i '1s/^\xEF\xBB\xBF//' weather_sta_list.csv -``` - -## テーブルスキーマの作成 {#create-table-schema} - -ClickHouseでMergeTreeテーブルを作成します(ClickHouseクライアントから)。 - -```bash -CREATE TABLE tw_weather_data ( - StationId String null, - MeasuredDate DateTime64, - StnPres Float64 null, - SeaPres Float64 null, - Tx Float64 null, - Td Float64 null, - RH Float64 null, - WS Float64 null, - WD Float64 null, - WSGust Float64 null, - WDGust Float64 null, - Precp Float64 null, - PrecpHour Float64 null, - SunShine Float64 null, - GloblRad Float64 null, - TxSoil0cm Float64 null, - TxSoil5cm Float64 null, - TxSoil10cm Float64 null, - TxSoil20cm Float64 null, - TxSoil50cm Float64 null, - TxSoil100cm Float64 null, - TxSoil30cm Float64 null, - TxSoil200cm Float64 null, - TxSoil300cm Float64 null, - TxSoil500cm Float64 null, - VaporPressure Float64 null, - UVI Float64 null, - "Cloud Amount" Float64 null, - EvapA Float64 null, - Visb Float64 null -) -ENGINE = MergeTree -ORDER BY (MeasuredDate); -``` - -## ClickHouseへの挿入 {#inserting-into-clickhouse} - -### ローカルファイルからの挿入 {#inserting-from-local-file} - -データは以下のようにローカルファイルから挿入できます(ClickHouseクライアントから): - -```sql -INSERT INTO tw_weather_data FROM INFILE '/path/to/daily_weather_preprocessed_1896_2023.csv' -``` - -ここで`/path/to`は、ディスク上のローカルファイルへの特定のユーザーパスを表します。 - -データをClickHouseに挿入した後のサンプルレスポンス出力は次の通りです: - -```response -Query id: 90e4b524-6e14-4855-817c-7e6f98fbeabb - -Ok. -131985329 rows in set. Elapsed: 71.770 sec. Processed 131.99 million rows, 10.06 GB (1.84 million rows/s., 140.14 MB/s.) -Peak memory usage: 583.23 MiB. -``` - -### URLからの挿入 {#inserting-from-url} - -```sql -INSERT INTO tw_weather_data SELECT * -FROM url('https://storage.googleapis.com/taiwan-weather-observaiton-datasets/daily_weather_preprocessed_1896_2023.csv', 'CSVWithNames') - -``` -これを高速化する方法については、[大規模データの読み込みの調整](https://clickhouse.com/blog/supercharge-your-clickhouse-data-loads-part2)に関するブログ記事を参照してください。 - -## データ行とサイズのチェック {#check-data-rows-and-sizes} - -1. 挿入された行数を確認するには: - -```sql -SELECT formatReadableQuantity(count()) -FROM tw_weather_data; -``` - -```response -┌─formatReadableQuantity(count())─┐ -│ 131.99 million │ -└─────────────────────────────────┘ -``` - -2. 
このテーブルが使用しているディスクスペースを確認するには: - -```sql -SELECT - formatReadableSize(sum(bytes)) AS disk_size, - formatReadableSize(sum(data_uncompressed_bytes)) AS uncompressed_size -FROM system.parts -WHERE (`table` = 'tw_weather_data') AND active -``` - -```response -┌─disk_size─┬─uncompressed_size─┐ -│ 2.13 GiB │ 32.94 GiB │ -└───────────┴───────────────────┘ -``` - -## サンプルクエリ {#sample-queries} - -### Q1: 特定の年における各気象観測所の最高露点温度を取得する {#q1-retrieve-the-highest-dew-point-temperature-for-each-weather-station-in-the-specific-year} - -```sql -SELECT - StationId, - max(Td) AS max_td -FROM tw_weather_data -WHERE (year(MeasuredDate) = 2023) AND (Td IS NOT NULL) -GROUP BY StationId - -┌─StationId─┬─max_td─┐ -│ 466940 │ 1 │ -│ 467300 │ 1 │ -│ 467540 │ 1 │ -│ 467490 │ 1 │ -│ 467080 │ 1 │ -│ 466910 │ 1 │ -│ 467660 │ 1 │ -│ 467270 │ 1 │ -│ 467350 │ 1 │ -│ 467571 │ 1 │ -│ 466920 │ 1 │ -│ 467650 │ 1 │ -│ 467550 │ 1 │ -│ 467480 │ 1 │ -│ 467610 │ 1 │ -│ 467050 │ 1 │ -│ 467590 │ 1 │ -│ 466990 │ 1 │ -│ 467060 │ 1 │ -│ 466950 │ 1 │ -│ 467620 │ 1 │ -│ 467990 │ 1 │ -│ 466930 │ 1 │ -│ 467110 │ 1 │ -│ 466881 │ 1 │ -│ 467410 │ 1 │ -│ 467441 │ 1 │ -│ 467420 │ 1 │ -│ 467530 │ 1 │ -│ 466900 │ 1 │ -└───────────┴────────┘ - -30行がセットされています。経過時間: 0.045秒。処理されたのは641万行、187.33 MB(143.92万行/s、4.21 GB/s)。 -``` - -### Q2: 特定の期間、フィールド、および気象観測所による生データの取得 {#q2-raw-data-fetching-with-the-specific-duration-time-range-fields-and-weather-station} - -```sql -SELECT - StnPres, - SeaPres, - Tx, - Td, - RH, - WS, - WD, - WSGust, - WDGust, - Precp, - PrecpHour -FROM tw_weather_data -WHERE (StationId = 'C0UB10') AND (MeasuredDate >= '2023-12-23') AND (MeasuredDate < '2023-12-24') -ORDER BY MeasuredDate ASC -LIMIT 10 -``` - -```response -┌─StnPres─┬─SeaPres─┬───Tx─┬───Td─┬─RH─┬──WS─┬──WD─┬─WSGust─┬─WDGust─┬─Precp─┬─PrecpHour─┐ -│ 1029.5 │ ᴺᵁᴸᴸ │ 11.8 │ ᴺᵁᴸᴸ │ 78 │ 2.7 │ 271 │ 5.5 │ 275 │ -99.8 │ -99.8 │ -│ 1029.8 │ ᴺᵁᴸᴸ │ 12.3 │ ᴺᵁᴸᴸ │ 78 │ 2.7 │ 289 │ 5.5 │ 308 │ -99.8 │ -99.8 │ -│ 1028.6 │ ᴺᵁᴸᴸ │ 12.3 │ ᴺᵁᴸᴸ │ 79 │ 2.3 │ 251 │ 6.1 │ 289 │ -99.8 │ -99.8 │ -│ 1028.2 │ ᴺᵁᴸᴸ │ 13 │ ᴺᵁᴸᴸ │ 75 │ 4.3 │ 312 │ 7.5 │ 316 │ -99.8 │ -99.8 │ -│ 1027.8 │ ᴺᵁᴸᴸ │ 11.1 │ ᴺᵁᴸᴸ │ 89 │ 7.1 │ 310 │ 11.6 │ 322 │ -99.8 │ -99.8 │ -│ 1027.8 │ ᴺᵁᴸᴸ │ 11.6 │ ᴺᵁᴸᴸ │ 90 │ 3.1 │ 269 │ 10.7 │ 295 │ -99.8 │ -99.8 │ -│ 1027.9 │ ᴺᵁᴸᴸ │ 12.3 │ ᴺᵁᴸᴸ │ 89 │ 4.7 │ 296 │ 8.1 │ 310 │ -99.8 │ -99.8 │ -│ 1028.2 │ ᴺᵁᴸᴸ │ 12.2 │ ᴺᵁᴸᴸ │ 94 │ 2.5 │ 246 │ 7.1 │ 283 │ -99.8 │ -99.8 │ -│ 1028.4 │ ᴺᵁᴸᴸ │ 12.5 │ ᴺᵁᴸᴸ │ 94 │ 3.1 │ 265 │ 4.8 │ 297 │ -99.8 │ -99.8 │ -│ 1028.3 │ ᴺᵁᴸᴸ │ 13.6 │ ᴺᵁᴸᴸ │ 91 │ 1.2 │ 273 │ 4.4 │ 256 │ -99.8 │ -99.8 │ -└─────────┴─────────┴──────┴──────┴────┴─────┴─────┴────────┴────────┴───────┴───────────┘ - -10行がセットされています。経過時間: 0.009秒。処理されたのは91,700行、2.33 MB(9.67万行/s、245.31 MB/s)。 -``` - -## クレジット {#credits} - -中央気象局および農業委員会の農業気象観測ネットワーク(ステーション)によるこのデータセットの準備、清掃、および配布に対する努力を認識したいと思います。あなたの努力に感謝します。 - -Ou, J.-H., Kuo, C.-H., Wu, Y.-F., Lin, G.-C., Lee, M.-H., Chen, R.-K., Chou, H.-P., Wu, H.-Y., Chu, S.-C., Lai, Q.-J., Tsai, Y.-C., Lin, C.-C., Kuo, C.-C., Liao, C.-T., Chen, Y.-N., Chu, Y.-W., Chen, C.-Y., 2023. 台湾での稲のいもち病の早期警告のための応用指向の深層学習モデル。生態情報学 73, 101950. 
https://doi.org/10.1016/j.ecoinf.2022.101950 [13/12/2022] diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/tw-weather.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/tw-weather.md.hash deleted file mode 100644 index 150ea00a787..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/tw-weather.md.hash +++ /dev/null @@ -1 +0,0 @@ -7957b25b3baad99a diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/uk-price-paid.md b/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/uk-price-paid.md deleted file mode 100644 index 6bd1053cc34..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/uk-price-paid.md +++ /dev/null @@ -1,175 +0,0 @@ ---- -description: 'Learn how to use projections to improve the performance of queries - that you run frequently using the UK property dataset, which contains data about - prices paid for real-estate property in England and Wales' -sidebar_label: 'UK Property Prices' -sidebar_position: 1 -slug: '/getting-started/example-datasets/uk-price-paid' -title: 'The UK property prices dataset' ---- - - - -このデータには、イングランドおよびウェールズにおける不動産物件の購入価格が含まれています。このデータは1995年から利用可能で、圧縮されていない形のデータセットサイズは約4 GiB(ClickHouseでは約278 MiBしかかかりません)。 - -- ソース: https://www.gov.uk/government/statistical-data-sets/price-paid-data-downloads -- フィールドの説明: https://www.gov.uk/guidance/about-the-price-paid-data -- HM土地台帳データ © Crown copyright and database right 2021. このデータはOpen Government Licence v3.0のもとでライセンスされています。 - -## テーブルの作成 {#create-table} - -```sql -CREATE DATABASE uk; - -CREATE TABLE uk.uk_price_paid -( - price UInt32, - date Date, - postcode1 LowCardinality(String), - postcode2 LowCardinality(String), - type Enum8('terraced' = 1, 'semi-detached' = 2, 'detached' = 3, 'flat' = 4, 'other' = 0), - is_new UInt8, - duration Enum8('freehold' = 1, 'leasehold' = 2, 'unknown' = 0), - addr1 String, - addr2 String, - street LowCardinality(String), - locality LowCardinality(String), - town LowCardinality(String), - district LowCardinality(String), - county LowCardinality(String) -) -ENGINE = MergeTree -ORDER BY (postcode1, postcode2, addr1, addr2); -``` - -## データの前処理と挿入 {#preprocess-import-data} - -`url` 関数を使用して、データをClickHouseにストリーミングします。まず、一部の受信データを前処理する必要があります。これには以下が含まれます。 -- `postcode` を2つの異なるカラム - `postcode1` と `postcode2` に分割し、ストレージとクエリのために最適化します。 -- `time` フィールドを日付に変換します。これは0:00の時間だけを含むためです。 -- 分析に必要ないため、[UUid](../../sql-reference/data-types/uuid.md) フィールドを無視します。 -- `type` と `duration` をより読みやすい `Enum` フィールドに変換します。これは [transform](../../sql-reference/functions/other-functions.md#transform) 関数を使用します。 -- `is_new` フィールドを単一文字列(`Y`/`N`)から [UInt8](../../sql-reference/data-types/int-uint) フィールドに変換し、0または1にします。 -- 最後の2つのカラムは全て同じ値(0)を持つため、削除します。 - -`url` 関数は、ウェブサーバーからのデータをClickHouseのテーブルにストリーミングします。次のコマンドは、`uk_price_paid` テーブルに500万行を挿入します。 - -```sql -INSERT INTO uk.uk_price_paid -SELECT - toUInt32(price_string) AS price, - parseDateTimeBestEffortUS(time) AS date, - splitByChar(' ', postcode)[1] AS postcode1, - splitByChar(' ', postcode)[2] AS postcode2, - transform(a, ['T', 'S', 'D', 'F', 'O'], ['terraced', 'semi-detached', 'detached', 'flat', 'other']) AS type, - b = 'Y' AS is_new, - transform(c, ['F', 'L', 'U'], ['freehold', 'leasehold', 'unknown']) AS duration, - addr1, - addr2, - street, - locality, - town, - district, - county -FROM url( - 
'http://prod1.publicdata.landregistry.gov.uk.s3-website-eu-west-1.amazonaws.com/pp-complete.csv', - 'CSV', - 'uuid_string String, - price_string String, - time String, - postcode String, - a String, - b String, - c String, - addr1 String, - addr2 String, - street String, - locality String, - town String, - district String, - county String, - d String, - e String' -) SETTINGS max_http_get_redirects=10; -``` - -データが挿入されるのを待ちます - ネットワーク速度によっては1分か2分かかるでしょう。 - -## データの検証 {#validate-data} - -挿入された行数を確認して、動作が正しかったか確かめます。 - -```sql runnable -SELECT count() -FROM uk.uk_price_paid -``` - -このクエリが実行された時点で、データセットには27,450,499行がありました。ClickHouseでのテーブルのストレージサイズを確認してみましょう。 - -```sql runnable -SELECT formatReadableSize(total_bytes) -FROM system.tables -WHERE name = 'uk_price_paid' -``` - -テーブルのサイズはわずか221.43 MiBです! - -## クエリの実行 {#run-queries} - -データを分析するためにいくつかのクエリを実行します。 - -### クエリ1. 年ごとの平均価格 {#average-price} - -```sql runnable -SELECT - toYear(date) AS year, - round(avg(price)) AS price, - bar(price, 0, 1000000, 80 -) -FROM uk.uk_price_paid -GROUP BY year -ORDER BY year -``` - -### クエリ2. ロンドンの年ごとの平均価格 {#average-price-london} - -```sql runnable -SELECT - toYear(date) AS year, - round(avg(price)) AS price, - bar(price, 0, 2000000, 100 -) -FROM uk.uk_price_paid -WHERE town = 'LONDON' -GROUP BY year -ORDER BY year -``` - -2020年に住宅価格に何かが起こりました!しかし、それはおそらく驚くべきことではないでしょう... - -### クエリ3. 最も高価な地域 {#most-expensive-neighborhoods} - -```sql -SELECT - town, - district, - count() AS c, - round(avg(price)) AS price, - bar(price, 0, 5000000, 100) -FROM uk.uk_price_paid -WHERE date >= '2020-01-01' -GROUP BY - town, - district -HAVING c >= 100 -ORDER BY price DESC -LIMIT 100 -``` - -## プロジェクションを使用したクエリの高速化 {#speeding-up-queries-with-projections} - -プロジェクションを使用することで、これらのクエリの速度を向上させることができます。このデータセットの例については、["Projections"](/data-modeling/projections) を参照してください。 - -### Playgroundでテスト {#playground} - -データセットは、[オンラインプレイグラウンド](https://sql.clickhouse.com?query_id=TRCWH5ZETY4SEEK8ISCCAX)でも利用可能です。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/uk-price-paid.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/uk-price-paid.md.hash deleted file mode 100644 index 902a8a4e3b6..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/uk-price-paid.md.hash +++ /dev/null @@ -1 +0,0 @@ -0410d06546a68783 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/wikistat.md b/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/wikistat.md deleted file mode 100644 index 08d012f306c..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/wikistat.md +++ /dev/null @@ -1,86 +0,0 @@ ---- -description: 'Explore the WikiStat dataset containing 0.5 trillion records.' 
-sidebar_label: 'WikiStat' -slug: '/getting-started/example-datasets/wikistat' -title: 'WikiStat' ---- - - - -データセットには0.5兆レコードが含まれています。 - -FOSDEM 2023からのビデオをご覧ください: https://www.youtube.com/watch?v=JlcI2Vfz_uk - -およびプレゼンテーション: https://presentations.clickhouse.com/fosdem2023/ - -データソース: https://dumps.wikimedia.org/other/pageviews/ - -リンクのリストを取得する: -```shell -for i in {2015..2023}; do - for j in {01..12}; do - echo "${i}-${j}" >&2 - curl -sSL "https://dumps.wikimedia.org/other/pageviews/$i/$i-$j/" \ - | grep -oE 'pageviews-[0-9]+-[0-9]+\.gz' - done -done | sort | uniq | tee links.txt -``` - -データをダウンロードする: -```shell -sed -r 's!pageviews-([0-9]{4})([0-9]{2})[0-9]{2}-[0-9]+\.gz!https://dumps.wikimedia.org/other/pageviews/\1/\1-\2/\0!' \ - links.txt | xargs -P3 wget --continue -``` - -(約3日かかります) - -テーブルを作成する: - -```sql -CREATE TABLE wikistat -( - time DateTime CODEC(Delta, ZSTD(3)), - project LowCardinality(String), - subproject LowCardinality(String), - path String CODEC(ZSTD(3)), - hits UInt64 CODEC(ZSTD(3)) -) -ENGINE = MergeTree -ORDER BY (path, time); -``` - -データをロードする: - -```shell -clickhouse-local --query " - WITH replaceRegexpOne(_path, '^.+pageviews-(\\d{4})(\\d{2})(\\d{2})-(\\d{2})(\\d{2})(\\d{2}).gz$', '\1-\2-\3 \4-\5-\6')::DateTime AS time, - extractGroups(line, '^([^ \\.]+)(\\.[^ ]+)? +([^ ]+) +(\\d+) +(\\d+)$') AS values - SELECT - time, - values[1] AS project, - values[2] AS subproject, - values[3] AS path, - (values[4])::UInt64 AS hits - FROM file('pageviews*.gz', LineAsString) - WHERE length(values) = 5 FORMAT Native -" | clickhouse-client --query "INSERT INTO wikistat FORMAT Native" -``` - -または、クリーンデータをロードする: - -```sql -INSERT INTO wikistat WITH - parseDateTimeBestEffort(extract(_file, '^pageviews-([\\d\\-]+)\\.gz$')) AS time, - splitByChar(' ', line) AS values, - splitByChar('.', values[1]) AS projects -SELECT - time, - projects[1] AS project, - projects[2] AS subproject, - decodeURLComponent(values[2]) AS path, - CAST(values[3], 'UInt64') AS hits -FROM s3( - 'https://clickhouse-public-datasets.s3.amazonaws.com/wikistat/original/pageviews*.gz', - LineAsString) -WHERE length(values) >= 3 -``` diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/wikistat.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/wikistat.md.hash deleted file mode 100644 index 96cc20f520d..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/wikistat.md.hash +++ /dev/null @@ -1 +0,0 @@ -d37808d440084bae diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/youtube-dislikes.md b/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/youtube-dislikes.md deleted file mode 100644 index c5fb7554968..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/youtube-dislikes.md +++ /dev/null @@ -1,486 +0,0 @@ ---- -description: 'YouTubeビデオのディスライクのコレクションです。' -sidebar_label: 'YouTubeのディスライク' -slug: '/getting-started/example-datasets/youtube-dislikes' -title: 'YouTube dataset of dislikes' ---- - - - -2021年11月、YouTubeはすべての動画から公開された ***低評価*** カウントを削除しました。作成者はまだ低評価の数を確認できますが、視聴者は動画が受け取った ***高評価*** の数のみを見ることができます。 - -:::important -データセットには45.5億件以上のレコードが含まれているため、リソースがその種類のボリュームに対応できない限り、以下のコマンドをそのままコピー&ペーストしないように注意してください。以下のコマンドは、[ClickHouse Cloud](https://clickhouse.cloud) の **Production** インスタンスで実行されました。 -::: - 
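-上記の注意のとおりデータ量が非常に多いため、全件を取り込む前にまず数行だけプレビューして規模や内容を確認するのも一つの方法です。以下はあくまで参考のスケッチで、後述の手順で使用する公開S3パスと、推測スキーマに基づくカラム名を前提としています:
-
-```sql
--- 参考: 取り込み前の数行プレビュー(カラム名は後述の推測スキーマに基づく想定)
-SELECT id, title, view_count, like_count, dislike_count
-FROM s3(
-    'https://clickhouse-public-datasets.s3.amazonaws.com/youtube/original/files/*.zst',
-    'JSONLines'
-)
-LIMIT 5;
-```
-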
-データはJSON形式で、[archive.org](https://archive.org/download/dislikes_youtube_2021_12_video_json_files) からダウンロードできます。同じデータをS3にも提供しているので、ClickHouse Cloudインスタンスにより効率的にダウンロードできます。 - -ClickHouse Cloudでテーブルを作成し、データを挿入する手順は以下の通りです。 - -:::note -以下の手順は、ローカルのClickHouseインストールでも簡単に動作します。唯一の変更は、`s3cluster`の代わりに`s3`関数を使用することですが、クラスターが構成されている場合は `default` をクラスターの名前に変更してください。 -::: - -## 手順の説明 {#step-by-step-instructions} - -1. データがどのような形をしているか見てみましょう。 `s3cluster` テーブル関数はテーブルを返すので、結果を `DESCRIBE` できます: - -```sql -DESCRIBE s3( - 'https://clickhouse-public-datasets.s3.amazonaws.com/youtube/original/files/*.zst', - 'JSONLines' -); -``` - -ClickHouseはJSONファイルから以下のスキーマを推測します: - -```response -┌─name────────────────┬─type───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┬─default_type─┬─default_expression─┬─comment─┬─codec_expression─┬─ttl_expression─┐ -│ id │ Nullable(String) │ │ │ │ │ │ -│ fetch_date │ Nullable(String) │ │ │ │ │ │ -│ upload_date │ Nullable(String) │ │ │ │ │ │ -│ title │ Nullable(String) │ │ │ │ │ │ -│ uploader_id │ Nullable(String) │ │ │ │ │ │ -│ uploader │ Nullable(String) │ │ │ │ │ │ -│ uploader_sub_count │ Nullable(Int64) │ │ │ │ │ │ -│ is_age_limit │ Nullable(Bool) │ │ │ │ │ │ -│ view_count │ Nullable(Int64) │ │ │ │ │ │ -│ like_count │ Nullable(Int64) │ │ │ │ │ │ -│ dislike_count │ Nullable(Int64) │ │ │ │ │ │ -│ is_crawlable │ Nullable(Bool) │ │ │ │ │ │ -│ is_live_content │ Nullable(Bool) │ │ │ │ │ │ -│ has_subtitles │ Nullable(Bool) │ │ │ │ │ │ -│ is_ads_enabled │ Nullable(Bool) │ │ │ │ │ │ -│ is_comments_enabled │ Nullable(Bool) │ │ │ │ │ │ -│ description │ Nullable(String) │ │ │ │ │ │ -│ rich_metadata │ Array(Tuple(call Nullable(String), content Nullable(String), subtitle Nullable(String), title Nullable(String), url Nullable(String))) │ │ │ │ │ │ -│ super_titles │ Array(Tuple(text Nullable(String), url Nullable(String))) │ │ │ │ │ │ -│ uploader_badges │ Nullable(String) │ │ │ │ │ │ -│ video_badges │ Nullable(String) │ │ │ │ │ │ -└─────────────────────┴────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┴──────────────┴────────────────────┴─────────┴──────────────────┴────────────────┘ -``` - -2. 推測されたスキーマに基づいて、データ型を整理し、主キーを追加しました。以下のテーブルを定義します: - -```sql -CREATE TABLE youtube -( - `id` String, - `fetch_date` DateTime, - `upload_date_str` String, - `upload_date` Date, - `title` String, - `uploader_id` String, - `uploader` String, - `uploader_sub_count` Int64, - `is_age_limit` Bool, - `view_count` Int64, - `like_count` Int64, - `dislike_count` Int64, - `is_crawlable` Bool, - `has_subtitles` Bool, - `is_ads_enabled` Bool, - `is_comments_enabled` Bool, - `description` String, - `rich_metadata` Array(Tuple(call String, content String, subtitle String, title String, url String)), - `super_titles` Array(Tuple(text String, url String)), - `uploader_badges` String, - `video_badges` String -) -ENGINE = MergeTree -ORDER BY (uploader, upload_date) -``` - -3. 
以下のコマンドは、S3ファイルから `youtube` テーブルにレコードをストリームします。 - -:::important -これは多くのデータを挿入します - 46.5億行です。データセット全体を挿入したくない場合は、単に `LIMIT` 句を追加して希望する行数を指定してください。 -::: - -```sql -INSERT INTO youtube -SETTINGS input_format_null_as_default = 1 -SELECT - id, - parseDateTimeBestEffortUSOrZero(toString(fetch_date)) AS fetch_date, - upload_date AS upload_date_str, - toDate(parseDateTimeBestEffortUSOrZero(upload_date::String)) AS upload_date, - ifNull(title, '') AS title, - uploader_id, - ifNull(uploader, '') AS uploader, - uploader_sub_count, - is_age_limit, - view_count, - like_count, - dislike_count, - is_crawlable, - has_subtitles, - is_ads_enabled, - is_comments_enabled, - ifNull(description, '') AS description, - rich_metadata, - super_titles, - ifNull(uploader_badges, '') AS uploader_badges, - ifNull(video_badges, '') AS video_badges -FROM s3( - 'https://clickhouse-public-datasets.s3.amazonaws.com/youtube/original/files/*.zst', - 'JSONLines' -) -``` - -`INSERT` コマンドに関するコメント: - -- `parseDateTimeBestEffortUSOrZero` 関数は、受信する日付フィールドが正しい形式でない場合に便利です。もし `fetch_date` が正しく解析されない場合、 `0` に設定されます。 -- `upload_date` カラムには有効な日付が含まれていますが、「4 hours ago」などの文字列も含まれており、これは確かに有効な日付ではありません。オリジナルの値を `upload_date_str` に保存し、`toDate(parseDateTimeBestEffortUSOrZero(upload_date::String))` で解析を試みることにしました。解析に失敗した場合は、単に `0` になります。 -- テーブルに `NULL` 値が入るのを避けるために `ifNull` を使用しました。受信する値が `NULL` の場合、 `ifNull` 関数が値を空の文字列に設定しています。 - -4. ClickHouse CloudのSQLコンソールで新しいタブを開く(または新しい `clickhouse-client` ウィンドウを開く)と、カウントが増えていく様子を確認できます。サーバーリソースに応じて、46.5B行を挿入するにはしばらく時間がかかります。(設定を調整しなくても、約4.5時間かかります。) - -```sql -SELECT formatReadableQuantity(count()) -FROM youtube -``` - -```response -┌─formatReadableQuantity(count())─┐ -│ 4.56 billion │ -└─────────────────────────────────┘ -``` - -5. データが挿入されたら、お気に入りの動画またはチャンネルの低評価の数をカウントしてみてください。ClickHouseがアップロードした動画の数を見てみましょう: - -```sql -SELECT count() -FROM youtube -WHERE uploader = 'ClickHouse'; -``` - -```response -┌─count()─┐ -│ 84 │ -└─────────┘ - -1 row in set. Elapsed: 0.570 sec. Processed 237.57 thousand rows, 5.77 MB (416.54 thousand rows/s., 10.12 MB/s.) -``` - -:::note -上記のクエリは、主キーの最初のカラムとして `uploader` を選択したため、非常に迅速に実行されます - そのため、237k行を処理する必要がありました。 -::: - -6. ClickHouseの動画の高評価と低評価を見てみましょう: - -```sql -SELECT - title, - like_count, - dislike_count -FROM youtube -WHERE uploader = 'ClickHouse' -ORDER BY dislike_count DESC; -``` - -レスポンスは以下のようになります: - -```response -┌─title────────────────────────────────────────────────────────────────────────────────────────────────┬─like_count─┬─dislike_count─┐ -│ ClickHouse v21.11 Release Webinar │ 52 │ 3 │ -│ ClickHouse Introduction │ 97 │ 3 │ -│ Casa Modelo Algarve │ 180 │ 3 │ -│ Профайлер запросов: трудный путь │ 33 │ 3 │ -│ ClickHouse в Курсометре │ 4 │ 2 │ -│ 10 Good Reasons to Use ClickHouse │ 27 │ 2 │ -... - -84 rows in set. Elapsed: 0.013 sec. Processed 155.65 thousand rows, 16.94 MB (11.96 million rows/s., 1.30 GB/s.) -``` - -7. `title` または `description` フィールドに **ClickHouse** が含まれている動画を検索します: - -```sql -SELECT - view_count, - like_count, - dislike_count, - concat('https://youtu.be/', id) AS url, - title -FROM youtube -WHERE (title ILIKE '%ClickHouse%') OR (description ILIKE '%ClickHouse%') -ORDER BY - like_count DESC, - view_count DESC; -``` - -このクエリはすべての行を処理し、2つの文字列カラムを解析する必要があります。それでも、4.15M行/秒で良好なパフォーマンスを得ています: - -```response -1174 rows in set. Elapsed: 1099.368 sec. Processed 4.56 billion rows, 1.98 TB (4.15 million rows/s., 1.80 GB/s.) 
-``` - -結果は以下のようになります: - -```response -┌─view_count─┬─like_count─┬─dislike_count─┬─url──────────────────────────┬─title──────────────────────────────────────────────────────────────────────────────────────────────────┐ -│ 1919 │ 63 │ 1 │ https://youtu.be/b9MeoOtAivQ │ ClickHouse v21.10 Release Webinar │ -│ 8710 │ 62 │ 4 │ https://youtu.be/PeV1mC2z--M │ What is JDBC DriverManager? | JDBC │ -│ 3534 │ 62 │ 1 │ https://youtu.be/8nWRhK9gw10 │ CLICKHOUSE - Arquitetura Modular │ -``` - -## 質問 {#questions} - -### コメントを無効にすると、実際に高評価または低評価をクリックする可能性が低くなりますか? {#if-someone-disables-comments-does-it-lower-the-chance-someone-will-actually-click-like-or-dislike} - -コメントが無効になった場合、人々は動画についての気持ちを表現するために高評価または低評価をクリックする可能性が高くなりますか? - -```sql -SELECT - concat('< ', formatReadableQuantity(view_range)) AS views, - is_comments_enabled, - total_clicks / num_views AS prob_like_dislike -FROM -( - SELECT - is_comments_enabled, - power(10, CEILING(log10(view_count + 1))) AS view_range, - sum(like_count + dislike_count) AS total_clicks, - sum(view_count) AS num_views - FROM youtube - GROUP BY - view_range, - is_comments_enabled -) WHERE view_range > 1 -ORDER BY - is_comments_enabled ASC, - num_views ASC; -``` - -```response -┌─views─────────────┬─is_comments_enabled─┬────prob_like_dislike─┐ -│ < 10.00 │ false │ 0.08224180712685371 │ -│ < 100.00 │ false │ 0.06346337759167248 │ -│ < 1.00 thousand │ false │ 0.03201883652987105 │ -│ < 10.00 thousand │ false │ 0.01716073540410903 │ -│ < 10.00 billion │ false │ 0.004555639481829971 │ -│ < 100.00 thousand │ false │ 0.01293351460515323 │ -│ < 1.00 billion │ false │ 0.004761811192464957 │ -│ < 1.00 million │ false │ 0.010472604018980551 │ -│ < 10.00 million │ false │ 0.00788902538420125 │ -│ < 100.00 million │ false │ 0.00579152804250582 │ -│ < 10.00 │ true │ 0.09819517478134059 │ -│ < 100.00 │ true │ 0.07403784478585775 │ -│ < 1.00 thousand │ true │ 0.03846294910067627 │ -│ < 10.00 billion │ true │ 0.005615217329358215 │ -│ < 10.00 thousand │ true │ 0.02505881391701455 │ -│ < 1.00 billion │ true │ 0.007434998802482997 │ -│ < 100.00 thousand │ true │ 0.022694648130822004 │ -│ < 100.00 million │ true │ 0.011761563746575625 │ -│ < 1.00 million │ true │ 0.020776022304589435 │ -│ < 10.00 million │ true │ 0.016917095718089584 │ -└───────────────────┴─────────────────────┴──────────────────────┘ - -22 rows in set. Elapsed: 8.460 sec. Processed 4.56 billion rows, 77.48 GB (538.73 million rows/s., 9.16 GB/s.) -``` - -コメントを有効にすると、エンゲージメント率が高くなることが関連しているようです。 - - -### 時間の経過とともに動画の数はどのように変化しますか - 注目すべきイベントは? 
{#how-does-the-number-of-videos-change-over-time---notable-events} - -```sql -SELECT - toStartOfMonth(toDateTime(upload_date)) AS month, - uniq(uploader_id) AS uploaders, - count() as num_videos, - sum(view_count) as view_count -FROM youtube -GROUP BY month -ORDER BY month ASC; -``` - -```response -┌──────month─┬─uploaders─┬─num_videos─┬───view_count─┐ -│ 2005-04-01 │ 5 │ 6 │ 213597737 │ -│ 2005-05-01 │ 6 │ 9 │ 2944005 │ -│ 2005-06-01 │ 165 │ 351 │ 18624981 │ -│ 2005-07-01 │ 395 │ 1168 │ 94164872 │ -│ 2005-08-01 │ 1171 │ 3128 │ 124540774 │ -│ 2005-09-01 │ 2418 │ 5206 │ 475536249 │ -│ 2005-10-01 │ 6750 │ 13747 │ 737593613 │ -│ 2005-11-01 │ 13706 │ 28078 │ 1896116976 │ -│ 2005-12-01 │ 24756 │ 49885 │ 2478418930 │ -│ 2006-01-01 │ 49992 │ 100447 │ 4532656581 │ -│ 2006-02-01 │ 67882 │ 138485 │ 5677516317 │ -│ 2006-03-01 │ 103358 │ 212237 │ 8430301366 │ -│ 2006-04-01 │ 114615 │ 234174 │ 9980760440 │ -│ 2006-05-01 │ 152682 │ 332076 │ 14129117212 │ -│ 2006-06-01 │ 193962 │ 429538 │ 17014143263 │ -│ 2006-07-01 │ 234401 │ 530311 │ 18721143410 │ -│ 2006-08-01 │ 281280 │ 614128 │ 20473502342 │ -│ 2006-09-01 │ 312434 │ 679906 │ 23158422265 │ -│ 2006-10-01 │ 404873 │ 897590 │ 27357846117 │ -``` - -covidの周りでのアップローダーの急増が目立ちます [こちら](https://www.theverge.com/2020/3/27/21197642/youtube-with-me-style-videos-views-coronavirus-cook-workout-study-home-beauty) のリンクをご覧ください。 - - -### 時間の経過とともに字幕が増えるのはいつか {#more-subtitles-over-time-and-when} - -音声認識の進歩により、字幕を作成することがこれまで以上に簡単になり、YouTubeは2009年末に自動キャプションを追加しました。この時に急増したのでしょうか? - -```sql -SELECT - toStartOfMonth(upload_date) AS month, - countIf(has_subtitles) / count() AS percent_subtitles, - percent_subtitles - any(percent_subtitles) OVER ( - ORDER BY month ASC ROWS BETWEEN 1 PRECEDING AND 1 PRECEDING - ) AS previous -FROM youtube -GROUP BY month -ORDER BY month ASC; -``` - -```response -┌──────month─┬───percent_subtitles─┬────────────────previous─┐ -│ 2015-01-01 │ 0.2652653881082824 │ 0.2652653881082824 │ -│ 2015-02-01 │ 0.3147556050309162 │ 0.049490216922633834 │ -│ 2015-03-01 │ 0.32460464492371877 │ 0.009849039892802558 │ -│ 2015-04-01 │ 0.33471963051468445 │ 0.010114985590965686 │ -│ 2015-05-01 │ 0.3168087575501062 │ -0.017910872964578273 │ -│ 2015-06-01 │ 0.3162609788438222 │ -0.0005477787062839745 │ -│ 2015-07-01 │ 0.31828767677518033 │ 0.0020266979313581235 │ -│ 2015-08-01 │ 0.3045551564286859 │ -0.013732520346494415 │ -│ 2015-09-01 │ 0.311221133995152 │ 0.006665977566466086 │ -│ 2015-10-01 │ 0.30574870926812175 │ -0.005472424727030245 │ -│ 2015-11-01 │ 0.31125409712077234 │ 0.0055053878526505895 │ -│ 2015-12-01 │ 0.3190967954651779 │ 0.007842698344405541 │ -│ 2016-01-01 │ 0.32636021432496176 │ 0.007263418859783877 │ - -``` - -データ結果は2009年に急増を示しています。その時、YouTubeは他の人の動画に対する字幕のアップロードを可能にしたコミュニティキャプション機能を削除していたようです。このことは、視覚障害者や耳が不自由な視聴者のために、クリエイターが動画に字幕を追加するよう働きかける非常に成功したキャンペーンを促しました。 - - -### 時間の経過とともにトップアップローダー {#top-uploaders-over-time} - -```sql -WITH uploaders AS - ( - SELECT uploader - FROM youtube - GROUP BY uploader - ORDER BY sum(view_count) DESC - LIMIT 10 - ) -SELECT - month, - uploader, - sum(view_count) AS total_views, - avg(dislike_count / like_count) AS like_to_dislike_ratio -FROM youtube -WHERE uploader IN (uploaders) -GROUP BY - toStartOfMonth(upload_date) AS month, - uploader -ORDER BY - month ASC, - total_views DESC; -``` - -```response -┌──────month─┬─uploader───────────────────┬─total_views─┬─like_to_dislike_ratio─┐ -│ 1970-01-01 │ T-Series │ 10957099 │ 0.022784656361208206 │ -│ 1970-01-01 │ Ryan's World │ 0 │ 0.003035559410234172 │ -│ 1970-01-01 
│ SET India │ 0 │ nan │ -│ 2006-09-01 │ Cocomelon - Nursery Rhymes │ 256406497 │ 0.7005566715978622 │ -│ 2007-06-01 │ Cocomelon - Nursery Rhymes │ 33641320 │ 0.7088650914344298 │ -│ 2008-02-01 │ WWE │ 43733469 │ 0.07198856488734842 │ -│ 2008-03-01 │ WWE │ 16514541 │ 0.1230603715431997 │ -│ 2008-04-01 │ WWE │ 5907295 │ 0.2089399470159618 │ -│ 2008-05-01 │ WWE │ 7779627 │ 0.09101676560436774 │ -│ 2008-06-01 │ WWE │ 7018780 │ 0.0974184753155297 │ -│ 2008-07-01 │ WWE │ 4686447 │ 0.1263845422065158 │ -│ 2008-08-01 │ WWE │ 4514312 │ 0.08384574274791441 │ -│ 2008-09-01 │ WWE │ 3717092 │ 0.07872802579349912 │ -``` - -### 視聴回数が増えるにつれて、いいね割合はどう変化しますか? {#how-do-like-ratio-changes-as-views-go-up} - -```sql -SELECT - concat('< ', formatReadableQuantity(view_range)) AS view_range, - is_comments_enabled, - round(like_ratio, 2) AS like_ratio -FROM -( -SELECT - power(10, CEILING(log10(view_count + 1))) as view_range, - is_comments_enabled, - avg(like_count / dislike_count) as like_ratio -FROM youtube WHERE dislike_count > 0 -GROUP BY - view_range, - is_comments_enabled HAVING view_range > 1 -ORDER BY - view_range ASC, - is_comments_enabled ASC -); -``` - -```response -┌─view_range────────┬─is_comments_enabled─┬─like_ratio─┐ -│ < 10.00 │ false │ 0.66 │ -│ < 10.00 │ true │ 0.66 │ -│ < 100.00 │ false │ 3 │ -│ < 100.00 │ true │ 3.95 │ -│ < 1.00 thousand │ false │ 8.45 │ -│ < 1.00 thousand │ true │ 13.07 │ -│ < 10.00 thousand │ false │ 18.57 │ -│ < 10.00 thousand │ true │ 30.92 │ -│ < 100.00 thousand │ false │ 23.55 │ -│ < 100.00 thousand │ true │ 42.13 │ -│ < 1.00 million │ false │ 19.23 │ -│ < 1.00 million │ true │ 37.86 │ -│ < 10.00 million │ false │ 12.13 │ -│ < 10.00 million │ true │ 30.72 │ -│ < 100.00 million │ false │ 6.67 │ -│ < 100.00 million │ true │ 23.32 │ -│ < 1.00 billion │ false │ 3.08 │ -│ < 1.00 billion │ true │ 20.69 │ -│ < 10.00 billion │ false │ 1.77 │ -│ < 10.00 billion │ true │ 19.5 │ -└───────────────────┴─────────────────────┴────────────┘ -``` - -### 視聴回数はどのように分布していますか? 
{#how-are-views-distributed} - -```sql -SELECT - labels AS percentile, - round(quantiles) AS views -FROM -( - SELECT - quantiles(0.999, 0.99, 0.95, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1)(view_count) AS quantiles, - ['99.9th', '99th', '95th', '90th', '80th', '70th','60th', '50th', '40th', '30th', '20th', '10th'] AS labels - FROM youtube -) -ARRAY JOIN - quantiles, - labels; -``` - -```response -┌─percentile─┬───views─┐ -│ 99.9th │ 1216624 │ -│ 99th │ 143519 │ -│ 95th │ 13542 │ -│ 90th │ 4054 │ -│ 80th │ 950 │ -│ 70th │ 363 │ -│ 60th │ 177 │ -│ 50th │ 97 │ -│ 40th │ 57 │ -│ 30th │ 32 │ -│ 20th │ 16 │ -│ 10th │ 6 │ -└────────────┴─────────┘ -``` diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/youtube-dislikes.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/youtube-dislikes.md.hash deleted file mode 100644 index 83997a3eaea..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/example-datasets/youtube-dislikes.md.hash +++ /dev/null @@ -1 +0,0 @@ -5be86b72338123bb diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/index.md b/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/index.md deleted file mode 100644 index d8f54efa655..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/index.md +++ /dev/null @@ -1,59 +0,0 @@ ---- -description: 'ClickHouseを使用して、チュートリアルとサンプルデータセットを利用して始めましょう' -keywords: -- 'clickhouse' -- 'install' -- 'tutorial' -- 'sample' -- 'datasets' -pagination_next: 'tutorial' -sidebar_label: '概要' -sidebar_position: 0 -slug: '/getting-started/example-datasets/' -title: 'チュートリアルとサンプルデータセット' ---- - - - - -# チュートリアルとサンプルデータセット - -ClickHouseの使い方を学ぶためのリソースがたくさんあります: - -- ClickHouseを立ち上げる必要がある場合は、[クイックスタート](../quick-start.mdx)をチェックしてください -- [ClickHouseチュートリアル](../tutorial.md)では、ニューヨーク市のタクシーのライドデータセットを分析します - -さらに、サンプルデータセットはClickHouseを学ぶ素晴らしい体験を提供し、重要なテクニックやコツを学び、ClickHouseの多くの強力な関数を利用する方法を示しています。サンプルデータセットは次のようになります: - - -| ページ | 説明 | -|-----|-----| -| [ニューヨークタクシーデータ](/getting-started/example-datasets/nyc-taxi) | 2009年以降にニューヨーク市から始まる数十億のタクシーおよび有料車両(Uber、Lyftなど)の旅行データ | -| [Criteoのテラバイトクリックログ](/getting-started/example-datasets/criteo) | Criteoからのテラバイトのクリックログ | -| [WikiStat](/getting-started/example-datasets/wikistat) | 0.5兆レコードを含むWikiStatデータセットを探索します。 | -| [TPC-DS (2012)](/getting-started/example-datasets/tpcds) | TPC-DSベンチマークデータセットとクエリ。 | -| [レシピデータセット](/getting-started/example-datasets/recipes) | 220万のレシピを含むRecipeNLGデータセット | -| [COVID-19オープンデータ](/getting-started/example-datasets/covid19) | COVID-19オープンデータは、COVID-19の疫学データと人口統計、経済、政府の対応などの関連要因の大規模でオープンソースのデータベースです | -| [NOAAの世界歴史気候ネットワーク](/getting-started/example-datasets/noaa) | 過去120年間の気候データの25億行 | -| [GitHubイベントデータセット](/getting-started/example-datasets/github-events) | 2011年から2020年12月6日までのGitHub上のすべてのイベントを含むデータセットで、31億レコードのサイズ。 | -| [Amazon顧客レビュー](/getting-started/example-datasets/amazon-reviews) | Amazon製品に関する1.5億以上の顧客レビュー | -| [ブラウン大学ベンチマーク](/getting-started/example-datasets/brown-benchmark) | 機械生成のログデータ用の新しい分析ベンチマーク | -| [GitHubデータを使用したClickHouseでのクエリ作成](/getting-started/example-datasets/github) | ClickHouseリポジトリのすべてのコミットと変更を含むデータセット | -| [ClickHouseを使用したStack Overflowデータの分析](/getting-started/example-datasets/stackoverflow) | ClickHouseを使用してStack Overflowデータを分析します | -| [AMPLabビッグデータベンチマーク](/getting-started/example-datasets/amplab-benchmark) | データウェアハウジングソリューションのパフォーマンスを比較するために使用されるベンチマークデータセット。 | -| 
[ニューヨーク公共図書館「メニューは何ですか?」データセット](/getting-started/example-datasets/menus) | ホテル、レストラン、カフェのメニューに関する1.3百万レコードの歴史的データを含むデータセット | -| [Laion-400Mデータセット](/getting-started/example-datasets/laion-400m-dataset) | 英語の画像キャプションを持つ4億の画像を含むデータセット | -| [スター・スキーマ・ベンチマーク (SSB, 2009)](/getting-started/example-datasets/star-schema) | スター・スキーマ・ベンチマーク(SSB)データセットとクエリ | -| [英国の不動産価格データセット](/getting-started/example-datasets/uk-price-paid) | 英国の不動産データセットを使用して、頻繁に実行するクエリのパフォーマンスを向上させるためのプロジェクションの使用方法を学びます。このデータセットには、イングランドとウェールズでの不動産の価格に関するデータが含まれています | -| [Redditコメントデータセット](/getting-started/example-datasets/reddit-comments) | 2005年12月から2023年3月までのRedditにおける公開コメントを含むデータセットで、JSON形式で140億行以上のデータがあります | -| [OnTime](/getting-started/example-datasets/ontime) | 航空便の定刻パフォーマンスを含むデータセット | -| [台湾の歴史的気象データセット](/getting-started/example-datasets/tw-weather) | 過去128年間の気象観測データの1.31億行 | -| [OpenSky Network 2020からのクラウドソースされた航空交通データ](/getting-started/example-datasets/opensky) | このデータセットのデータは、COVID-19パンデミック中の航空交通の発展を示すために、完全なOpenSkyデータセットから派生およびクリーニングされています。 | -| [NYPD苦情データ](/getting-started/example-datasets/nypd_complaint_data) | タブ区切り値データを5ステップで取り込み、クエリを実行します | -| [TPC-H (1999)](/getting-started/example-datasets/tpch) | TPC-Hベンチマークデータセットとクエリ。 | -| [Foursquareの場所](/getting-started/example-datasets/foursquare-places) | 地図上の店、レストラン、公園、遊び場、モニュメントに関する情報を含む1億以上のレコードを持つデータセット。 | -| [YouTubeの嫌いデータセット](/getting-started/example-datasets/youtube-dislikes) | YouTube動画の「嫌い」というコレクション。 | -| [セルタワーデータセットを使用した地理データ](/getting-started/example-datasets/cell-towers) | OpenCelliDデータをClickHouseにロードし、Apache SupersetをClickHouseに接続し、データに基づいたダッシュボードを構築する方法を学びます | -| [環境センサーのデータ](/getting-started/example-datasets/environmental-sensors) | Sensor.Communityからの200億以上のデータレコード、貢献者駆動のグローバルセンサーネットワークによるオープン環境データを作成します。 | -| [匿名化されたウェブ分析](/getting-started/example-datasets/metrica) | ヒット数と訪問数を含む匿名化されたウェブ分析データを含む2つのテーブルからなるデータセット | diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/index.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/index.md.hash deleted file mode 100644 index b9700d62a89..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/index.md.hash +++ /dev/null @@ -1 +0,0 @@ -a94756431eef4487 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/install.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/install.md.hash deleted file mode 100644 index f287aae3df0..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/install.md.hash +++ /dev/null @@ -1 +0,0 @@ -0dc4ed787f9a3c4f diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/install/_snippets/_deb_install.md b/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/install/_snippets/_deb_install.md deleted file mode 100644 index 4701824a230..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/install/_snippets/_deb_install.md +++ /dev/null @@ -1,146 +0,0 @@ ---- -{} ---- - -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; - - -# ClickHouseをDebian/Ubuntuにインストールする {#install-from-deb-packages} - -> **Debian**または**Ubuntu**には、公式にコンパイルされた`deb`パッケージを使用することを推奨します。 - - - -## Debianリポジトリの設定 {#setup-the-debian-repository} - -ClickHouseをインストールするには、以下のコマンドを実行します。 - -```bash - -# 必要なパッケージをインストール -sudo apt-get install -y apt-transport-https ca-certificates curl gnupg - - -# ClickHouse GPGキーをダウンロードしてキーハンドリングに保存 -curl -fsSL 'https://packages.clickhouse.com/rpm/lts/repodata/repomd.xml.key' | sudo gpg 
--dearmor -o /usr/share/keyrings/clickhouse-keyring.gpg - - -# システムアーキテクチャを取得 -ARCH=$(dpkg --print-architecture) - - -# ClickHouseリポジトリをaptソースに追加 -echo "deb [signed-by=/usr/share/keyrings/clickhouse-keyring.gpg arch=${ARCH}] https://packages.clickhouse.com/deb stable main" | sudo tee /etc/apt/sources.list.d/clickhouse.list - - -# aptパッケージリストを更新 -sudo apt-get update -``` - -- 必要に応じて`stable`を`lts`に置き換えて異なる[リリース種別](/knowledgebase/production)を使用できます。 -- [packages.clickhouse.com](https://packages.clickhouse.com/deb/pool/main/c/)から手動でパッケージをダウンロードしてインストールすることもできます。 -
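As a concrete illustration of the `stable`/`lts` note above, switching the repository to the LTS release channel only changes the component name in the `tee` line. This is a sketch that reuses the same keyring path and `ARCH` variable from the commands above; it is not part of the original snippet:

```bash
# Hypothetical variant of the repository line above, pointing at the "lts"
# channel instead of "stable" (everything else stays the same).
echo "deb [signed-by=/usr/share/keyrings/clickhouse-keyring.gpg arch=${ARCH}] https://packages.clickhouse.com/deb lts main" \
  | sudo tee /etc/apt/sources.list.d/clickhouse.list

# Refresh the package lists after changing the repository definition.
sudo apt-get update
```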
-
-古いディストリビューションのdebパッケージインストール方法 - -```bash - -# 必要なパッケージをインストール -sudo apt-get install apt-transport-https ca-certificates dirmngr - - -# パッケージを認証するためにClickHouse GPGキーを追加 -sudo apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv 8919F6BD2B48D754 - - -# ClickHouseリポジトリをaptソースに追加 -echo "deb https://packages.clickhouse.com/deb stable main" | sudo tee \ - /etc/apt/sources.list.d/clickhouse.list - - -# aptパッケージリストを更新 -sudo apt-get update - - -# ClickHouseサーバーおよびクライアントパッケージをインストール -sudo apt-get install -y clickhouse-server clickhouse-client - - -# ClickHouseサーバーサービスを起動 -sudo service clickhouse-server start - - -# ClickHouseコマンドラインクライアントを起動 -clickhouse-client # またはパスワードを設定している場合は "clickhouse-client --password" 。 -``` - -
- -## ClickHouseサーバーおよびクライアントのインストール {#install-clickhouse-server-and-client} - -```bash -sudo apt-get install -y clickhouse-server clickhouse-client -``` - -## ClickHouseを起動する {#start-clickhouse-server} - -ClickHouseサーバーを起動するには、次のコマンドを実行します。 - -```bash -sudo service clickhouse-server start -``` - -ClickHouseクライアントを起動するには、次のコマンドを実行します。 - -```bash -clickhouse-client -``` - -サーバーにパスワードを設定した場合は、次のコマンドを実行する必要があります。 - -```bash -clickhouse-client --password -``` - -## スタンドアロンのClickHouse Keeperをインストールする {#install-standalone-clickhouse-keeper} - -:::tip -本番環境では、ClickHouse Keeperを専用ノードで実行することを強く推奨します。 -テスト環境では、ClickHouse ServerとClickHouse Keeperを同じサーバーで実行する場合、 -ClickHouse KeeperはClickHouseサーバーに含まれているため、別途インストールする必要はありません。 -::: - -スタンドアロンのClickHouse Keeperサーバーに`clickhouse-keeper`をインストールするには、次を実行します。 - -```bash -sudo apt-get install -y clickhouse-keeper -``` - -## ClickHouse Keeperを有効にして起動する {#enable-and-start-clickhouse-keeper} - -```bash -sudo systemctl enable clickhouse-keeper -sudo systemctl start clickhouse-keeper -sudo systemctl status clickhouse-keeper -``` - -
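After working through the install and start steps above, a quick end-to-end check can confirm that the server answers queries. The following is an illustrative sketch using standard `clickhouse-client` and HTTP-interface invocations on the default ports; it is an addition, not part of the original snippet:

```bash
# Confirm the server service is running (systemd-based systems).
sudo systemctl status clickhouse-server

# Run a trivial query through the native client...
clickhouse-client --query "SELECT version()"

# ...or through the HTTP interface on the default port 8123.
curl 'http://localhost:8123/' --data-binary "SELECT 1"
```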
- -## パッケージ {#packages} - -利用可能なさまざまなdebパッケージの詳細は以下の通りです。 - -| パッケージ | 説明 | -|--------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| `clickhouse-common-static` | ClickHouseのコンパイルされたバイナリファイルをインストールします。 | -| `clickhouse-server` | `clickhouse-server`のシンボリックリンクを作成し、デフォルトのサーバー構成をインストールします。 | -| `clickhouse-client` | `clickhouse-client`およびその他のクライアント関連ツールのシンボリックリンクを作成し、クライアント構成ファイルをインストールします。 | -| `clickhouse-common-static-dbg` | デバッグ情報付きのClickHouseのコンパイルされたバイナリファイルをインストールします。 | -| `clickhouse-keeper` | 専用のClickHouse KeeperノードにClickHouse Keeperをインストールするために使用します。同じサーバー上でClickHouseサーバーを実行している場合、このパッケージをインストールする必要はありません。ClickHouse KeeperとデフォルトのClickHouse Keeper構成ファイルをインストールします。 | - -
-:::info -特定のバージョンのClickHouseをインストールする場合、すべてのパッケージを同じバージョンでインストールする必要があります: -`sudo apt-get install clickhouse-server=21.8.5.7 clickhouse-client=21.8.5.7 clickhouse-common-static=21.8.5.7` -::: diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/install/_snippets/_deb_install.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/install/_snippets/_deb_install.md.hash deleted file mode 100644 index 43a7c9f81c1..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/install/_snippets/_deb_install.md.hash +++ /dev/null @@ -1 +0,0 @@ -2566a26d4aff5c78 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/install/_snippets/_docker.md b/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/install/_snippets/_docker.md deleted file mode 100644 index f20a5f7026f..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/install/_snippets/_docker.md +++ /dev/null @@ -1,193 +0,0 @@ ---- -{} ---- - - - - -# ClickHouseをDockerでインストールする - -便利のために、[Docker Hub](https://hub.docker.com/r/clickhouse/clickhouse-server/)のガイドを以下に再現します。利用可能なDockerイメージは、公式のClickHouse debパッケージを使用しています。 - -Docker pullコマンド: - -```bash -docker pull clickhouse/clickhouse-server -``` - -## バージョン {#versions} - -- `latest`タグは、最新の安定ブランチの最新リリースを指します。 -- `22.2`のようなブランチタグは、対応するブランチの最新リリースを指します。 -- `22.2.3`や`22.2.3.5`のようなフルバージョンタブは、対応するリリースを指します。 -- `head`タグは、デフォルトブランチに対する最新のコミットから構築されています。 -- 各タグには、`-alpine`というオプションのサフィックスがあり、これは`alpine`の上に構築されていることを示します。 - -### 互換性 {#compatibility} - -- amd64イメージは、[SSE3命令](https://en.wikipedia.org/wiki/SSE3)のサポートを必要とします。2005年以降のほぼすべてのx86 CPUはSSE3をサポートしています。 -- arm64イメージは、[ARMv8.2-Aアーキテクチャ](https://en.wikipedia.org/wiki/AArch64#ARMv8.2-A)のサポートを必要とし、さらにLoad-Acquire RCpcレジスタを必要とします。このレジスタはARMv8.2-Aバージョンではオプションであり、[ARMv8.3-A](https://en.wikipedia.org/wiki/AArch64#ARMv8.3-A)では必須です。Graviton >=2、Azure、およびGCPインスタンスでサポートされています。サポートされていないデバイスの例には、Raspberry Pi 4 (ARMv8.0-A)やJetson AGX Xavier/Orin (ARMv8.2-A)があります。 -- ClickHouse 24.11以降、Ubuntuイメージは`ubuntu:22.04`をベースイメージとして使用し始めました。これは、[パッチ](https://github.com/moby/moby/commit/977283509f75303bc6612665a04abf76ff1d2468)を含むdockerバージョン>= `20.10.10`を必要とします。回避策として、`docker run --security-opt seccomp=unconfined`を使用できますが、セキュリティ上の影響があります。 - -## このイメージの使い方 {#how-to-use-image} - -### サーバーインスタンスの起動 {#start-server-instance} - -```bash -docker run -d --name some-clickhouse-server --ulimit nofile=262144:262144 clickhouse/clickhouse-server -``` - -デフォルトでは、ClickHouseはDockerネットワーク経由でのみアクセス可能です。以下のネットワーキングセクションを参照してください。 - -デフォルトでは、上記のサーバーインスタンスは、パスワードなしで`default`ユーザーとして実行されます。 - -### ネイティブクライアントからの接続 {#connect-to-it-from-native-client} - -```bash -docker run -it --rm --network=container:some-clickhouse-server --entrypoint clickhouse-client clickhouse/clickhouse-server - -# または -docker exec -it some-clickhouse-server clickhouse-client -``` - -ClickHouseクライアントに関する詳細情報は、[ClickHouseクライアント](/interfaces/cli)を参照してください。 - -### curlを使用して接続 {#connect-to-it-using-curl} - -```bash -echo "SELECT 'Hello, ClickHouse!'" | docker run -i --rm --network=container:some-clickhouse-server buildpack-deps:curl curl 'http://localhost:8123/?query=' -s --data-binary @- -``` - -HTTPインターフェイスに関する詳細情報は、[ClickHouse HTTPインターフェイス](/interfaces/http)を参照してください。 - -### コンテナの停止 / 削除 {#stopping-removing-container} - -```bash -docker stop some-clickhouse-server -docker rm some-clickhouse-server -``` - -### ネットワーキング {#networking} - -:::note -あらかじめ定義されたユーザー`default`は、パスワードが設定されていない限りネットワークアクセスを持ちません。 
-以下の「デフォルトデータベースとユーザーの作成方法」および「`default`ユーザーの管理」を参照してください。 -::: - -Dockerで実行しているClickHouseを公開するには、ホストポートを使用してコンテナ内部の特定のポートを[マッピング](https://docs.docker.com/config/containers/container-networking/)します。 - -```bash -docker run -d -p 18123:8123 -p19000:9000 -e CLICKHOUSE_PASSWORD=changeme --name some-clickhouse-server --ulimit nofile=262144:262144 clickhouse/clickhouse-server -echo 'SELECT version()' | curl 'http://localhost:18123/?password=changeme' --data-binary @- -``` - -または、コンテナが[ホストポートを直接使用する](https://docs.docker.com/network/host/)ことを許可し、`--network=host`を使用します(これによりネットワークパフォーマンスが向上します): - -```bash -docker run -d --network=host --name some-clickhouse-server --ulimit nofile=262144:262144 clickhouse/clickhouse-server -echo 'SELECT version()' | curl 'http://localhost:8123/' --data-binary @- -``` - -:::note -上記の例のデフォルトユーザーは、ローカルホストのリクエストのみに使用可能です。 -::: - -### ボリューム {#volumes} - -通常、永続性を達成するために、以下のフォルダーをコンテナ内にマウントすることをお勧めします: - -- `/var/lib/clickhouse/` - ClickHouseがデータを格納するメインフォルダー -- `/var/log/clickhouse-server/` - ログ - -```bash -docker run -d \ - -v "$PWD/ch_data:/var/lib/clickhouse/" \ - -v "$PWD/ch_logs:/var/log/clickhouse-server/" \ - --name some-clickhouse-server --ulimit nofile=262144:262144 clickhouse/clickhouse-server -``` - -また、次のものをマウントすることを考慮するかもしれません: - -- `/etc/clickhouse-server/config.d/*.xml` - サーバー設定の調整ファイル -- `/etc/clickhouse-server/users.d/*.xml` - ユーザー設定の調整ファイル -- `/docker-entrypoint-initdb.d/` - データベース初期化スクリプトのフォルダー(下記参照)。 - -## Linuxの機能 {#linear-capabilities} - -ClickHouseには、いくつかの[Linux機能](https://man7.org/linux/man-pages/man7/capabilities.7.html)を有効にする必要がある高度な機能があります。 - -これらはオプションであり、次の[dockerコマンドライン引数](https://docs.docker.com/engine/reference/run/#runtime-privilege-and-linux-capabilities)を使用して有効にできます: - -```bash -docker run -d \ - --cap-add=SYS_NICE --cap-add=NET_ADMIN --cap-add=IPC_LOCK \ - --name some-clickhouse-server --ulimit nofile=262144:262144 clickhouse/clickhouse-server -``` - -詳細については、["DockerでのCAP_IPC_LOCKおよびCAP_SYS_NICE機能の設定"](/knowledgebase/configure_cap_ipc_lock_and_cap_sys_nice_in_docker)を参照してください。 - -## 設定 {#configuration} - -コンテナは、[HTTPインターフェイス](https://clickhouse.com/docs/interfaces/http_interface/)用にポート8123を、[ネイティブクライアント](https://clickhouse.com/docs/interfaces/tcp/)用にポート9000を公開しています。 - -ClickHouseの設定は、"config.xml"というファイルで表されます([ドキュメンテーション](https://clickhouse.com/docs/operations/configuration_files/))。 - -### カスタム設定でサーバーインスタンスを起動する {#start-server-instance-with-custom-config} - -```bash -docker run -d --name some-clickhouse-server --ulimit nofile=262144:262144 -v /path/to/your/config.xml:/etc/clickhouse-server/config.xml clickhouse/clickhouse-server -``` - -### カスタムユーザーとしてサーバーを起動する {#start-server-custom-user} - -```bash - -# $PWD/data/clickhouseが存在し、現在のユーザーが所有している必要があります -docker run --rm --user "${UID}:${GID}" --name some-clickhouse-server --ulimit nofile=262144:262144 -v "$PWD/logs/clickhouse:/var/log/clickhouse-server" -v "$PWD/data/clickhouse:/var/lib/clickhouse" clickhouse/clickhouse-server -``` - -ローカルディレクトリをマウントしたイメージを使用する場合、適切なファイル所有権を維持するためにユーザーを指定する必要があるでしょう。`--user`引数を使用して、コンテナ内で`/var/lib/clickhouse`と`/var/log/clickhouse-server`をマウントします。さもなければ、イメージがエラーを出して起動しません。 - -### rootからサーバーを起動する {#start-server-from-root} - -ルートからサーバーを起動することは、ユーザー名前空間が有効な場合に便利です。 -そのために次のように実行します: - -```bash -docker run --rm -e CLICKHOUSE_RUN_AS_ROOT=1 --name clickhouse-server-userns -v "$PWD/logs/clickhouse:/var/log/clickhouse-server" -v "$PWD/data/clickhouse:/var/lib/clickhouse" clickhouse/clickhouse-server -``` - -### スタート時にデフォルトデータベースとユーザーを作成する方法 
{#how-to-create-default-db-and-user} - -コンテナの起動時に、ユーザー(デフォルトでは`default`という名前のユーザー)がデータベースを作成したい場合があります。環境変数`CLICKHOUSE_DB`、`CLICKHOUSE_USER`、`CLICKHOUSE_DEFAULT_ACCESS_MANAGEMENT`、および`CLICKHOUSE_PASSWORD`を使用して行うことができます: - -```bash -docker run --rm -e CLICKHOUSE_DB=my_database -e CLICKHOUSE_USER=username -e CLICKHOUSE_DEFAULT_ACCESS_MANAGEMENT=1 -e CLICKHOUSE_PASSWORD=password -p 9000:9000/tcp clickhouse/clickhouse-server -``` - -#### `default`ユーザーの管理 {#managing-default-user} - -ユーザー`default`は、`CLICKHOUSE_USER`、`CLICKHOUSE_PASSWORD`、または`CLICKHOUSE_DEFAULT_ACCESS_MANAGEMENT`が設定されていない場合、デフォルトでネットワークアクセスが無効になっています。 - -環境変数`CLICKHOUSE_SKIP_USER_SETUP`を1に設定することで、`default`ユーザーを安全でなく利用可能にする方法もあります: - -```bash -docker run --rm -e CLICKHOUSE_SKIP_USER_SETUP=1 -p 9000:9000/tcp clickhouse/clickhouse-server -``` - -## このイメージを拡張する方法 {#how-to-extend-image} - -このイメージから派生したイメージで追加の初期化を行うには、`/docker-entrypoint-initdb.d`の下に1つ以上の`*.sql`、`*.sql.gz`、または`*.sh`スクリプトを追加します。エントリポイントが`initdb`を呼び出すと、そのディレクトリにある`*.sql`ファイルが実行され、実行可能な`*.sh`スクリプトが実行され、非実行可能な`*.sh`スクリプトがソースされて、サービスが開始される前に更なる初期化が行われます。 -また、初期化中にclickhouse-clientに使用される環境変数`CLICKHOUSE_USER`と`CLICKHOUSE_PASSWORD`を提供できます。 - -例えば、別のユーザーとデータベースを追加するには、`/docker-entrypoint-initdb.d/init-db.sh`に以下を追加します: - -```bash -#!/bin/bash -set -e - -clickhouse client -n <<-EOSQL - CREATE DATABASE docker; - CREATE TABLE docker.docker (x Int32) ENGINE = Log; -EOSQL -``` diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/install/_snippets/_docker.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/install/_snippets/_docker.md.hash deleted file mode 100644 index e50347c17a1..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/install/_snippets/_docker.md.hash +++ /dev/null @@ -1 +0,0 @@ -5cd8e0b435460a63 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/install/_snippets/_linux_tar_install.md b/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/install/_snippets/_linux_tar_install.md deleted file mode 100644 index 61d41536039..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/install/_snippets/_linux_tar_install.md +++ /dev/null @@ -1,107 +0,0 @@ ---- -{} ---- - - - - -# ClickHouseのtgzアーカイブを使用したインストール - -> `deb` または `rpm` パッケージのインストールが不可能なすべてのLinuxディストリビューションに対して、公式の事前コンパイルされた `tgz` アーカイブを使用することをお勧めします。 - - - -## 最新の安定版をダウンロードしてインストールする {#install-latest-stable} - -必要なバージョンは、https://packages.clickhouse.com/tgz/ から `curl` または `wget` を使用してダウンロードできます。 -その後、ダウンロードしたアーカイブを解凍し、インストールスクリプトを使用してインストールする必要があります。 - -以下は、最新の安定版をインストールする方法の例です。 - -:::note -本番環境では、最新の `stable` バージョンを使用することをお勧めします。 -リリース番号は、この [GitHubページ](https://github.com/ClickHouse/ClickHouse/tags) で -`-stable` の接尾辞を持つものを見つけることができます。 -::: - -## 最新のClickHouseバージョンを取得する {#get-latest-version} - -GitHubから最新のClickHouseバージョンを取得し、`LATEST_VERSION` 変数に格納します。 - -```bash -LATEST_VERSION=$(curl -s https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/utils/list-versions/version_date.tsv | \ - grep -Eo '[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+' | sort -V -r | head -n 1) -export LATEST_VERSION -``` - -## システムアーキテクチャの検出 {#detect-system-architecture} - -システムアーキテクチャを検出し、ARCH変数をそれに応じて設定します。 - -```bash -case $(uname -m) in - x86_64) ARCH=amd64 ;; # Intel/AMD 64ビットプロセッサ用 - aarch64) ARCH=arm64 ;; # ARM 64ビットプロセッサ用 - *) echo "Unknown architecture $(uname -m)"; exit 1 ;; # サポートされていないアーキテクチャの場合は終了 -esac -``` - -## 各ClickHouseコンポーネントのtarボールをダウンロード {#download-tarballs} - 
-各ClickHouseコンポーネントのtarボールをダウンロードします。ループは先にアーキテクチャ固有の -パッケージを試し、それが失敗した場合は一般的なものにフォールバックします。 - -```bash -for PKG in clickhouse-common-static clickhouse-common-static-dbg clickhouse-server clickhouse-client clickhouse-keeper -do - curl -fO "https://packages.clickhouse.com/tgz/stable/$PKG-$LATEST_VERSION-${ARCH}.tgz" \ - || curl -fO "https://packages.clickhouse.com/tgz/stable/$PKG-$LATEST_VERSION.tgz" -done -``` - -## パッケージの抽出とインストール {#extract-and-install} - -以下のコマンドを実行して、次のパッケージを抽出してインストールします: -- `clickhouse-common-static` - -```bash - -# clickhouse-common-staticパッケージを抽出してインストール -tar -xzvf "clickhouse-common-static-$LATEST_VERSION-${ARCH}.tgz" \ - || tar -xzvf "clickhouse-common-static-$LATEST_VERSION.tgz" -sudo "clickhouse-common-static-$LATEST_VERSION/install/doinst.sh" -``` - - -- `clickhouse-common-static-dbg` - -```bash - -# デバッグシンボルパッケージを抽出してインストール -tar -xzvf "clickhouse-common-static-dbg-$LATEST_VERSION-${ARCH}.tgz" \ - || tar -xzvf "clickhouse-common-static-dbg-$LATEST_VERSION.tgz" -sudo "clickhouse-common-static-dbg-$LATEST_VERSION/install/doinst.sh" -``` - -- `clickhouse-server` - -```bash - -# 設定付きのサーバーパッケージを抽出してインストール -tar -xzvf "clickhouse-server-$LATEST_VERSION-${ARCH}.tgz" \ - || tar -xzvf "clickhouse-server-$LATEST_VERSION.tgz" -sudo "clickhouse-server-$LATEST_VERSION/install/doinst.sh" configure -sudo /etc/init.d/clickhouse-server start # サーバーを起動 -``` - -- `clickhouse-client` - -```bash - -# クライアントパッケージを抽出してインストール -tar -xzvf "clickhouse-client-$LATEST_VERSION-${ARCH}.tgz" \ - || tar -xzvf "clickhouse-client-$LATEST_VERSION.tgz" -sudo "clickhouse-client-$LATEST_VERSION/install/doinst.sh" -``` - - diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/install/_snippets/_linux_tar_install.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/install/_snippets/_linux_tar_install.md.hash deleted file mode 100644 index 1e9d4967d15..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/install/_snippets/_linux_tar_install.md.hash +++ /dev/null @@ -1 +0,0 @@ -ce88623104cf637c diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/install/_snippets/_macos.md b/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/install/_snippets/_macos.md deleted file mode 100644 index f406fa73a86..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/install/_snippets/_macos.md +++ /dev/null @@ -1,103 +0,0 @@ ---- -{} ---- - -import Image from "@theme/IdealImage"; -import dev_error from "@site/static/images/knowledgebase/fix-the-developer-verification-error-in-macos/dev-verification-error.png"; -import privacy_default from "@site/static/images/knowledgebase/fix-the-developer-verification-error-in-macos/privacy-and-security-default-view.png"; -import privacy_allow from "@site/static/images/knowledgebase/fix-the-developer-verification-error-in-macos/privacy-and-security-screen-allow-anyway.png"; - - -# ClickHouseをHomebrewを使用してインストールする - - - -## コミュニティのHomebrewフォーミュラを使用してインストールする {#install-using-community-homebrew-formula} - -[Homebrew](https://brew.sh/)を使用してmacOSにClickHouseをインストールするには、ClickHouseコミュニティの[homebrewフォーミュラ](https://formulae.brew.sh/cask/clickhouse)を使用できます。 - -```bash -brew install --cask clickhouse -``` - -## MacOSでの開発者検証エラーを修正する {#fix-developer-verification-error-macos} - -`brew`を使用してClickHouseをインストールすると、MacOSからエラーが表示されることがあります。デフォルトでは、MacOSは検証できない開発者によって作成されたアプリケーションやツールを実行しません。 - -任意の`clickhouse`コマンドを実行しようとすると、次のエラーが表示されることがあります: - -MacOSの開発者検証エラーのダイアログ - 
-この検証エラーを回避するには、以下の方法でMacOSの隔離ビンからアプリを削除する必要があります。システム設定ウィンドウ内の適切な設定を見つけるか、ターミナルを使用するか、ClickHouseを再インストールする方法があります。 - -### システム設定のプロセス {#system-settings-process} - -`clickhouse`実行ファイルを隔離ビンから削除する最も簡単な方法は以下の通りです: - -1. **システム設定**を開きます。 -1. **プライバシーとセキュリティ**に移動します: - - MacOSのプライバシーとセキュリティ設定のデフォルトビュー - -1. ウィンドウの下部までスクロールして、_「clickhouse-macos-aarch64」が未確認の開発者からのものであるため、使用がブロックされました_というメッセージを見つけます。 -1. **許可する**をクリックします。 - - MacOSのプライバシーとセキュリティ設定に「許可する」ボタンが表示されている - -1. MacOSユーザーパスワードを入力します。 - -これでターミナルで`clickhouse`コマンドを実行できるようになるはずです。 - -### ターミナルプロセス {#terminal-process} - -場合によっては、`許可する`ボタンを押してもこの問題が解決しないことがあります。その場合は、コマンドラインを使用してこのプロセスを実行することもできます。また、コマンドラインの使用を好むかもしれません! - -まず、Homebrewが`clickhouse`実行ファイルをインストールした場所を確認します: - -```shell -which clickhouse -``` - -これにより、次のような出力が得られます: - -```shell -/opt/homebrew/bin/clickhouse -``` - -次のコマンドで`xattr -d com.apple.quarantine`を実行し、前のコマンドのパスを続けて入力して、`clickhouse`を隔離ビンから削除します: - -```shell -xattr -d com.apple.quarantine /opt/homebrew/bin/clickhouse -``` - -これで`clickhouse`実行ファイルを実行できるようになります: - -```shell -clickhouse -``` - -これにより、次のような出力が得られます: - -```bash -次のコマンドのいずれかを使用してください: -clickhouse local [args] -clickhouse client [args] -clickhouse benchmark [args] -... - -## ClickHouseを再インストールして問題を修正する {#fix-issue} - -Brewには、インストールされたバイナリの隔離を避けるためのコマンドラインオプションがあります。 - -まず、ClickHouseをアンインストールします: - -```shell -brew uninstall clickhouse -``` - -次に、`--no-quarantine`オプションを使用してClickHouseを再インストールします: - -```shell -brew install --no-quarantine clickhouse -``` - diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/install/_snippets/_macos.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/install/_snippets/_macos.md.hash deleted file mode 100644 index 02a2a0a8ee1..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/install/_snippets/_macos.md.hash +++ /dev/null @@ -1 +0,0 @@ -3b6e4a216dd3db85 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/install/_snippets/_quick_install.md b/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/install/_snippets/_quick_install.md deleted file mode 100644 index 71a8dbba4ef..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/install/_snippets/_quick_install.md +++ /dev/null @@ -1,71 +0,0 @@ ---- -{} ---- - - - - -# ClickHouseのインストールスクリプトをcurlを使用して実行する - -本番環境でClickHouseをインストールする必要がない場合、最も迅速な方法は、curlを使用してインストールスクリプトを実行することです。このスクリプトは、あなたのOSに適したバイナリを判断します。 - - - -## curlを使用してClickHouseをインストールする {#install-clickhouse-using-curl} - -次のコマンドを実行して、あなたのオペレーティングシステム用の単一のバイナリをダウンロードします。 - -```bash -curl https://clickhouse.com/ | sh -``` - -:::note -Macユーザーへ: バイナリの開発者が確認できないというエラーが表示される場合は、[こちら](/knowledgebase/fix-developer-verification-error-in-macos)を参照してください。 -::: - -## clickhouse-localを起動する {#start-clickhouse-local} - -`clickhouse-local`を使用すると、ClickHouseの強力なSQL構文を使用してローカルおよびリモートファイルを処理できます。設定を必要とせずに使用できます。テーブルデータは一時的な場所に保存されるため、`clickhouse-local`を再起動した後は、以前に作成したテーブルは利用できなくなります。 - -以下のコマンドを実行して[clickhouse-local](/operations/utilities/clickhouse-local)を起動します: - -```bash -./clickhouse -``` - -## clickhouse-serverを起動する {#start-clickhouse-server} - -データを永続化したい場合は、`clickhouse-server`を実行します。以下のコマンドを使用してClickHouseサーバーを起動できます: - -```bash -./clickhouse server -``` - -## clickhouse-clientを起動する {#start-clickhouse-client} - -サーバーが稼働している状態で、新しいターミナルウィンドウを開き、以下のコマンドを実行して`clickhouse-client`を起動します: - -```bash -./clickhouse client -``` - -次のような表示がされます: - -```response -./clickhouse client -ClickHouse client version 24.5.1.117 
(official build). -Connecting to localhost:9000 as user default. -Connected to ClickHouse server version 24.5.1. - -local-host :) -``` - -テーブルデータは現在のディレクトリに保存されており、ClickHouseサーバーを再起動後も利用可能です。必要に応じて、`./clickhouse server`に`-C config.xml`を追加のコマンドライン引数として渡し、設定ファイルでさらなる設定を提供することができます。すべての利用可能な設定は[こちら](/operations/server-configuration-parameters/settings)に文書化されており、[例の設定ファイルテンプレート](https://github.com/ClickHouse/ClickHouse/blob/master/programs/server/config.xml)にも記載されています。 - -これで、SQLコマンドをClickHouseに送信する準備が整いました! - -:::tip -[クイックスタート](/quick-start.mdx)では、テーブルの作成とデータの挿入に関する手順を説明しています。 -::: - - diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/install/_snippets/_quick_install.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/install/_snippets/_quick_install.md.hash deleted file mode 100644 index 6bf44c62680..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/install/_snippets/_quick_install.md.hash +++ /dev/null @@ -1 +0,0 @@ -d4d61480dfb324cc diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/install/_snippets/_rpm_install.md b/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/install/_snippets/_rpm_install.md deleted file mode 100644 index 87d9713b548..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/install/_snippets/_rpm_install.md +++ /dev/null @@ -1,90 +0,0 @@ ---- -{} ---- - - - - -# ClickHouseをrpmベースのディストリビューションにインストールする {#from-rpm-packages} - -> **CentOS**、**RedHat**、および他のすべてのrpmベースのLinuxディストリビューションには、公式の事前コンパイル済み `rpm` パッケージを使用することをお勧めします。 - - - -## RPMリポジトリをセットアップする {#setup-the-rpm-repository} - -次のコマンドを実行して公式リポジトリを追加します。 - -```bash -sudo yum install -y yum-utils -sudo yum-config-manager --add-repo https://packages.clickhouse.com/rpm/clickhouse.repo -``` - -`zypper` パッケージマネージャーを使用しているシステム(openSUSE、SLES)の場合は、次のコマンドを実行します。 - -```bash -sudo zypper addrepo -r https://packages.clickhouse.com/rpm/clickhouse.repo -g -sudo zypper --gpg-auto-import-keys refresh clickhouse-stable -``` - -以下のステップでは、使用しているパッケージマネージャーに応じて、`yum install` を `zypper install` に置き換えることができます。 - -## ClickHouseサーバーとクライアントをインストールする {#install-clickhouse-server-and-client-1} - -ClickHouseをインストールするには、次のコマンドを実行します。 - -```bash -sudo yum install -y clickhouse-server clickhouse-client -``` - -- 必要に応じて、`stable` を `lts` に置き換えて、異なる [リリースタイプ](/knowledgebase/production) を使用することができます。 -- [packages.clickhouse.com/rpm](https://packages.clickhouse.com/rpm/stable) から手動でパッケージをダウンロードしてインストールすることができます。 -- 特定のバージョンを指定するには、パッケージ名の末尾に `-$version` を追加します。例: - -```bash -sudo yum install clickhouse-server-22.8.7.34 -``` - -## ClickHouseサーバーを起動する {#start-clickhouse-server-1} - -ClickHouseサーバーを起動するには、次のコマンドを実行します。 - -```bash -sudo systemctl enable clickhouse-server -sudo systemctl start clickhouse-server -sudo systemctl status clickhouse-server -``` - -ClickHouseクライアントを起動するには、次のコマンドを実行します。 - -```sql -clickhouse-client -``` - -サーバーのパスワードを設定した場合は、次のコマンドを実行する必要があります。 - -```bash -clickhouse-client --password -``` - -## スタンドアロンのClickHouse Keeperをインストールする {#install-standalone-clickhouse-keeper-1} - -:::tip -本番環境では、ClickHouse Keeperを専用ノードで実行することを強くお勧めします。テスト環境では、ClickHouseサーバーとClickHouse Keeperを同一サーバーで実行する場合、ClickHouse KeeperはClickHouseサーバーに含まれているため、インストールする必要はありません。 -::: - -スタンドアロンのClickHouse Keeperサーバーに `clickhouse-keeper` をインストールするには、次のコマンドを実行します。 - -```bash -sudo yum install -y clickhouse-keeper -``` - -## ClickHouse Keeperを有効にして起動する {#enable-and-start-clickhouse-keeper-1} - -```bash -sudo systemctl enable 
clickhouse-keeper -sudo systemctl start clickhouse-keeper -sudo systemctl status clickhouse-keeper -``` - - diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/install/_snippets/_rpm_install.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/install/_snippets/_rpm_install.md.hash deleted file mode 100644 index bd15d3fe1e0..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/install/_snippets/_rpm_install.md.hash +++ /dev/null @@ -1 +0,0 @@ -a948be50ae4217da diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/install/_snippets/_windows_install.md b/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/install/_snippets/_windows_install.md deleted file mode 100644 index c85bd29d91e..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/install/_snippets/_windows_install.md +++ /dev/null @@ -1,88 +0,0 @@ ---- -{} ---- - - - - -# WindowsにWSLでClickHouseをインストールする - -## 要件 {#requirements} - -:::note -WindowsにClickHouseをインストールするには、WSL (Windows Subsystem for Linux) が必要です。 -::: - - - -## WSLをインストールする {#install-wsl} - -管理者としてWindows PowerShellを開き、次のコマンドを実行します: - -```bash -wsl --install -``` - -新しいUNIXユーザー名とパスワードの入力を求められます。希望のユーザー名とパスワードを入力すると、次のようなメッセージが表示されます: - -```bash -Welcome to Ubuntu 24.04.1 LTS (GNU/Linux 5.15.133.1-microsoft-WSL2 x86_64) -``` - -## curlを使用したスクリプトでClickHouseをインストールする {#install-clickhouse-via-script-using-curl} - -次のコマンドを実行して、curlを使用してスクリプトでClickHouseをインストールします: - -```bash -curl https://clickhouse.com/ | sh -``` - -スクリプトが正常に実行されると、次のメッセージが表示されます: - -```bash -Successfully downloaded the ClickHouse binary, you can run it as: - ./clickhouse -``` - -## clickhouse-localを起動する {#start-clickhouse-local} - -`clickhouse-local`を使用すると、ClickHouseの強力なSQL構文を使用してローカルおよびリモートファイルを処理でき、設定の必要がありません。テーブルデータは一時的な場所に保存されるため、`clickhouse-local`を再起動すると、以前に作成したテーブルは利用できなくなります。 - -次のコマンドを実行して[clickhouse-local](/operations/utilities/clickhouse-local)を起動します: - -```bash -./clickhouse -``` - -## clickhouse-serverを起動する {#start-clickhouse-server} - -データを永続化したい場合は、`clickhouse-server`を実行する必要があります。次のコマンドを使用してClickHouseサーバーを起動できます: - -```bash -./clickhouse server -``` - -## clickhouse-clientを起動する {#start-clickhouse-client} - -サーバーが動作している状態で、新しいターミナルウィンドウを開き、次のコマンドを実行して`clickhouse-client`を起動します: - -```bash -./clickhouse client -``` - -次のような出力が表示されます: - -```response -./clickhouse client -ClickHouse client version 24.5.1.117 (official build). -Connecting to localhost:9000 as user default. -Connected to ClickHouse server version 24.5.1. - -local-host :) -``` - -テーブルデータは現在のディレクトリに保存され、ClickHouseサーバーの再起動後も利用可能です。必要に応じて、`./clickhouse server`に追加のコマンドライン引数`-C config.xml`を渡し、設定ファイルでさらに構成を提供できます。利用可能なすべての設定項目は、[こちら](/operations/server-configuration-parameters/settings)および[例の設定ファイルテンプレート](https://github.com/ClickHouse/ClickHouse/blob/master/programs/server/config.xml)で文書化されています。 - -SQLコマンドをClickHouseに送信する準備が整いました! 
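To make that last step concrete, here is a minimal, illustrative sequence of first commands once `./clickhouse client` can reach the server; the table name `demo` is only an example and is not part of the original guide:

```bash
# Create a small table, insert a row, and read it back (illustrative names).
./clickhouse client --query "CREATE TABLE IF NOT EXISTS demo (id UInt32, message String) ENGINE = MergeTree ORDER BY id"
./clickhouse client --query "INSERT INTO demo VALUES (1, 'hello from WSL')"
./clickhouse client --query "SELECT * FROM demo"
```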
- - diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/install/_snippets/_windows_install.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/install/_snippets/_windows_install.md.hash deleted file mode 100644 index 004016f3eba..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/install/_snippets/_windows_install.md.hash +++ /dev/null @@ -1 +0,0 @@ -00de08e6351e0681 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/install/advanced.md b/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/install/advanced.md deleted file mode 100644 index 77766f2c4b3..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/install/advanced.md +++ /dev/null @@ -1,52 +0,0 @@ ---- -description: 'ソースから ClickHouse をコンパイルする方法や CI で生成されたバイナリをインストールする手順' -keywords: -- 'ClickHouse' -- 'install' -- 'advanced' -- 'compile from source' -- 'CI generated binary' -sidebar_label: '高度なインストール' -slug: '/install/advanced' -title: '高度なインストール方法' -hide_title: false ---- - - - -## ソースからコンパイルする {#compile-from-source} - -ClickHouseを手動でコンパイルするには、[Linux](/development/build.md)または[macOS](/development/build-osx.md)の手順に従ってください。 - -パッケージをコンパイルしてインストールするか、パッケージをインストールせずにプログラムを使用できます。 - -```xml -Client: /programs/clickhouse-client -Server: /programs/clickhouse-server -``` - -データおよびメタデータフォルダは手動で作成する必要があり、所定のユーザーに対して`chown`する必要があります。これらのパスはサーバー設定(src/programs/server/config.xml)で変更できますが、デフォルトでは次のようになります。 - -```bash -/var/lib/clickhouse/data/default/ -/var/lib/clickhouse/metadata/default/ -``` - -Gentooでは、`emerge clickhouse`を使用してソースからClickHouseをインストールできます。 - -## CI生成バイナリのインストール {#install-a-ci-generated-binary} - -ClickHouseの継続的インテグレーション(CI)インフラストラクチャは、[ClickHouseリポジトリ](https://github.com/clickhouse/clickhouse/)の各コミットに対して特別なビルドを生成します。例として、[sanitized](https://github.com/google/sanitizers)ビルド、最適化されていない(Debug)ビルド、クロスコンパイルされたビルドなどがあります。このようなビルドは通常、開発中にのみ有用ですが、特定の状況ではユーザーにも興味深い場合があります。 - -:::note -ClickHouseのCIは進化しているため、CI生成ビルドのダウンロード手順は変更される可能性があります。また、CIは古すぎるビルドアーティファクトを削除することがあるため、ダウンロードできなくなる場合があります。 -::: - -例えば、ClickHouse v23.4のaarch64バイナリをダウンロードするには、次の手順に従ってください。 - -- リリースv23.4のGitHubプルリクエストを見つける: [リリースプルリクエスト(ブランチ23.4)](https://github.com/ClickHouse/ClickHouse/pull/49238) -- 「Commits」をクリックし、インストールしたい特定のバージョンの「Update autogenerated version to 23.4.2.1 and contributors」に類似したコミットをクリックします。 -- CIチェックのリストを開くために緑のチェックマーク / 黄色の点 / 赤いバツをクリックします。 -- リスト内の「Builds」の横にある「Details」をクリックします。これにより、[このページ](https://s3.amazonaws.com/clickhouse-test-reports/46793/b460eb70bf29b19eadd19a1f959b15d186705394/clickhouse_build_check/report.html)のようなページが開きます。 -- "compiler = 'clang-*-aarch64'" の行を見つけます - 複数の行があります。 -- これらのビルドに対するアーティファクトをダウンロードします。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/install/advanced.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/install/advanced.md.hash deleted file mode 100644 index 17b737ec9fa..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/install/advanced.md.hash +++ /dev/null @@ -1 +0,0 @@ -af25af033e5c76f8 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/install/debian_ubuntu.md b/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/install/debian_ubuntu.md deleted file mode 100644 index 8c5b279e6f0..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/install/debian_ubuntu.md +++ /dev/null @@ -1,17 +0,0 @@ ---- -description: 'Debian/Ubuntu Linux に 
ClickHouse をインストールします' -keywords: -- 'ClickHouse' -- 'install' -- 'Debian' -- 'Ubuntu' -- 'deb' -sidebar_label: 'Debian/Ubuntu' -slug: '/install/debian_ubuntu' -title: 'Debian/Ubuntu で ClickHouse をインストール' -hide_title: true ---- - -import DebianProd from './_snippets/_deb_install.md' - - diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/install/debian_ubuntu.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/install/debian_ubuntu.md.hash deleted file mode 100644 index be7d7541fb2..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/install/debian_ubuntu.md.hash +++ /dev/null @@ -1 +0,0 @@ -0df8c4dffb04b138 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/install/docker.md b/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/install/docker.md deleted file mode 100644 index 1152df7266d..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/install/docker.md +++ /dev/null @@ -1,15 +0,0 @@ ---- -description: 'Debian/Ubuntu LinuxにClickHouseをインストールする' -keywords: -- 'ClickHouse' -- 'install' -- 'Docker' -sidebar_label: 'Docker' -slug: '/install/docker' -title: 'ClickHouseをDockerを使用してインストールする' -hide_title: true ---- - -import Docker from './_snippets/_docker.md' - - diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/install/docker.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/install/docker.md.hash deleted file mode 100644 index 5913ffa42ad..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/install/docker.md.hash +++ /dev/null @@ -1 +0,0 @@ -06d7e6938b4a79ec diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/install/install.mdx b/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/install/install.mdx deleted file mode 100644 index 969471cb1e4..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/install/install.mdx +++ /dev/null @@ -1,50 +0,0 @@ ---- -'description': 'ClickHouseをインストールする' -'keywords': -- 'clickhouse' -- 'install' -- 'getting started' -- 'quick start' -'sidebar_label': 'インストール' -'slug': '/install' -'title': 'ClickHouseをインストール' ---- - -import InstallSelector from '@site/src/components/Install/Install' -import Windows from './_snippets/_windows_install.md' -import TarProd from './_snippets/_linux_tar_install.md' -import QuickInstall from './_snippets/_quick_install.md' -import DebianProd from './_snippets/_deb_install.md' -import RPMProd from './_snippets/_rpm_install.md' -import MacOSProd from './_snippets/_macos.md' -import Docker from './_snippets/_docker.md' -import {CardPrimary} from '@clickhouse/click-ui/bundled'; - - -# インストール手順 - - -
- -また、以下からプラットフォーム、ディストリビューション、およびインストール方法を選択して、オープンソースのClickHouseのインストール手順を表示します。 - -} - quickinstall={} - debian_prod={} - rpm_prod={} - tar_prod={} - macos_prod={} - docker={} -/> diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/install/install.mdx.hash b/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/install/install.mdx.hash deleted file mode 100644 index 3e3f943468f..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/install/install.mdx.hash +++ /dev/null @@ -1 +0,0 @@ -5aaf8460e580e424 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/install/macos.md b/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/install/macos.md deleted file mode 100644 index 993fee614a6..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/install/macos.md +++ /dev/null @@ -1,15 +0,0 @@ ---- -description: 'Install ClickHouse on MacOS' -keywords: -- 'ClickHouse' -- 'install' -- 'MacOS' -sidebar_label: 'MacOS' -slug: '/install/macOS' -title: 'Install ClickHouse using Homebrew' -hide_title: true ---- - -import MacOSProd from './_snippets/_macos.md' - - diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/install/macos.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/install/macos.md.hash deleted file mode 100644 index 99a7f82c2fc..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/install/macos.md.hash +++ /dev/null @@ -1 +0,0 @@ -0a1dc00dd37f7819 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/install/other_linux.md b/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/install/other_linux.md deleted file mode 100644 index 5ae6b551196..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/install/other_linux.md +++ /dev/null @@ -1,16 +0,0 @@ ---- -description: 'MacOSにClickHouseをインストールする' -keywords: -- 'ClickHouse' -- 'install' -- 'Linux' -- 'tar' -sidebar_label: 'その他のLinux' -slug: '/install/linux_other' -title: 'tgzアーカイブを使用してClickHouseをインストールする' -hide_title: true ---- - -import Tar from './_snippets/_linux_tar_install.md' - - diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/install/other_linux.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/install/other_linux.md.hash deleted file mode 100644 index 360080b83a6..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/install/other_linux.md.hash +++ /dev/null @@ -1 +0,0 @@ -fc409de0c18bc080 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/install/quick-install-curl.md b/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/install/quick-install-curl.md deleted file mode 100644 index fe837627b39..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/install/quick-install-curl.md +++ /dev/null @@ -1,16 +0,0 @@ ---- -description: 'curl を使用して任意のプラットフォームに ClickHouse をインストールします' -keywords: -- 'ClickHouse' -- 'install' -- 'quick' -- 'curl' -sidebar_label: 'クイックインストール' -slug: '/install/quick-install-curl' -title: 'Install ClickHouse via script using curl' -hide_title: true ---- - -import QuickInstall from './_snippets/_quick_install.md' - - diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/install/quick-install-curl.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/install/quick-install-curl.md.hash deleted 
file mode 100644 index 6fcfd4c00f3..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/install/quick-install-curl.md.hash +++ /dev/null @@ -1 +0,0 @@ -2d2ce2f22a556da5 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/install/redhat.md b/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/install/redhat.md deleted file mode 100644 index e120b35e3d5..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/install/redhat.md +++ /dev/null @@ -1,17 +0,0 @@ ---- -description: 'Redhat/CentOS LinuxにClickHouseをインストールする' -keywords: -- 'ClickHouse' -- 'install' -- 'Redhat' -- 'CentOS' -- 'rpm' -sidebar_label: 'Redhat/CentOS' -slug: '/install/redhat' -title: 'rpmベースのLinuxディストリビューションにClickHouseをインストールする' -hide_title: true ---- - -import RPM from './_snippets/_rpm_install.md' - - diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/install/redhat.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/install/redhat.md.hash deleted file mode 100644 index 9d465d423a1..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/install/redhat.md.hash +++ /dev/null @@ -1 +0,0 @@ -5622fa1569f522bd diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/install/windows.md b/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/install/windows.md deleted file mode 100644 index efd59ea4f76..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/install/windows.md +++ /dev/null @@ -1,16 +0,0 @@ ---- -description: 'Install ClickHouse on Windows with WSL' -keywords: -- 'ClickHouse' -- 'install' -- 'Redhat' -- 'rpm' -sidebar_label: 'Windows' -slug: '/install/windows' -title: 'Install ClickHouse on Windows with WSL' -hide_title: true ---- - -import Windows from './_snippets/_windows_install.md' - - diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/install/windows.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/install/windows.md.hash deleted file mode 100644 index d6f59b64e08..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/install/windows.md.hash +++ /dev/null @@ -1 +0,0 @@ -bb3ad178ec7d45e2 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/playground.md b/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/playground.md deleted file mode 100644 index 463ed6be811..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/playground.md +++ /dev/null @@ -1,62 +0,0 @@ ---- -description: 'The ClickHouse Playground allows people to experiment with ClickHouse - by running queries instantly, without setting up their server or cluster.' 
-keywords: -- 'clickhouse' -- 'playground' -- 'getting' -- 'started' -- 'docs' -sidebar_label: 'ClickHouse Playground' -slug: '/getting-started/playground' -title: 'ClickHouse Playground' ---- - - - - -# ClickHouse Playground - -[ClickHouse Playground](https://sql.clickhouse.com) は、サーバーやクラスタをセットアップすることなく、クエリを即座に実行することで ClickHouse を試すことができる環境です。Playground にはいくつかのサンプルデータセットが用意されています。 - -任意の HTTP クライアントを使用して Playground にクエリを送信できます。例えば、[curl](https://curl.haxx.se) や [wget](https://www.gnu.org/software/wget/) を使用するか、[JDBC](../interfaces/jdbc.md) または [ODBC](../interfaces/odbc.md) ドライバーを使用して接続を設定できます。ClickHouse をサポートするソフトウェア製品に関する詳細情報は、[こちら](../integrations/index.mdx)で入手できます。 - -## Credentials {#credentials} - -| パラメータ | 値 | -|:--------------------|:-----------------------------------| -| HTTPS エンドポイント | `https://play.clickhouse.com:443/` | -| Native TCP エンドポイント | `play.clickhouse.com:9440` | -| ユーザー | `explorer` または `play` | -| パスワード | (空) | - -## Limitations {#limitations} - -クエリは読み取り専用ユーザーとして実行されます。これにはいくつかの制限が含まれます: - -- DDL クエリは許可されていません -- INSERT クエリは許可されていません - -このサービスには使用量に制限もあります。 - -## Examples {#examples} - -HTTPS エンドポイントの例(`curl` 使用): - -```bash -curl "https://play.clickhouse.com/?user=explorer" --data-binary "SELECT 'Play ClickHouse'" -``` - -TCP エンドポイントの例([CLI](../interfaces/cli.md) 使用): - -```bash -clickhouse client --secure --host play.clickhouse.com --user explorer -``` - -## Playground specifications {#specifications} - -ClickHouse Playground は以下の仕様で運営されています: - -- 米国中央地域(US-Central-1)の Google Cloud (GCE) 上でホストされています -- 3 レプリカのセットアップ -- 各 256 GiB のストレージと 59 の仮想 CPU あり。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/playground.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/playground.md.hash deleted file mode 100644 index 3b0f6f98240..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/getting-started/playground.md.hash +++ /dev/null @@ -1 +0,0 @@ -1ae6338cec94b038 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/guides/_category_.yml b/i18n/jp/docusaurus-plugin-content-docs/current/guides/_category_.yml deleted file mode 100644 index 5c42e1659b1..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/guides/_category_.yml +++ /dev/null @@ -1,7 +0,0 @@ -position: 10 -label: 'User Guides' -collapsible: true -collapsed: true -link: - type: generated-index - title: User Guides \ No newline at end of file diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/guides/best-practices/_category_.yml b/i18n/jp/docusaurus-plugin-content-docs/current/guides/best-practices/_category_.yml deleted file mode 100644 index c2f610eedfb..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/guides/best-practices/_category_.yml +++ /dev/null @@ -1,7 +0,0 @@ -label: 'Best Practices' -collapsible: true -collapsed: true -link: - type: generated-index - title: Best Practices - slug: /optimize diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/guides/best-practices/asyncinserts.md b/i18n/jp/docusaurus-plugin-content-docs/current/guides/best-practices/asyncinserts.md deleted file mode 100644 index e73215d8171..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/guides/best-practices/asyncinserts.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -slug: '/optimize/asynchronous-inserts' -sidebar_label: '非同期挿入' -title: '非同期挿入 (async_insert)' -description: 'バッチ処理データの代替手段として非同期挿入を使用します。' ---- - -import Content from 
'@site/i18n/jp/docusaurus-plugin-content-docs/current/best-practices/_snippets/_async_inserts.md'; - - diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/guides/best-practices/asyncinserts.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/guides/best-practices/asyncinserts.md.hash deleted file mode 100644 index b85503d296e..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/guides/best-practices/asyncinserts.md.hash +++ /dev/null @@ -1 +0,0 @@ -5ab546f57c716696 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/guides/best-practices/avoidmutations.md b/i18n/jp/docusaurus-plugin-content-docs/current/guides/best-practices/avoidmutations.md deleted file mode 100644 index bb9a5dfbc65..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/guides/best-practices/avoidmutations.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -slug: '/optimize/avoid-mutations' -sidebar_label: '変更を避ける' -title: '変更を避ける' -description: '変更とは、テーブルデータを操作するALTERクエリを指します' ---- - -import Content from '@site/i18n/jp/docusaurus-plugin-content-docs/current/best-practices/_snippets/_avoid_mutations.md'; - - diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/guides/best-practices/avoidmutations.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/guides/best-practices/avoidmutations.md.hash deleted file mode 100644 index b5e00c68f4b..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/guides/best-practices/avoidmutations.md.hash +++ /dev/null @@ -1 +0,0 @@ -bbd22eaa83df58a8 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/guides/best-practices/avoidnullablecolumns.md b/i18n/jp/docusaurus-plugin-content-docs/current/guides/best-practices/avoidnullablecolumns.md deleted file mode 100644 index c38348ff2d2..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/guides/best-practices/avoidnullablecolumns.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -slug: '/optimize/avoid-nullable-columns' -sidebar_label: 'Nullableカラムの回避' -title: 'Nullableカラムの回避' -description: 'ClickHouseにおいてなぜNullableカラムを避けるべきか' ---- - -import Content from '@site/i18n/jp/docusaurus-plugin-content-docs/current/best-practices/_snippets/_avoid_nullable_columns.md'; - - diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/guides/best-practices/avoidnullablecolumns.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/guides/best-practices/avoidnullablecolumns.md.hash deleted file mode 100644 index f253e6ae767..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/guides/best-practices/avoidnullablecolumns.md.hash +++ /dev/null @@ -1 +0,0 @@ -1a8cf67ce9470fb0 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/guides/best-practices/avoidoptimizefinal.md b/i18n/jp/docusaurus-plugin-content-docs/current/guides/best-practices/avoidoptimizefinal.md deleted file mode 100644 index 4d397982132..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/guides/best-practices/avoidoptimizefinal.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -slug: '/optimize/avoidoptimizefinal' -sidebar_label: 'Optimize Finalを避ける' -title: 'Optimize Finalを避ける' -description: 'OPTIMIZE TABLE ... 
FINALクエリを使用すると、データパーツの予定外のマージが開始されます。' ---- - -import Content from '@site/i18n/jp/docusaurus-plugin-content-docs/current/best-practices/_snippets/_avoid_optimize_final.md'; - - diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/guides/best-practices/avoidoptimizefinal.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/guides/best-practices/avoidoptimizefinal.md.hash deleted file mode 100644 index 2e38d207e1b..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/guides/best-practices/avoidoptimizefinal.md.hash +++ /dev/null @@ -1 +0,0 @@ -425e70d4259aabb4 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/guides/best-practices/bulkinserts.md b/i18n/jp/docusaurus-plugin-content-docs/current/guides/best-practices/bulkinserts.md deleted file mode 100644 index 4424fadaf6e..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/guides/best-practices/bulkinserts.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -slug: '/optimize/bulk-inserts' -sidebar_label: '一括挿入' -title: '一括挿入' -description: 'データ量が多い挿入を少なくすることで、必要なライティング数を減らすことができます。' ---- - -import Content from '@site/i18n/jp/docusaurus-plugin-content-docs/current/best-practices/_snippets/_bulk_inserts.md'; - - diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/guides/best-practices/bulkinserts.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/guides/best-practices/bulkinserts.md.hash deleted file mode 100644 index 4855e1f0fe3..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/guides/best-practices/bulkinserts.md.hash +++ /dev/null @@ -1 +0,0 @@ -43a78e4988da9ee6 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/guides/best-practices/index.md b/i18n/jp/docusaurus-plugin-content-docs/current/guides/best-practices/index.md deleted file mode 100644 index 6bebde4f475..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/guides/best-practices/index.md +++ /dev/null @@ -1,33 +0,0 @@ ---- -slug: '/operations/overview' -sidebar_label: 'パフォーマンスと最適化の概要' -description: 'パフォーマンスと最適化の概要ページ' -title: 'パフォーマンスと最適化' ---- - - - - -# パフォーマンスと最適化 - -このセクションでは、ClickHouseのパフォーマンスを向上させるためのヒントとベストプラクティスを紹介します。 -このセクションの前に、ユーザーには[コアコンセプト](/parts)を読むことをお勧めします。 -これにより、パフォーマンスを向上させるために必要な主要な概念がカバーされます。 - -| トピック | 説明 | -|---------------------------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| [クエリ最適化ガイド](/optimize/query-optimization) | クエリ最適化を始めるのに良い場所であり、このシンプルなガイドでは、クエリパフォーマンスを向上させるためのさまざまなパフォーマンスと最適化テクニックの使用法についての一般的なシナリオを説明します。 | -| [主インデックスの詳細ガイド](/guides/best-practices/sparse-primary-indexes) | ClickHouseのインデックスに関する詳細な考察を提供し、他のDBシステムとの違いや、ClickHouseがテーブルのスパース主インデックスをどのように構築し使用するか、ClickHouseでインデックスを設定するためのベストプラクティスについて説明します。 | -| [クエリの並列処理](/optimize/query-parallelism) | ClickHouseがプロセッシングレーンとmax_threads設定を使用してクエリの実行を並列化する方法を説明します。レーン全体にデータがどのように分散されるか、max_threadsがどのように適用され、完全には使用されない場合、EXPLAINやトレースログのようなツールを使って実行を検査する方法についても説明します。 | -| [パーティションキー](/optimize/partitioning-key) | ClickHouseのパーティションキーの最適化について詳しく説明します。正しいパーティションキーを選ぶことで、ClickHouseが関連するデータセグメントを迅速に特定できるため、クエリパフォーマンスが大幅に向上することを説明します。効率的なパーティションキーを選ぶためのベストプラクティスと避けるべき潜在的な落とし穴についても解説します。 | -| [データスキッピングインデックス](/optimize/skipping-indexes) | 
パフォーマンスを最適化するための方法としてのデータスキッピングインデックスについて説明します。 | -| [PREWHERE最適化](/optimize/prewhere) | PREWHEREが不必要なカラムデータの読み取りを避けることでI/Oを削減する方法を説明します。自動的に適用される方法、フィルタリング順序の選択方法、およびEXPLAINやログを使用して監視する方法についても説明します。 | -| [バルクインサート](/optimize/bulk-inserts) | ClickHouseでのバルクインサートの利点について説明します。 | -| [非同期インサート](/optimize/asynchronous-inserts) | ClickHouseの非同期インサート機能に焦点を当てています。非同期インサートがどのように機能するか(サーバー上でデータをバッチ処理して効率的に挿入する)と、その利点(挿入処理をオフロードしてパフォーマンスを向上させる)について説明します。また、非同期インサートを有効にする方法や、ClickHouse環境で効果的に使用するための考慮事項についても触れるかもしれません。 | -| [ミューテーションの回避](/optimize/avoid-mutations) | ClickHouseでミューテーション(更新や削除)を避けることの重要性について説明します。最適なパフォーマンスのために追加のみのインサートを使用することを推奨し、データ変更を処理するための代替アプローチを提案します。 | -| [Nullableカラムの回避](/optimize/avoid-nullable-columns) | なぜNullableカラムを避けることがスペースを節約し、パフォーマンスを向上させるかについて説明します。カラムのデフォルト値を設定する方法を示します。 | -| [OPTIMIZE FINALの回避](/optimize/avoidoptimizefinal) | `OPTIMIZE TABLE ... FINAL`クエリがリソースを多く消費する方法を説明し、ClickHouseのパフォーマンスを最適化するための代替アプローチを提案します。 | -| [アナライザー](/operations/analyzer) | クエリを分析し最適化するためのツールであるClickHouseアナライザーについて見ていきます。アナライザーの動作、利点(例:パフォーマンスのボトルネックの特定)、およびそれを使用してClickHouseクエリの効率を向上させる方法について説明します。 | -| [クエリプロファイリング](/operations/optimizing-performance/sampling-query-profiler)| ClickHouseのサンプリングクエリプロファイラーについて説明し、クエリの実行を分析するのに役立つツールです。 | -| [クエリキャッシュ](/operations/query-cache) | ClickHouseのクエリキャッシュについて詳述し、頻繁に実行される`SELECT`クエリの結果をキャッシュすることでパフォーマンスを向上させることを目的とした機能です。 | -| [ハードウェアのテスト](/operations/performance-test) | ClickHouseパッケージのインストールなしに、任意のサーバーで基本的なClickHouseパフォーマンステストを実行する方法です。(ClickHouse Cloudには適用されません) | diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/guides/best-practices/index.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/guides/best-practices/index.md.hash deleted file mode 100644 index 3c74dab3f50..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/guides/best-practices/index.md.hash +++ /dev/null @@ -1 +0,0 @@ -6fe52ea489be4c9e diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/guides/best-practices/partitioningkey.md b/i18n/jp/docusaurus-plugin-content-docs/current/guides/best-practices/partitioningkey.md deleted file mode 100644 index ba57df469b7..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/guides/best-practices/partitioningkey.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -slug: '/optimize/partitioning-key' -sidebar_label: 'パーティションキー' -title: '低基数のパーティションキーを選択する' -description: 'テーブルには低基数のパーティションキーを使用するか、パーティションキーを使用しないようにします。' ---- - -import Content from '@site/i18n/jp/docusaurus-plugin-content-docs/current/best-practices/partitioning_keys.mdx'; - - diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/guides/best-practices/partitioningkey.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/guides/best-practices/partitioningkey.md.hash deleted file mode 100644 index cbab87140b2..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/guides/best-practices/partitioningkey.md.hash +++ /dev/null @@ -1 +0,0 @@ -84d7119464ae481c diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/guides/best-practices/prewhere.md b/i18n/jp/docusaurus-plugin-content-docs/current/guides/best-practices/prewhere.md deleted file mode 100644 index 3deeceb123d..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/guides/best-practices/prewhere.md +++ /dev/null @@ -1,212 +0,0 @@ ---- -slug: '/optimize/prewhere' -sidebar_label: 'PREWHERE 最適化' -sidebar_position: 21 -description: 'PREWHERE は、不要なカラムデータの読み取りを回避することにより、I/O を削減します。' -title: 'PREWHERE 最適化はどのように機能しますか?' 
---- - -import visual01 from '@site/static/images/guides/best-practices/prewhere_01.gif'; -import visual02 from '@site/static/images/guides/best-practices/prewhere_02.gif'; -import visual03 from '@site/static/images/guides/best-practices/prewhere_03.gif'; -import visual04 from '@site/static/images/guides/best-practices/prewhere_04.gif'; -import visual05 from '@site/static/images/guides/best-practices/prewhere_05.gif'; -import Image from '@theme/IdealImage'; - - -# PREWHERE最適化はどのように機能しますか? - -[PREWHERE句](/sql-reference/statements/select/prewhere)は、ClickHouseにおけるクエリ実行の最適化手法です。これによりI/Oが削減され、不要なデータの読み取りを避け、非フィルタカラムをディスクから読み込む前に無関係なデータがフィルタリングされることで、クエリ速度が向上します。 - -このガイドでは、PREWHEREがどのように機能するのか、その影響を測定する方法、そして最適なパフォーマンスを得るための調整方法について説明します。 - - -## PREWHERE最適化なしのクエリ処理 {#query-processing-without-prewhere-optimization} - -まず、[uk_price_paid_simple](/parts)テーブルに対するクエリがPREWHEREを使用せずに処理される方法を示します: - -PREWHERE最適化なしのクエリ処理 - -

-① クエリには、テーブルの主キーの一部である`town`カラムに対するフィルタが含まれており、したがって主インデックスの一部でもあります。 - -② クエリを加速するために、ClickHouseはテーブルの主インデックスをメモリに読み込みます。 - -③ インデックスエントリをスキャンし、`town`カラムからどのグラニュールが述語に一致する行を含む可能性があるかを特定します。 - -④ これらの潜在的に関連するグラニュールは、クエリに必要な他のカラムからの位置が揃ったグラニュールと共にメモリに読み込まれます。 - -⑤ 残りのフィルタは、クエリ実行中に適用されます。 - -ご覧の通り、PREWHEREがない場合、実際に一致する行が少ない場合でも、すべての潜在的に関連するカラムがフィルタリングされる前に読み込まれます。 - - -## PREWHEREがクエリの効率を向上させる方法 {#how-prewhere-improves-query-efficiency} - -以下のアニメーションは、上記のクエリにPREWHERE句がすべてのクエリ述語に適用された場合の処理方法を示しています。 - -最初の三つの処理ステップは以前と同じです: - -PREWHERE最適化ありのクエリ処理 - -

-① クエリには、テーブルの主キーの一部である`town`カラムに対するフィルタが含まれています。 - -② PREWHERE句がない場合と同様に、クエリを加速するために、ClickHouseは主インデックスをメモリに読み込みます、 - -③ その後、インデックスエントリをスキャンして、`town`カラムからどのグラニュールが述語に一致する行を含む可能性があるかを特定します。 - -ここで、PREWHERE句のおかげで次のステップが異なります:すべての関連カラムを事前に読み込むのではなく、ClickHouseはカラムごとにデータをフィルタリングし、本当に必要なデータのみを読み込みます。これにより、特にカラム数の多い(幅の広い)テーブルの場合にI/Oが大幅に削減されます。 - -各ステップでは、前のフィルタを通過した(つまり一致した)行を少なくとも1行含むグラニュールのみが読み込まれます。その結果、各フィルタに対して読み込む必要があるグラニュールの数は一貫して減少します。 - -**ステップ 1: townによるフィルタリング**
-ClickHouseはPREWHERE処理を開始し、① `town`カラムから選択されたグラニュールを読み取り、どれが実際に`London`に一致する行を含むかを確認します。 - -この例では、すべての選択されたグラニュールが一致するため、② 次のフィルタカラムである`date`のために、対応する位置が揃ったグラニュールが選択されます: - -ステップ 1: townによるフィルタリング - -

-**ステップ 2: dateによるフィルタリング**
-次に、ClickHouseは① 選択された`date`カラムのグラニュールを読み取り、フィルタ`date > '2024-12-31'`を評価します。 - -この場合、3つのグラニュールのうち2つに一致する行が含まれているため、② 次のフィルタカラムである`price`のために、それらの位置が揃ったグラニュールのみが選択され、さらに処理が行われます: - -ステップ 2: dateによるフィルタリング - -

-**ステップ 3: priceによるフィルタリング**
-最後に、ClickHouseは① 選択された2つのグラニュールを`price`カラムから読み取り、最後のフィルタ`price > 10_000`を評価します。 - -2つのグラニュールのうち1つのみが一致する行を含んでいるため、② その位置が揃ったグラニュールのみが`SELECT`カラムである`street`のために読み込まれます: - -ステップ 3: priceによるフィルタリング - -

-最終ステップでは、一致する行を含む最小限のカラムグラニュールのセットのみが読み込まれます。これにより、メモリ使用量が低下し、ディスクI/Oが削減され、クエリ実行が速くなります。 - -:::note PREWHEREは読み取るデータを削減し、処理する行は削減しない -ClickHouseはPREWHEREバージョンでも非PREWHEREバージョンでも、同じ数の行を処理します。ただし、PREWHERE最適化が適用されている場合、処理された各行のすべてのカラム値を読み込む必要はありません。 -::: - -## PREWHERE最適化は自動的に適用される {#prewhere-optimization-is-automatically-applied} - -PREWHERE句は手動で追加することができますが、上記の例のようにPREWHEREを手動で書く必要はありません。[`optimize_move_to_prewhere`](/operations/settings/settings#optimize_move_to_prewhere)設定が有効になっている場合(デフォルトでtrue)、ClickHouseは自動的にWHEREからPREWHEREにフィルタ条件を移動し、読み取りボリュームを最も削減できる条件を優先します。 - -小さいカラムはスキャンが速いため、より大きなカラムが処理されるまでに、ほとんどのグラニュールがすでにフィルタリングされているという考え方です。すべてのカラムに同じ数の行があるため、カラムのサイズは主にそのデータ型によって決まります。たとえば、`UInt8`カラムは通常`String`カラムよりもはるかに小さくなります。 - -ClickHouseはバージョン[23.2](https://clickhouse.com/blog/clickhouse-release-23-02#multi-stage-prewhere--alexander-gololobov)からこの戦略をデフォルトで採用しており、PREWHEREフィルタカラムを未圧縮サイズの昇順でマルチステップ処理のためにソートします。 - -バージョン[23.11](https://clickhouse.com/blog/clickhouse-release-23-11#column-statistics-for-prewhere)以降、オプションのカラム統計を使用することで、カラムサイズだけでなく、実際のデータの選択性に基づいてフィルタ処理の順序を選択することができ、さらに改善されます。 - - -## PREWHEREの影響を測定する方法 {#how-to-measure-prewhere-impact} - -PREWHEREがクエリに役立っていることを確認するために、`optimize_move_to_prewhere`設定が有効な場合と無効な場合のクエリ性能を比較することができます。 - -まず、`optimize_move_to_prewhere`設定が無効の状態でクエリを実行します: - -```sql -SELECT - street -FROM - uk.uk_price_paid_simple -WHERE - town = 'LONDON' and date > '2024-12-31' and price < 10_000 -SETTINGS optimize_move_to_prewhere = false; -``` - -```txt - ┌─street──────┐ -1. │ MOYSER ROAD │ -2. │ AVENUE ROAD │ -3. │ AVENUE ROAD │ - └─────────────┘ - -3 行がセットにあります。経過時間: 0.056秒。処理された行数: 2.31百万行、23.36 MB (41.09百万行/秒、415.43 MB/秒。) -ピークメモリ使用量: 132.10 MiB. -``` - -ClickHouseはクエリの処理中に**23.36 MB**のカラムデータを読み込みました。 - -次に、`optimize_move_to_prewhere`設定が有効な状態でクエリを実行します。(この設定はオプションですが、デフォルトでは有効です): -```sql -SELECT - street -FROM - uk.uk_price_paid_simple -WHERE - town = 'LONDON' and date > '2024-12-31' and price < 10_000 -SETTINGS optimize_move_to_prewhere = true; -``` - -```txt - ┌─street──────┐ -1. │ MOYSER ROAD │ -2. │ AVENUE ROAD │ -3. │ AVENUE ROAD │ - └─────────────┘ - -3 行がセットにあります。経過時間: 0.017秒。処理された行数: 2.31百万行、6.74 MB (135.29百万行/秒、394.44 MB/秒。) -ピークメモリ使用量: 132.11 MiB. -``` - -処理された行数は同じ (2.31百万) ですが、PREWHEREのおかげでClickHouseはカラムデータを3倍以上少なく読み込みました—23.36 MBの代わりにわずか6.74 MBであり、全体の実行時間を3分の1に短縮しました。 - -ClickHouseがPREWHEREをどのように適用しているかをより深く理解するために、EXPLAINとトレースログを使用します。 - -[EXPLAIN](/sql-reference/statements/explain#explain-plan)句を使用してクエリの論理プランを調べます: -```sql -EXPLAIN PLAN actions = 1 -SELECT - street -FROM - uk.uk_price_paid_simple -WHERE - town = 'LONDON' and date > '2024-12-31' and price < 10_000; -``` - -```txt -... -Prewhere info - Prewhere filter column: - and(greater(__table1.date, '2024-12-31'_String), - less(__table1.price, 10000_UInt16), - equals(__table1.town, 'LONDON'_String)) -... -``` - -ここではプランの出力の大部分を省略していますが、それはかなり冗長です。要するに、すべての3つのカラム述語が自動的にPREWHEREに移動されたことを示しています。 - -これを自分で再現すると、クエリプランの中でこれらの述語の順序がカラムのデータ型サイズに基づいていることもわかります。カラム統計が有効になっていないため、ClickHouseはサイズをPREWHERE処理の順序を決定するためのフォールバックとして使用しています。 - -さらに深く掘り下げたい場合は、クエリ実行中にすべてのテストレベルのログエントリを返すようにClickHouseに指示することで、各PREWHERE処理ステップを観察できます: -```sql -SELECT - street -FROM - uk.uk_price_paid_simple -WHERE - town = 'LONDON' and date > '2024-12-31' and price < 10_000 -SETTINGS send_logs_level = 'test'; -``` - -```txt -... - ... Condition greater(date, '2024-12-31'_String) moved to PREWHERE - ... Condition less(price, 10000_UInt16) moved to PREWHERE - ... Condition equals(town, 'LONDON'_String) moved to PREWHERE -... - ... 
Executing prewhere actions on block: greater(__table1.date, '2024-12-31'_String) - ... Executing prewhere actions on block: less(__table1.price, 10000_UInt16) -... -``` - -## 重要なポイント {#key-takeaways} - -* PREWHEREは後でフィルタリングされるカラムデータの読み取りを回避し、I/Oとメモリを節約します。 -* `optimize_move_to_prewhere`が有効な場合(デフォルト)には自動的に機能します。 -* フィルタリングの順序は重要です:小さく選択的なカラムを最初に配置すべきです。 -* `EXPLAIN`やログを使用してPREWHEREが適用されていることを確認し、その効果を理解することができます。 -* PREWHEREは、幅広いテーブルや選択的フィルタによる大規模なスキャンに最も影響を与えます。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/guides/best-practices/prewhere.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/guides/best-practices/prewhere.md.hash deleted file mode 100644 index f19c9394166..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/guides/best-practices/prewhere.md.hash +++ /dev/null @@ -1 +0,0 @@ -153b016212076ebe diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/guides/best-practices/query-optimization.md b/i18n/jp/docusaurus-plugin-content-docs/current/guides/best-practices/query-optimization.md deleted file mode 100644 index 750cc282d02..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/guides/best-practices/query-optimization.md +++ /dev/null @@ -1,771 +0,0 @@ ---- -slug: '/optimize/query-optimization' -sidebar_label: 'クエリ最適化' -title: 'クエリ最適化ガイド' -description: 'クエリパフォーマンスを向上させるための一般的な方法を説明したシンプルな最適化ガイド' ---- - -import queryOptimizationDiagram1 from '@site/static/images/guides/best-practices/query_optimization_diagram_1.png'; -import Image from '@theme/IdealImage'; - -# クエリ最適化のためのシンプルなガイド - -このセクションでは、[analyzer](/operations/analyzer)、[クエリプロファイリング](/operations/optimizing-performance/sampling-query-profiler)、または[Nullableカラムを避ける](/optimize/avoid-nullable-columns)などの異なるパフォーマンスおよび最適化技術を使用する方法を、一般的なシナリオを通じて説明し、ClickHouseのクエリパフォーマンスを改善します。 -## クエリパフォーマンスの理解 {#understand-query-performance} - -パフォーマンス最適化を考える最適なタイミングは、データをClickHouseに初めて取り込む前に[データスキーマ](/data-modeling/schema-design)をセットアップしているときです。 - -しかし、正直に言うと、データの成長量や実行されるクエリの種類を予測するのは難しいです。 - -既存のデプロイメントがあり、パフォーマンスを向上させたいクエリがいくつかある場合、最初のステップは、それらのクエリがどのように実行されているか、なぜ一部が数ミリ秒で実行され、他のものは時間がかかるのかを理解することです。 - -ClickHouseには、クエリがどのように実行され、実行するためにどのリソースが消費されるかを理解するのに役立つ豊富なツールセットがあります。 - -このセクションでは、それらのツールとその使用方法を見ていきます。 -## 一般的な考慮事項 {#general-considerations} - -クエリパフォーマンスを理解するために、クエリがClickHouseで実行されるときに何が起こるかを見てみましょう。 - -以下の部分は意図的に簡略化されており、いくつかの省略を行っています。ここでのアイデアは、詳細を詰め込みすぎず、基本的なコンセプトを速やかに把握できるようにすることです。詳細については、[クエリアナライザー](/operations/analyzer)について読んでください。 - -非常に高いレベルの視点から、ClickHouseがクエリを実行すると、以下のことが起こります: - - - **クエリの解析と分析** - -クエリは解析され、分析され、一般的なクエリ実行計画が作成されます。 - - - **クエリの最適化** - -クエリ実行計画は最適化され、不必要なデータは剪定され、クエリ計画からクエリパイプラインが構築されます。 - - - **クエリパイプラインの実行** - -データは並行して読み取られ、処理されます。この段階では、ClickHouseがフィルタリング、集計、並べ替えなどのクエリ操作を実行します。 - - - **最終処理** - -結果はマージされ、並べ替えられ、クライアントに送信される前に最終結果にフォーマットされます。 - -実際には、多くの[最適化](/concepts/why-clickhouse-is-so-fast)が行われており、このガイドではそれらについてもう少し詳しく説明しますが、今のところ、これらの主要な概念は、ClickHouseがクエリを実行する際に何が裏で起こっているかを理解するのに役立ちます。 - -この高レベルの理解をもとに、ClickHouseが提供するツールとそれを使用してクエリパフォーマンスに影響を与えるメトリックを追跡する方法を検討してみましょう。 -## データセット {#dataset} - -クエリパフォーマンスにアプローチする方法を示すために、実際の例を使用します。 - -NYCのタクシーのデータセットを使用します。このデータセットには、NYCのタクシーの乗車データが含まれています。最初に、最適化なしでNYCタクシーデータセットを取り込みます。 - -以下は、テーブルを作成し、S3バケットからデータを挿入するためのコマンドです。データからスキーマを自動的に推測することに注意してください。これは最適化されていません。 - -```sql --- 推測されたスキーマでテーブルを作成 -CREATE TABLE trips_small_inferred -ORDER BY () EMPTY -AS SELECT * FROM s3('https://datasets-documentation.s3.eu-west-3.amazonaws.com/nyc-taxi/clickhouse-academy/nyc_taxi_2009-2010.parquet'); - --- 
推測されたスキーマでテーブルにデータを挿入 -INSERT INTO trips_small_inferred -SELECT * -FROM s3Cluster -('default','https://datasets-documentation.s3.eu-west-3.amazonaws.com/nyc-taxi/clickhouse-academy/nyc_taxi_2009-2010.parquet'); -``` - -データから自動的に推測されたテーブルスキーマを見てみましょう。 - -```sql ---- 推測されたテーブルスキーマを表示 -SHOW CREATE TABLE trips_small_inferred - -Query id: d97361fd-c050-478e-b831-369469f0784d - -CREATE TABLE nyc_taxi.trips_small_inferred -( - `vendor_id` Nullable(String), - `pickup_datetime` Nullable(DateTime64(6, 'UTC')), - `dropoff_datetime` Nullable(DateTime64(6, 'UTC')), - `passenger_count` Nullable(Int64), - `trip_distance` Nullable(Float64), - `ratecode_id` Nullable(String), - `pickup_location_id` Nullable(String), - `dropoff_location_id` Nullable(String), - `payment_type` Nullable(Int64), - `fare_amount` Nullable(Float64), - `extra` Nullable(Float64), - `mta_tax` Nullable(Float64), - `tip_amount` Nullable(Float64), - `tolls_amount` Nullable(Float64), - `total_amount` Nullable(Float64) -) -ORDER BY tuple() -``` -## 遅いクエリを見つける {#spot-the-slow-queries} -### クエリログ {#query-logs} - -デフォルトでは、ClickHouseは実行されたクエリに関する情報を[クエリログ](/operations/system-tables/query_log)に収集し、記録します。このデータはテーブル`system.query_log`に保存されます。 - -実行された各クエリについて、ClickHouseはクエリ実行時間、読み取った行数、CPUやメモリ使用量、ファイルシステムキャッシュヒットなどのリソース使用量などの統計を記録します。 - -したがって、クエリログは遅いクエリを調査する際の良い出発点です。実行に時間がかかるクエリを簡単に見つけ、それぞれに対するリソース使用情報を表示できます。 - -NYCタクシーデータセットで、上位5つの長時間実行されるクエリを見つけてみましょう。 - -```sql --- 過去1時間のnyc_taxiデータベースから上位5つの長時間実行されるクエリを見つける -SELECT - type, - event_time, - query_duration_ms, - query, - read_rows, - tables -FROM clusterAllReplicas(default, system.query_log) -WHERE has(databases, 'nyc_taxi') AND (event_time >= (now() - toIntervalMinute(60))) AND type='QueryFinish' -ORDER BY query_duration_ms DESC -LIMIT 5 -FORMAT VERTICAL - -Query id: e3d48c9f-32bb-49a4-8303-080f59ed1835 - -Row 1: -────── -type: QueryFinish -event_time: 2024-11-27 11:12:36 -query_duration_ms: 2967 -query: WITH - dateDiff('s', pickup_datetime, dropoff_datetime) as trip_time, - trip_distance / trip_time * 3600 AS speed_mph -SELECT - quantiles(0.5, 0.75, 0.9, 0.99)(trip_distance) -FROM - nyc_taxi.trips_small_inferred -WHERE - speed_mph > 30 -FORMAT JSON -read_rows: 329044175 -tables: ['nyc_taxi.trips_small_inferred'] - -Row 2: -────── -type: QueryFinish -event_time: 2024-11-27 11:11:33 -query_duration_ms: 2026 -query: SELECT - payment_type, - COUNT() AS trip_count, - formatReadableQuantity(SUM(trip_distance)) AS total_distance, - AVG(total_amount) AS total_amount_avg, - AVG(tip_amount) AS tip_amount_avg -FROM - nyc_taxi.trips_small_inferred -WHERE - pickup_datetime >= '2009-01-01' AND pickup_datetime < '2009-04-01' -GROUP BY - payment_type -ORDER BY - trip_count DESC; - -read_rows: 329044175 -tables: ['nyc_taxi.trips_small_inferred'] - -Row 3: -────── -type: QueryFinish -event_time: 2024-11-27 11:12:17 -query_duration_ms: 1860 -query: SELECT - avg(dateDiff('s', pickup_datetime, dropoff_datetime)) -FROM nyc_taxi.trips_small_inferred -WHERE passenger_count = 1 or passenger_count = 2 -FORMAT JSON -read_rows: 329044175 -tables: ['nyc_taxi.trips_small_inferred'] - -Row 4: -────── -type: QueryFinish -event_time: 2024-11-27 11:12:31 -query_duration_ms: 690 -query: SELECT avg(total_amount) FROM nyc_taxi.trips_small_inferred WHERE trip_distance > 5 -FORMAT JSON -read_rows: 329044175 -tables: ['nyc_taxi.trips_small_inferred'] - -Row 5: -────── -type: QueryFinish -event_time: 2024-11-27 11:12:44 -query_duration_ms: 634 -query: SELECT -vendor_id, -avg(total_amount), -avg(trip_distance), -FROM 
-nyc_taxi.trips_small_inferred -GROUP BY vendor_id -ORDER BY 1 DESC -FORMAT JSON -read_rows: 329044175 -tables: ['nyc_taxi.trips_small_inferred'] -``` - -`query_duration_ms`フィールドは、その特定のクエリの実行にかかった時間を示します。クエリログの結果を見ると、最初のクエリが2967msの実行時間を要していることが分かります。これは改善可能です。 - -また、メモリやCPUを最も消費しているクエリを調べることで、システムに負荷をかけているクエリも知りたいかもしれません。 - -```sql --- メモリ使用量によるトップクエリ -SELECT - type, - event_time, - query_id, - formatReadableSize(memory_usage) AS memory, - ProfileEvents.Values[indexOf(ProfileEvents.Names, 'UserTimeMicroseconds')] AS userCPU, - ProfileEvents.Values[indexOf(ProfileEvents.Names, 'SystemTimeMicroseconds')] AS systemCPU, - (ProfileEvents['CachedReadBufferReadFromCacheMicroseconds']) / 1000000 AS FromCacheSeconds, - (ProfileEvents['CachedReadBufferReadFromSourceMicroseconds']) / 1000000 AS FromSourceSeconds, - normalized_query_hash -FROM clusterAllReplicas(default, system.query_log) -WHERE has(databases, 'nyc_taxi') AND (type='QueryFinish') AND ((event_time >= (now() - toIntervalDay(2))) AND (event_time <= now())) AND (user NOT ILIKE '%internal%') -ORDER BY memory_usage DESC -LIMIT 30 -``` - -見つかった長時間実行されるクエリを隔離し、応答時間を理解するために数回再実行してみましょう。 - -この時点で、再現性を向上させるために、`enable_filesystem_cache`設定を0に設定してファイルシステムキャッシュをオフにすることが重要です。 - -```sql --- ファイルシステムキャッシュを無効にする -set enable_filesystem_cache = 0; - --- クエリ 1を実行 -WITH - dateDiff('s', pickup_datetime, dropoff_datetime) as trip_time, - trip_distance / trip_time * 3600 AS speed_mph -SELECT - quantiles(0.5, 0.75, 0.9, 0.99)(trip_distance) -FROM - nyc_taxi.trips_small_inferred -WHERE - speed_mph > 30 -FORMAT JSON - ----- -1行の結果。経過時間: 1.699秒。329.04百万行、8.88 GBを処理、(193.72百万行/秒、5.23 GB/秒) -ピークメモリ使用量: 440.24 MiB。 - --- クエリ 2を実行 -SELECT - payment_type, - COUNT() AS trip_count, - formatReadableQuantity(SUM(trip_distance)) AS total_distance, - AVG(total_amount) AS total_amount_avg, - AVG(tip_amount) AS tip_amount_avg -FROM - nyc_taxi.trips_small_inferred -WHERE - pickup_datetime >= '2009-01-01' AND pickup_datetime < '2009-04-01' -GROUP BY - payment_type -ORDER BY - trip_count DESC; - ---- -4行の結果。経過時間: 1.419秒。329.04百万行、5.72 GBを処理、(231.86百万行/秒、4.03 GB/秒) -ピークメモリ使用量: 546.75 MiB。 - --- クエリ 3を実行 -SELECT - avg(dateDiff('s', pickup_datetime, dropoff_datetime)) -FROM nyc_taxi.trips_small_inferred -WHERE passenger_count = 1 or passenger_count = 2 -FORMAT JSON - ---- -1行の結果。経過時間: 1.414秒。329.04百万行、8.88 GBを処理、(232.63百万行/秒、6.28 GB/秒) -ピークメモリ使用量: 451.53 MiB。 -``` - -分かりやすくテーブルにまとめましょう。 - -| 名前 | 経過時間 | 処理された行数 | ピークメモリ | -| -------- | ---------- | -------------- | ------------ | -| クエリ1 | 1.699 秒 | 329.04百万行 | 440.24 MiB | -| クエリ2 | 1.419 秒 | 329.04百万行 | 546.75 MiB | -| クエリ3 | 1.414 秒 | 329.04百万行 | 451.53 MiB | - -それぞれのクエリの達成する目的をもう少し理解しましょう。 - -- クエリ1は、平均速度が30マイルを超える乗車の距離分布を計算します。 -- クエリ2は、週ごとの乗車数と平均コストを見つけます。 -- クエリ3は、データセット内の各乗車の平均時間を計算します。 - -これらのクエリのいずれも非常に複雑な処理を行っているわけではなく、特に最初のクエリは、クエリが実行されるたびにトリップタイムをその場で計算しています。しかし、これらのクエリはいずれも実行に1秒以上かかっており、ClickHouseの世界では非常に長い時間です。また、これらのクエリのメモリ使用量には、各クエリで約400MBが消費されています。また、各クエリは同じ数の行(329.04百万行)を読み込んでいるようです。このテーブルに何行あるかをすぐに確認してみましょう。 - -```sql --- テーブル内の行数を数える -SELECT count() -FROM nyc_taxi.trips_small_inferred - -Query id: 733372c5-deaf-4719-94e3-261540933b23 - - ┌───count()─┐ -1. 
│ 329044175 │ -- 329.04百万行 - └───────────┘ -``` - -テーブルには329.04百万行が含まれているため、各クエリはテーブルのフルスキャンを行っています。 -### EXPLAIN文 {#explain-statement} - -長時間実行されるクエリをいくつか持ったので、これらがどのように実行されているのかを理解しましょう。そのために、ClickHouseは[EXPLAIN文コマンド](/sql-reference/statements/explain)をサポートしています。これは、実際にクエリを実行せずに、すべてのクエリ実行段階の詳細なビューを提供する非常に便利なツールです。ClickHouseのエキスパートでない場合には圧倒されるかもしれませんが、クエリがどのように実行されるかを理解するための重要なツールです。 - -文書では、EXPLAIN文が何であるか、そしてクエリ実行を分析するためにどのように使用するかに関する詳細な[ガイド](/guides/developer/understanding-query-execution-with-the-analyzer)を提供しています。このガイドの内容を繰り返すのではなく、クエリ実行パフォーマンスのボトルネックを見つけるのに役立ついくつかのコマンドに焦点を当ててみましょう。 - -**EXPLAIN indexes = 1** - -まず、EXPLAIN indexes = 1を使用してクエリプランを検査します。クエリプランは、クエリがどのように実行されるかを示すツリーです。ここには、クエリの句がどの順序で実行されるかが表示されます。EXPLAIN文によって返されたクエリプランは、下から上に読み取ることができます。 - -最初の長時間実行されるクエリを使ってみましょう。 - -```sql -EXPLAIN indexes = 1 -WITH - dateDiff('s', pickup_datetime, dropoff_datetime) AS trip_time, - (trip_distance / trip_time) * 3600 AS speed_mph -SELECT quantiles(0.5, 0.75, 0.9, 0.99)(trip_distance) -FROM nyc_taxi.trips_small_inferred -WHERE speed_mph > 30 - -Query id: f35c412a-edda-4089-914b-fa1622d69868 - - ┌─explain─────────────────────────────────────────────┐ -1. │ Expression ((Projection + Before ORDER BY)) │ -2. │ Aggregating │ -3. │ Expression (Before GROUP BY) │ -4. │ Filter (WHERE) │ -5. │ ReadFromMergeTree (nyc_taxi.trips_small_inferred) │ - └─────────────────────────────────────────────────────┘ -``` - -出力はわかりやすいです。クエリは`nyc_taxi.trips_small_inferred`テーブルからデータを読み取ることから始まります。次に、WHERE句が適用されて、計算された値に基づいて行がフィルタリングされます。フィルタリングされたデータは集約のために準備され、分位数が計算されます。最終的に、結果は並べ替えられ、出力されます。 - -ここでは、プライマリーキーが使用されていないことに注目できます。これは、テーブルを作成した際にプライマリーキーを定義しなかったためです。その結果、ClickHouseはクエリのためにテーブル全体をスキャンしています。 - -**EXPLAIN PIPELINE** - -EXPLAIN PIPELINEは、クエリの具体的な実行戦略を示します。ここでは、以前に見た一般的なクエリプランがClickHouseによって実際にどのように実行されたかを見ることができます。 - -```sql -EXPLAIN PIPELINE -WITH - dateDiff('s', pickup_datetime, dropoff_datetime) AS trip_time, - (trip_distance / trip_time) * 3600 AS speed_mph -SELECT quantiles(0.5, 0.75, 0.9, 0.99)(trip_distance) -FROM nyc_taxi.trips_small_inferred -WHERE speed_mph > 30 - -Query id: c7e11e7b-d970-4e35-936c-ecfc24e3b879 - - ┌─explain─────────────────────────────────────────────────────────────────────────────┐ - 1. │ (Expression) │ - 2. │ ExpressionTransform × 59 │ - 3. │ (Aggregating) │ - 4. │ Resize 59 → 59 │ - 5. │ AggregatingTransform × 59 │ - 6. │ StrictResize 59 → 59 │ - 7. │ (Expression) │ - 8. │ ExpressionTransform × 59 │ - 9. │ (Filter) │ -10. │ FilterTransform × 59 │ -11. │ (ReadFromMergeTree) │ -12. 
│ MergeTreeSelect(pool: PrefetchedReadPool, algorithm: Thread) × 59 0 → 1 │ -``` - -ここでは、クエリを実行するために使用されるスレッドの数に注目できます: 59スレッド。これは高い並列化を示しており、クエリを実行するのに、より小さなマシンでは時間がかかるでしょう。並行して実行されるスレッドの数が多いことは、このクエリが使用するメモリの多さを説明するかもしれません。 - -理想的には、すべての遅いクエリを同じように調査し、不必要に複雑なクエリプランを特定し、各クエリによって読み取られる行の数と消費されるリソースを理解する必要があります。 -## 方法論 {#methodology} - -本番デプロイメント上で問題のあるクエリを特定することは困難です。なぜなら、その時点でClickHouseデプロイメント上で実行されているクエリの数が多いためです。 - -どのユーザー、データベース、またはテーブルに問題があるかを知っていれば、`system.query_logs`の`user`、`tables`、または`databases`フィールドを使用して検索を絞り込むことができます。 - -最適化したいクエリを特定したら、それに対して最適化作業を開始できます。この段階で開発者がよく犯す一般的な間違いは、同時に複数のことを変更し、アドホックな実験を行うことです。通常、混合結果に終わり、より重要なこととしてクエリがなぜ速くなったのかの良い理解を欠いてしまいます。 - -クエリ最適化には構造が必要です。高度なベンチマークのことを言っているのではなく、変更がクエリパフォーマンスにどのように影響するかを理解するための単純なプロセスを持つことが重要です。 - -まず、クエリログから遅いクエリを特定し、その後、孤立した状態で改善の可能性を調査します。クエリをテストするときは、ファイルシステムキャッシュを無効にすることを忘れないでください。 - -> ClickHouseは、[キャッシング](/operations/caches)を活用して、クエリパフォーマンスをさまざまな段階で向上させます。これはクエリのパフォーマンスには良いですが、トラブルシューティング中には、潜在的なI/Oボトルネックや不良なテーブルスキーマを隠蔽する可能性があります。そのため、テスト中はファイルシステムキャッシュをオフにすることをお勧めします。プロダクション環境では有効にしてください。 - -潜在的な最適化を特定したら、それを一つずつ実装して、パフォーマンスに与える影響をより良く追跡することをお勧めします。以下は、一般的なアプローチを説明するダイアグラムです。 - -Optimization workflow - -_最後に、外れ値に注意してください; ユーザーがアドホックな高コストのクエリを試したり、システムが別の理由でストレスを受けている場合、クエリが遅くなることは非常に一般的です。フィールドnormalized_query_hashでグループ化して、定期的に実行されている高コストのクエリを特定できます。それらはおそらく、調査したいものです。_ -## 基本的な最適化 {#basic-optimization} - -フレームワークをテストする準備ができたので、最適化を始めましょう。 - -最適化の最初のステップは、データがどのように保存されているかを確認することです。どのデータベースでも同じですが、読み取るデータが少ないほど、クエリが早く実行されます。 - -データをどのように取り込んだかによって、ClickHouseの[機能](/interfaces/schema-inference)を利用して、取り込まれたデータに基づいてテーブルスキーマを推測しているかもしれません。これは始めるには非常に便利ですが、クエリパフォーマンスを最適化したい場合は、データスキーマを再評価して、ユースケースに最適になるよう調整する必要があります。 -### Nullable {#nullable} - -[ベストプラクティス文書](/best-practices/select-data-types#avoid-nullable-columns)で説明されているように、可能な限りNullableカラムは避けるべきです。これらはしばしば使いたくなりますが、データ取り込みメカニズムをより柔軟にする反面、追加のカラムが毎回処理されるため、パフォーマンスに悪影響を与えます。 - -NULL値を持つ行を数えるSQLクエリを実行すれば、実際にNullable値が必要なカラムを簡単に明らかにすることができます。 - -```sql --- NULLでない値のカラムを見つける -SELECT - countIf(vendor_id IS NULL) AS vendor_id_nulls, - countIf(pickup_datetime IS NULL) AS pickup_datetime_nulls, - countIf(dropoff_datetime IS NULL) AS dropoff_datetime_nulls, - countIf(passenger_count IS NULL) AS passenger_count_nulls, - countIf(trip_distance IS NULL) AS trip_distance_nulls, - countIf(fare_amount IS NULL) AS fare_amount_nulls, - countIf(mta_tax IS NULL) AS mta_tax_nulls, - countIf(tip_amount IS NULL) AS tip_amount_nulls, - countIf(tolls_amount IS NULL) AS tolls_amount_nulls, - countIf(total_amount IS NULL) AS total_amount_nulls, - countIf(payment_type IS NULL) AS payment_type_nulls, - countIf(pickup_location_id IS NULL) AS pickup_location_id_nulls, - countIf(dropoff_location_id IS NULL) AS dropoff_location_id_nulls -FROM trips_small_inferred -FORMAT VERTICAL - -Query id: 4a70fc5b-2501-41c8-813c-45ce241d85ae - -Row 1: -────── -vendor_id_nulls: 0 -pickup_datetime_nulls: 0 -dropoff_datetime_nulls: 0 -passenger_count_nulls: 0 -trip_distance_nulls: 0 -fare_amount_nulls: 0 -mta_tax_nulls: 137946731 -tip_amount_nulls: 0 -tolls_amount_nulls: 0 -total_amount_nulls: 0 -payment_type_nulls: 69305 -pickup_location_id_nulls: 0 -dropoff_location_id_nulls: 0 -``` - -NULL値を持つカラムは`mta_tax`と`payment_type`の2つだけです。残りのフィールドは`Nullable`カラムを使用すべきではありません。 -### 低いカーディナリティ {#low-cardinality} - -文字列に対する簡単な最適化は、LowCardinalityデータ型を最大限に活用することです。LowCardinalityに関する[文書](/sql-reference/data-types/lowcardinality)で説明されているように、ClickHouseはLowCardinalityカラムに辞書コーディングを適用し、クエリパフォーマンスを大幅に向上させます。 - 
-LowCardinalityに適したカラムを判断する簡単なルールは、ユニークな値が10,000未満のカラムは理想的な候補です。 - -以下のSQLクエリを使用して、ユニークな値が少ないカラムを見つけることができます。 - -```sql --- 低カーディナリティカラムを特定 -SELECT - uniq(ratecode_id), - uniq(pickup_location_id), - uniq(dropoff_location_id), - uniq(vendor_id) -FROM trips_small_inferred -FORMAT VERTICAL - -Query id: d502c6a1-c9bc-4415-9d86-5de74dd6d932 - -Row 1: -────── -uniq(ratecode_id): 6 -uniq(pickup_location_id): 260 -uniq(dropoff_location_id): 260 -uniq(vendor_id): 3 -``` - -低いカーディナリティを持つこれらの4つのカラム、`ratecode_id`、`pickup_location_id`、`dropoff_location_id`、および`vendor_id`は、LowCardinalityフィールドタイプの良い候補です。 -### データ型の最適化 {#optimize-data-type} - -ClickHouseは、多くのデータ型をサポートしています。ユースケースに適合する、できるだけ小さなデータ型を選択してパフォーマンスを最適化し、ディスク上のデータストレージスペースを削減してください。 - -数値の場合は、データセット内の最小/最大値を確認して、現在の精度がデータセットの実際の値に合っているかを確認することができます。 - -```sql --- payment_typeフィールドの最小/最大値を見つける -SELECT - min(payment_type),max(payment_type), - min(passenger_count), max(passenger_count) -FROM trips_small_inferred - -Query id: 4306a8e1-2a9c-4b06-97b4-4d902d2233eb - - ┌─min(payment_type)─┬─max(payment_type)─┐ -1. │ 1 │ 4 │ - └───────────────────┴───────────────────┘ -``` - -日付の場合は、データセットにマッチする精度を選択し、実行予定のクエリに最適なものを選びましょう。 -### 最適化を適用 {#apply-the-optimizations} - -最適化されたスキーマを使用するために新しいテーブルを作成し、データを再取り込みましょう。 - -```sql --- 最適化されたデータでテーブルを作成 -CREATE TABLE trips_small_no_pk -( - `vendor_id` LowCardinality(String), - `pickup_datetime` DateTime, - `dropoff_datetime` DateTime, - `passenger_count` UInt8, - `trip_distance` Float32, - `ratecode_id` LowCardinality(String), - `pickup_location_id` LowCardinality(String), - `dropoff_location_id` LowCardinality(String), - `payment_type` Nullable(UInt8), - `fare_amount` Decimal32(2), - `extra` Decimal32(2), - `mta_tax` Nullable(Decimal32(2)), - `tip_amount` Decimal32(2), - `tolls_amount` Decimal32(2), - `total_amount` Decimal32(2) -) -ORDER BY tuple(); - --- データを挿入 -INSERT INTO trips_small_no_pk SELECT * FROM trips_small_inferred -``` - -新しいテーブルを使用してクエリを再実行して、改善されたかどうかを確認します。 - -| 名前 | 初回実行 - 経過時間 | 経過時間 | 処理された行数 | ピークメモリ | -| -------- | ------------------- | ---------- | -------------- | ------------ | -| クエリ1 | 1.699 秒 | 1.353 秒 | 329.04百万行 | 337.12 MiB | -| クエリ2 | 1.419 秒 | 1.171 秒 | 329.04百万行 | 531.09 MiB | -| クエリ3 | 1.414 秒 | 1.188 秒 | 329.04百万行 | 265.05 MiB | - -クエリ処理時間とメモリ使用量の改善が見られます。データスキーマの最適化により、データの全体量が減少し、メモリ消費が改善され、処理時間が短縮されました。 - -テーブルのサイズを確認してみましょう。違いがあるか見てみます。 - -```sql -SELECT - `table`, - formatReadableSize(sum(data_compressed_bytes) AS size) AS compressed, - formatReadableSize(sum(data_uncompressed_bytes) AS usize) AS uncompressed, - sum(rows) AS rows -FROM system.parts -WHERE (active = 1) AND ((`table` = 'trips_small_no_pk') OR (`table` = 'trips_small_inferred')) -GROUP BY - database, - `table` -ORDER BY size DESC - -Query id: 72b5eb1c-ff33-4fdb-9d29-dd076ac6f532 - - ┌─table────────────────┬─compressed─┬─uncompressed─┬──────rows─┐ -1. │ trips_small_inferred │ 7.38 GiB │ 37.41 GiB │ 329044175 │ -2. 
│ trips_small_no_pk │ 4.89 GiB │ 15.31 GiB │ 329044175 │ - └──────────────────────┴────────────┴──────────────┴───────────┘ -``` - -新しいテーブルは以前のものよりかなり小さくなっています。テーブルのディスクスペースが約34%削減(7.38 GiB対4.89 GiB)されていることが分かります。 -## プライマリーキーの重要性 {#the-importance-of-primary-keys} - -ClickHouseにおけるプライマリーキーは、ほとんどの従来のデータベースシステムとは異なる動作をします。これらのシステムでは、プライマリーキーは一意性とデータの整合性を強制します。重複するプライマリーキー値を挿入しようとすれば、拒否され、通常は高速検索のためにBツリーまたはハッシュベースのインデックスが作成されます。 - -ClickHouseでは、プライマリーキーの[目的](/guides/best-practices/sparse-primary-indexes#a-table-with-a-primary-key)が異なり、一意性を強制したり、データの整合性を助けるものではありません。代わりに、クエリパフォーマンスを最適化することを目的としています。プライマリーキーは、ディスク上のデータが保存される順序を定義し、各グラニュールの最初の行へのポインタを保存するスパースインデックスとして実装されます。 - -> ClickHouseのグラニュールは、クエリ実行中に読み取られる最小のデータ単位です。これらは最大で固定数の行を含み、index_granularityによって決定され、デフォルト値は8192行です。グラニュールは連続的に保存され、プライマリキーによってソートされます。 - -良いプライマリーキーのセットを選択することはパフォーマンスに重要であり、特定のクエリセットを加速するために、異なるテーブルに同じデータを保存し、異なるプライマリーキーを使用することは一般的です。 - -他にも、ClickHouseがサポートするオプション、プロジェクションやマテリアライズドビューなどは、同じデータに異なるプライマリーキーのセットを使用することを可能にします。このブログシリーズの後半では、これをさらに詳しく説明します。 -``` -### Choose primary keys {#choose-primary-keys} - -正しい主キーのセットを選択することは複雑なテーマであり、最適な組み合わせを見つけるためにはトレードオフや実験が必要になることがあります。  - -今のところ、以下のシンプルなプラクティスに従うことにします:  - -- ほとんどのクエリでフィルタリングに使用されるフィールドを使用する -- まず低いカーディナリティのカラムを選択する  -- 主キーに時間ベースのコンポーネントを考慮する。タイムスタンプデータセットの時間によるフィルタリングは非常に一般的です。  - -私たちの場合、以下の主キーで実験を行います: `passenger_count`, `pickup_datetime`, `dropoff_datetime`。  - -`passenger_count`のカーディナリティは少なく(24のユニークな値)、遅いクエリで使用されます。また、フィルタリングされることが多いタイムスタンプフィールド(`pickup_datetime`および`dropoff_datetime`)を追加します。 - -主キーを持つ新しいテーブルを作成し、データを再インジェストします。 - -```sql -CREATE TABLE trips_small_pk -( - `vendor_id` UInt8, - `pickup_datetime` DateTime, - `dropoff_datetime` DateTime, - `passenger_count` UInt8, - `trip_distance` Float32, - `ratecode_id` LowCardinality(String), - `pickup_location_id` UInt16, - `dropoff_location_id` UInt16, - `payment_type` Nullable(UInt8), - `fare_amount` Decimal32(2), - `extra` Decimal32(2), - `mta_tax` Nullable(Decimal32(2)), - `tip_amount` Decimal32(2), - `tolls_amount` Decimal32(2), - `total_amount` Decimal32(2) -) -PRIMARY KEY (passenger_count, pickup_datetime, dropoff_datetime); - --- データを挿入 -INSERT INTO trips_small_pk SELECT * FROM trips_small_inferred -``` - -その後、クエリを再実行します。3つの実験からの結果をまとめて、経過時間、処理された行数、およびメモリ使用量の改善を確認します。  - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
| クエリ 1 | 実行 1 | 実行 2 | 実行 3 |
|----------------|----------------|----------------|----------------|
| 経過時間 | 1.699 sec | 1.353 sec | 0.765 sec |
| 処理された行数 | 329.04 million | 329.04 million | 329.04 million |
| ピークメモリ | 440.24 MiB | 337.12 MiB | 444.19 MiB |

| クエリ 2 | 実行 1 | 実行 2 | 実行 3 |
|----------------|----------------|----------------|----------------|
| 経過時間 | 1.419 sec | 1.171 sec | 0.248 sec |
| 処理された行数 | 329.04 million | 329.04 million | 41.46 million |
| ピークメモリ | 546.75 MiB | 531.09 MiB | 173.50 MiB |

| クエリ 3 | 実行 1 | 実行 2 | 実行 3 |
|----------------|----------------|----------------|----------------|
| 経過時間 | 1.414 sec | 1.188 sec | 0.431 sec |
| 処理された行数 | 329.04 million | 329.04 million | 276.99 million |
| ピークメモリ | 451.53 MiB | 265.05 MiB | 197.38 MiB |
- -実行時間と使用メモリの両方で大きな改善が見られます。  - -クエリ 2 は主キーの恩恵を最も受けています。クエリプランが以前とどう異なるか見てみましょう。 - -```sql -EXPLAIN indexes = 1 -SELECT - payment_type, - COUNT() AS trip_count, - formatReadableQuantity(SUM(trip_distance)) AS total_distance, - AVG(total_amount) AS total_amount_avg, - AVG(tip_amount) AS tip_amount_avg -FROM nyc_taxi.trips_small_pk -WHERE (pickup_datetime >= '2009-01-01') AND (pickup_datetime < '2009-04-01') -GROUP BY payment_type -ORDER BY trip_count DESC - -Query id: 30116a77-ba86-4e9f-a9a2-a01670ad2e15 - - ┌─explain──────────────────────────────────────────────────────────────────────────────────────────────────────────┐ - 1. │ Expression ((Projection + Before ORDER BY [lifted up part])) │ - 2. │ Sorting (Sorting for ORDER BY) │ - 3. │ Expression (Before ORDER BY) │ - 4. │ Aggregating │ - 5. │ Expression (Before GROUP BY) │ - 6. │ Expression │ - 7. │ ReadFromMergeTree (nyc_taxi.trips_small_pk) │ - 8. │ Indexes: │ - 9. │ PrimaryKey │ -10. │ Keys: │ -11. │ pickup_datetime │ -12. │ Condition: and((pickup_datetime in (-Inf, 1238543999]), (pickup_datetime in [1230768000, +Inf))) │ -13. │ Parts: 9/9 │ -14. │ Granules: 5061/40167 │ - └──────────────────────────────────────────────────────────────────────────────────────────────────────────────────┘ -``` - -主キーのおかげで、テーブルのグラニュールのサブセットのみが選択されています。これにより、ClickHouseが処理しなければならないデータ量が著しく減少し、クエリ性能が大幅に向上します。 -## Next steps {#next-steps} - -このガイドが、ClickHouseを使用して遅いクエリを調査し、それらをより高速にする方法についての良い理解を得る助けになることを願っています。このトピックについてさらに探求するには、[クエリアナライザー](/operations/analyzer)や[プロファイリング](/operations/optimizing-performance/sampling-query-profiler)について読み、ClickHouseがいかにしてクエリを実行しているかをより深く理解してください。 - -ClickHouse特有の機能に慣れてきたら、[パーティショニングキー](/optimize/partitioning-key)や[データスキッピングインデックス](/optimize/skipping-indexes)についても読んで、クエリを加速するために使用できるより高度なテクニックについて学ぶことをお勧めします。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/guides/best-practices/query-optimization.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/guides/best-practices/query-optimization.md.hash deleted file mode 100644 index 7601a72fd38..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/guides/best-practices/query-optimization.md.hash +++ /dev/null @@ -1 +0,0 @@ -aeb560bcaf5fa134 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/guides/best-practices/query-parallelism.md b/i18n/jp/docusaurus-plugin-content-docs/current/guides/best-practices/query-parallelism.md deleted file mode 100644 index f7f1c1d6370..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/guides/best-practices/query-parallelism.md +++ /dev/null @@ -1,263 +0,0 @@ ---- -slug: '/optimize/query-parallelism' -sidebar_label: 'Query Parallelism' -sidebar_position: 20 -description: 'ClickHouseクエリ実行の並列化には、処理レーンとmax_threads設定が使用されます。' -title: 'How ClickHouse executes a query in parallel' ---- - -import visual01 from '@site/static/images/guides/best-practices/query-parallelism_01.gif'; -import visual02 from '@site/static/images/guides/best-practices/query-parallelism_02.gif'; -import visual03 from '@site/static/images/guides/best-practices/query-parallelism_03.gif'; -import visual04 from '@site/static/images/guides/best-practices/query-parallelism_04.gif'; -import visual05 from '@site/static/images/guides/best-practices/query-parallelism_05.png'; -import Image from '@theme/IdealImage'; - - - -# ClickHouseがクエリを並行して実行する方法 - -ClickHouseは[スピードのために構築されています](/concepts/why-clickhouse-is-so-fast)。それは、利用可能なすべてのCPUコアを使用し、処理レーンにデータを分散させ、しばしばハードウェアをその限界に近づけて、クエリを高度に並行して実行します。 - 
-このガイドでは、ClickHouseのクエリ並行性がどのように機能し、大規模なワークロードでのパフォーマンスを向上させるためにそれを調整または監視する方法について説明します。 - -主要な概念を説明するために、[uk_price_paid_simple](/parts)データセットに対する集約クエリを使用します。 - -## 手順: ClickHouseが集約クエリを並行化する方法 {#step-by-step-how-clickHouse-parallelizes-an-aggregation-query} - -ClickHouseが ① プライマリキーにフィルタをかけた集約クエリを実行すると、② プライマリインデックスがメモリに読み込まれ、③ どのグラニュールを処理する必要があるか、どれを安全にスキップできるかを特定します: - -インデックス分析 - -### 処理レーンにまたがる作業の分散 {#distributing-work-across-processing-lanes} - -選択されたデータは、`n`並行[処理レーン](/academic_overview#4-2-multi-core-parallelization)に[動的に](#load-balancing-across-processing-lanes)分散され、データは[ブロック](/development/architecture#block)ごとにストリームされ、処理され、最終結果にまとめられます: - -4つの並行処理レーン - -

-`n`の並行処理レーンの数は、[max_threads](/operations/settings/settings#max_threads)設定によって制御され、デフォルトではサーバー上でClickHouseが利用できるCPUコアの数に一致します。上記の例では、`4`コアを仮定しています。 - -`8`コアのマシンでは、クエリ処理のスループットは概ね2倍になります(ただし、メモリ使用量もそれに応じて増加します)。より多くのレーンが並行してデータを処理するためです: - -8つの並行処理レーン - -

-効率的なレーン分配は、CPUの利用率を最大化し、総クエリ時間を短縮するための鍵です。 - -### シャードテーブル上のクエリ処理 {#processing-queries-on-sharded-tables} - -テーブルデータが複数のサーバーに[シャード](/shards)として分散されている場合、各サーバーはそのシャードを並行して処理します。各サーバー内では、ローカルデータが上記で説明したように並行処理レーンを使用して処理されます: - -分散レーン - -

-最初にクエリを受信したサーバーは、シャードからすべてのサブ結果を集約し、最終的なグローバル結果に統合します。 - -シャード間でクエリ負荷を分散させることで、特に高スループット環境において並行性の水平スケーリングを可能にします。 - -:::note ClickHouse Cloudはシャードの代わりに並行レプリカを使用します -ClickHouse Cloudでは、同じ並行性が[並行レプリカ](https://clickhouse.com/docs/deployment-guides/parallel-replicas)を通じて実現されており、これはシャードが共有なしのクラスターで機能するのと類似しています。各ClickHouse Cloudレプリカは、ステートレスなコンピュートノードであり、並行してデータの一部を処理し、独立したシャードのように最終結果に貢献します。 -::: - -## クエリ並行性の監視 {#monitoring-query-parallelism} - -これらのツールを使用して、クエリが利用可能なCPUリソースを完全に活用しているかどうかを確認し、そうでない場合に診断します。 - -私たちは59のCPUコアを持つテストサーバーでこれを実行しており、ClickHouseはそのクエリ並行性を完全に示すことができます。 - -例のクエリがどのように実行されるかを観察するために、ClickHouseサーバーに集約クエリ中にすべてのトレースレベルのログエントリを返すように指示できます。このデモのために、クエリの述語を削除しました—そうでなければ、3つのグラニュールしか処理されず、ClickHouseが複数の並行処理レーンを利用するには不十分なデータとなります: -```sql runnable=false -SELECT - max(price) -FROM - uk.uk_price_paid_simple -SETTINGS send_logs_level='trace'; -``` - -```txt -① ...: 3609マークを3つのレンジから読み取ります -② ...: ストリーム間でマーク範囲を分散 -② ...: 約29,564,928行を59のストリームで読み取る -``` - -私たちは次のことがわかります - - - -* ① ClickHouseは3,609グラニュール(トレースログにマークとして表示される)を3つのデータ範囲から読み取る必要があります。 -* ② 59のCPUコアを使用して、これは59の並行処理ストリームに分配されます—レーンごとに1つです。 - -また、[EXPLAIN](/sql-reference/statements/explain#explain-pipeline)句を使用して集約クエリの[物理演算子プラン](/academic_overview#4-2-multi-core-parallelization)—通称「クエリパイプライン」を検査できます: -```sql runnable=false -EXPLAIN PIPELINE -SELECT - max(price) -FROM - uk.uk_price_paid_simple; -``` - -```txt - ┌─explain───────────────────────────────────────────────────────────────────────────┐ - 1. │ (式) │ - 2. │ ExpressionTransform × 59 │ - 3. │ (集約) │ - 4. │ Resize 59 → 59 │ - 5. │ AggregatingTransform × 59 │ - 6. │ StrictResize 59 → 59 │ - 7. │ (式) │ - 8. │ ExpressionTransform × 59 │ - 9. │ (ReadFromMergeTree) │ -10. │ MergeTreeSelect(pool: PrefetchedReadPool, algorithm: Thread) × 59 0 → 1 │ - └───────────────────────────────────────────────────────────────────────────────────┘ -``` - -注意: 上記の演算子プランは、下から上へ読み取ってください。各行は、ストレージからデータを読み取るのを開始点とし、最終的な処理ステップで終了します。`× 59`でマークされた演算子は、59の並行処理レーンにわたって重複のないデータ領域で同時に実行されます。これは`max_threads`の値を反映し、クエリの各ステージがCPUコアにわたってどのように並行化されているかを示しています。 - -ClickHouseの[埋め込まれたWeb UI](/interfaces/http)(/playエンドポイントで利用可能)は、上記の物理プランをグラフィカルな視覚化としてレンダリングできます。この例では、視覚化をコンパクトに保つため、`max_threads`を`4`に設定し、4つの並行処理レーンのみを表示します: - -クエリパイプライン - -注意: 視覚化を左から右に読み取ってください。各行は、データをブロックごとにストリーミングし、フィルタリング、集約、最終処理ステップなどの変換を適用する並行処理レーンを表しています。この例では、`max_threads = 4`設定に対応する4つの並行レーンを確認できます。 - -### 処理レーン間の負荷分散 {#load-balancing-across-processing-lanes} - -上記の物理プランの`Resize`演算子は、処理レーン間でデータブロックストリームを[再分割し再配布](/academic_overview#4-2-multi-core-parallelization)して均等に活用されるようにします。この再バランス処理は、データ範囲がクエリ述語に一致する行数で異なる場合には特に重要です。さもなければ、一部のレーンが過負荷になり、他のレーンがアイドル状態になるかもしれません。作業を再分配することで、より早いレーンが遅いものを効果的に助け、全体のクエリ実行時間を最適化します。 - -## なぜmax_threadsは常に尊重されないのか {#why-max-threads-isnt-always-respected} - -上記のように、`n`の並行処理レーンの数は、デフォルトでサーバー上でClickHouseが利用できるCPUコア数に一致する`max_threads`設定によって制御されます: -```sql runnable=false -SELECT getSetting('max_threads'); -``` - -```txt - ┌─getSetting('max_threads')─┐ -1. │ 59 │ - └───────────────────────────┘ -``` - -ただし、処理のために選択したデータ量に応じて`max_threads`値が無視される場合があります: -```sql runnable=false -EXPLAIN PIPELINE -SELECT - max(price) -FROM - uk.uk_price_paid_simple -WHERE town = 'LONDON'; -``` - -```txt -... 
-(ReadFromMergeTree) -MergeTreeSelect(pool: PrefetchedReadPool, algorithm: Thread) × 30 -``` - -上記の演算子プランの抜粋に示されているように、`max_threads`が`59`に設定されているにもかかわらず、ClickHouseはデータのスキャンに**30**の同時ストリームしか使用していません。 - -それではクエリを実行してみましょう: -```sql runnable=false -SELECT - max(price) -FROM - uk.uk_price_paid_simple -WHERE town = 'LONDON'; -``` - -```txt - ┌─max(price)─┐ -1. │ 594300000 │ -- 594.30百万円 - └────────────┘ - -1行がセットにあります。経過時間: 0.013秒。処理された行: 2.31百万行、13.66 MB (173.12百万行/秒、1.02 GB/秒)。 -ピークメモリ使用量: 27.24 MiB。 -``` - -出力で示されているように、クエリは2.31百万行を処理し、13.66MBのデータを読み取りました。これは、インデックス分析フェーズ中にClickHouseが**282グラニュール**を処理のために選択したためです。各グラニュールには8,192行が含まれ、合計で約2.31百万行となります: - -```sql runnable=false -EXPLAIN indexes = 1 -SELECT - max(price) -FROM - uk.uk_price_paid_simple -WHERE town = 'LONDON'; -``` - -```txt - ┌─explain───────────────────────────────────────────────┐ - 1. │ Expression ((Project names + Projection)) │ - 2. │ Aggregating │ - 3. │ Expression (GROUP BYの前) │ - 4. │ 式 │ - 5. │ ReadFromMergeTree (uk.uk_price_paid_simple) │ - 6. │ インデックス: │ - 7. │ 主キー │ - 8. │ キー: │ - 9. │ town │ -10. │ 条件: (town in ['LONDON', 'LONDON']) │ -11. │ パーツ: 3/3 │ -12. │ グラニュール: 282/3609 │ - └───────────────────────────────────────────────────────┘ -``` - -設定された`max_threads`値にかかわらず、ClickHouseは十分なデータがない場合追加の並行処理レーンを割り当てません。`max_threads`の「max」は上限を示すものであり、使用されるスレッド数が保証されるわけではありません。 - -「十分なデータ」とは何かは、主にそれぞれの処理レーンが処理すべき行数の最小限(デフォルトは163,840)と最小バイト数(デフォルトは2,097,152)で決定されます: - -共有なしのクラスター用: -* [merge_tree_min_rows_for_concurrent_read](https://clickhouse.com/docs/operations/settings/settings#merge_tree_min_rows_for_concurrent_read) -* [merge_tree_min_bytes_for_concurrent_read](https://clickhouse.com/docs/operations/settings/settings#merge_tree_min_bytes_for_concurrent_read) - -共有ストレージがあるクラスター用(例:ClickHouse Cloud): -* [merge_tree_min_rows_for_concurrent_read_for_remote_filesystem](https://clickhouse.com/docs/operations/settings/settings#merge_tree_min_rows_for_concurrent_read_for_remote_filesystem) -* [merge_tree_min_bytes_for_concurrent_read_for_remote_filesystem](https://clickhouse.com/docs/operations/settings/settings#merge_tree_min_bytes_for_concurrent_read_for_remote_filesystem) - -さらに、読み取りタスクサイズには厳しい下限があり、以下で制御されています: -* [Merge_tree_min_read_task_size](https://clickhouse.com/docs/operations/settings/settings#merge_tree_min_read_task_size) + [merge_tree_min_bytes_per_task_for_remote_reading](https://clickhouse.com/docs/operations/settings/settings#merge_tree_min_bytes_per_task_for_remote_reading) - -:::warning これらの設定を変更しないでください -これらの設定を本番環境で変更することはお勧めしません。これらは、`max_threads`が常に実際の並行性レベルを決定しない理由を示すためにのみここに示されています。 -::: - -デモ目的で、これらの設定を上書きして最大の同時実行性を強制するために物理プランを検査しましょう: -```sql runnable=false -EXPLAIN PIPELINE -SELECT - max(price) -FROM - uk.uk_price_paid_simple -WHERE town = 'LONDON' -SETTINGS - max_threads = 59, - merge_tree_min_read_task_size = 0, - merge_tree_min_rows_for_concurrent_read_for_remote_filesystem = 0, - merge_tree_min_bytes_for_concurrent_read_for_remote_filesystem = 0; -``` - -```txt -... 
-(ReadFromMergeTree) -MergeTreeSelect(pool: PrefetchedReadPool, algorithm: Thread) × 59 -``` - -これでClickHouseはデータをスキャンするために59の同時ストリームを使用し、設定された`max_threads`に完全に従います。 - -これは、小さなデータセットに対するクエリにおいて、ClickHouseが意図的に同時実行性を制限することを示しています。設定の上書きはテスト用のみ使用し、本番環境では利用しないでください。このような変更は非効率的な実行やリソース競合を引き起こす可能性があります。 - -## 主なポイント {#key-takeaways} - -* ClickHouseは`max_threads`に関連付けられた処理レーンを使用してクエリを並行化します。 -* 実際のレーンの数は、処理のために選択されたデータのサイズに依存します。 -* `EXPLAIN PIPELINE`とトレースログを使用してレーン使用状況を分析します。 - -## さらなる情報を見つけるには {#where-to-find-more-information} - -ClickHouseがクエリを並行して実行する方法や、スケールアップ時に高性能を達成する方法についてさらに深く探求したい場合は、以下のリソースを参照してください: - -* [クエリ処理層 – VLDB 2024 論文 (Web版)](/academic_overview#4-query-processing-layer) - ClickHouseの内部実行モデルに関する詳細な説明で、スケジューリング、パイプライン、演算子設計を含みます。 - -* [部分集約状態の説明](https://clickhouse.com/blog/clickhouse_vs_elasticsearch_mechanics_of_count_aggregations#-multi-core-parallelization) - 部分集約状態が処理レーンの並行実行を効率的に可能にする方法に関する技術的な深掘り。 - -* ClickHouseのクエリ処理ステップを詳細に解説したビデオチュートリアル: - diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/guides/best-practices/query-parallelism.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/guides/best-practices/query-parallelism.md.hash deleted file mode 100644 index 5cea0620856..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/guides/best-practices/query-parallelism.md.hash +++ /dev/null @@ -1 +0,0 @@ -aa56721341d037cf diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/guides/best-practices/skipping-indexes.md b/i18n/jp/docusaurus-plugin-content-docs/current/guides/best-practices/skipping-indexes.md deleted file mode 100644 index c701faf3f14..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/guides/best-practices/skipping-indexes.md +++ /dev/null @@ -1,183 +0,0 @@ ---- -slug: '/optimize/skipping-indexes' -sidebar_label: 'データスキッピングインデックス' -sidebar_position: 2 -description: 'スキップインデックスは、ClickHouseが一致する値がないことが保証されているデータの大きなチャンクを読み飛ばすことを可能にします。' -title: 'ClickHouse データスキッピングインデックスの理解' ---- - -import simple_skip from '@site/static/images/guides/best-practices/simple_skip.png'; -import bad_skip from '@site/static/images/guides/best-practices/bad_skip.png'; -import Image from '@theme/IdealImage'; - - - -# ClickHouse データスキッピングインデックスの理解 - -## はじめに {#introduction} - -多くの要因が ClickHouse のクエリ性能に影響を与えます。ほとんどのシナリオで重要な要素は、ClickHouse がクエリの WHERE 句の条件を評価する際に主キーを使用できるかどうかです。それに応じて、最も一般的なクエリパターンに適用される主キーを選択することは、効果的なテーブル設計のために不可欠です。 - -しかし、どんなに慎重に調整された主キーであっても、効率的に使用できないクエリのユースケースが必然的に存在します。ユーザーは一般的に時間系列データを扱いますが、顧客 ID、ウェブサイトの URL、製品番号など、他のビジネス次元に基づいて同じデータを分析したいと考えることがよくあります。その場合、WHERE 句の条件を適用するために、各カラムの値をフルスキャンする必要があるため、クエリのパフォーマンスが大幅に悪化する可能性があります。ClickHouse はそれでも比較的高速ですが、数百万または数十億の個々の値を評価することは、「インデックス未使用」のクエリが主キーに基づくクエリよりもはるかに遅く実行される原因となります。 - -従来の関係データベースでは、この問題へのアプローチの一つは、テーブルに一つ以上の「セカンダリ」インデックスを追加することです。これは b-tree 構造であり、データベースがディスク上のすべての一致する行を O(log(n)) 時間で見つけることを可能にします(ここで n は行数)。しかし、このタイプのセカンダリインデックスは、ディスク上に個々の行が存在しないため、ClickHouse(または他の列指向データベース)には適用できません。 - -その代わりに、ClickHouse は特定の状況でクエリ速度を大幅に改善できる別のタイプのインデックスを提供します。これらの構造は「スキップ」インデックスと呼ばれ、ClickHouse が一致する値がないことが保証されている重大なデータチャンクの読取りをスキップできるようにします。 - -## 基本的な操作 {#basic-operation} - -ユーザーは、MergeTree ファミリーのテーブルに対してのみデータスキッピングインデックスを使用できます。各データスキッピングインデックスには、4 つの主な引数があります。 - -- インデックス名。インデックス名は、各パーティション内にインデックスファイルを作成するために使用されます。また、インデックスを削除またはマテリアライズする際のパラメーターとしても必要です。 -- インデックス式。インデックス式は、インデックス内に保存される値のセットを計算するために使用されます。カラム、単純演算子、および/またはインデックスタイプによって決定される関数のサブセットの組み合わせであることができます。 -- TYPE。インデックスのタイプは、各インデックスブロックの読み取りと評価をスキップできるかどうかを決定する計算を制御します。 -- GRANULARITY。各インデックスブロックは 
GRANULARITY グラニュールから構成されます。たとえば、主テーブルインデックスのグラニュラリティが 8192 行で、インデックスのグラニュラリティが 4 の場合、各インデックス「ブロック」は 32768 行になります。 - -ユーザーがデータスキッピングインデックスを作成すると、テーブルの各データパートディレクトリに 2 つの追加ファイルが作成されます。 - -- `skp_idx_{index_name}.idx` には、順序付けられた式の値が含まれています。 -- `skp_idx_{index_name}.mrk2` には、関連するデータカラムファイルへの対応するオフセットが含まれています。 - -クエリを実行して関連するカラムファイルを読み込むときに、WHERE 句のフィルタリング条件の一部がスキップインデックス式に一致する場合、ClickHouse はインデックスファイルデータを使用して、各関連データブロックを処理する必要があるか、バイパスできるかを判断します(ブロックが主キーの適用によってすでに除外されていないと仮定)。非常に単純化された例を考えてみましょう。予測可能なデータでロードされた以下のテーブルです。 - -```sql -CREATE TABLE skip_table -( - my_key UInt64, - my_value UInt64 -) -ENGINE MergeTree primary key my_key -SETTINGS index_granularity=8192; - -INSERT INTO skip_table SELECT number, intDiv(number,4096) FROM numbers(100000000); -``` - -主キーを使用しないシンプルなクエリを実行すると、`my_value` カラムの 1 億件のエントリ全てがスキャンされます: - -```sql -SELECT * FROM skip_table WHERE my_value IN (125, 700) - -┌─my_key─┬─my_value─┐ -│ 512000 │ 125 │ -│ 512001 │ 125 │ -│ ... | ... | -└────────┴──────────┘ - -8192 行がセットにあります。経過時間: 0.079 秒。処理された行数 100.00百万行、800.10 MB (1.26 十億行/s.、10.10 GB/s.) -``` - -非常に基本的なスキップインデックスを追加します: - -```sql -ALTER TABLE skip_table ADD INDEX vix my_value TYPE set(100) GRANULARITY 2; -``` - -通常、スキップインデックスは新しく挿入されたデータのみに適用されるため、インデックスを追加しただけでは上記のクエリには影響しません。 - -既存のデータにインデックスを付与するには、このステートメントを使用します: - -```sql -ALTER TABLE skip_table MATERIALIZE INDEX vix; -``` - -新しく作成されたインデックスを使用してクエリを再実行します: - -```sql -SELECT * FROM skip_table WHERE my_value IN (125, 700) - -┌─my_key─┬─my_value─┐ -│ 512000 │ 125 │ -│ 512001 │ 125 │ -│ ... | ... | -└────────┴──────────┘ - -8192 行がセットにあります。経過時間: 0.051 秒。処理された行数 32.77 千行、360.45 KB (643.75 千行/s.、7.08 MB/s.) -``` - -ClickHouse は 800 メガバイトの 1 億行を処理する代わりに、32768 行の 360 キロバイトのみを読み取って分析しました -- 8192 行ずつの 4 つのグラニュールです。 - -よりビジュアルな形で、`my_value` が 125 の 4096 行がどのように読み取られ選択されたか、そして次の行がディスクから読み込むことなくスキップされたかの様子を示しています: - -Simple Skip - -ユーザーは、クエリを実行する際にトレースを有効にすることによって、スキップインデックス使用に関する詳細情報にアクセスできます。clickhouse-client から、`send_logs_level` を設定します: - -```sql -SET send_logs_level='trace'; -``` -これにより、SQL クエリやテーブルインデックスを調整する際の便利なデバッグ情報が提供されます。上記の例では、デバッグログにスキップインデックスが 6102/6104 グラニュールを削除したことが示されています: - -```sql - default.skip_table (933d4b2c-8cea-4bf9-8c93-c56e900eefd1) (SelectExecutor): Index `vix` has dropped 6102/6104 granules. 
-``` - -## スキップインデックスの種類 {#skip-index-types} - -### minmax {#minmax} - -この軽量インデックスタイプはパラメーターを必要としません。インデックス式の最小値と最大値を各ブロックについて保存します(式がタプルである場合、タプルの要素の各メンバーの値が別々に保存されます)。このタイプは、値によって緩やかにソートされる傾向のあるカラムに理想的です。このインデックスタイプは、クエリ処理中に適用する際のコストが最も低いことが一般的です。 - -このタイプのインデックスは、スカラーまたはタプル式にのみ正しく機能します -- インデックスは、配列またはマップデータ型を返す式には決して適用されません。 - -### set {#set} - -この軽量インデックスタイプは、ブロックごとの値セットの max_size の単一パラメーターを受け付けます(0 は、無限の離散値を許可します)。このセットにはブロック内のすべての値が含まれます(または値の数が max_size を超える場合は空になります)。このインデックスタイプは各グラニュール内の低いカーディナリティのカラムに適しており(本質的に「一緒に塊になっている」)、全体的には高いカーディナリティです。 - -このインデックスのコスト、性能、効果は、ブロック内のカーディナリティに依存します。各ブロックにユニークな値が多数含まれている場合、大きなインデックスセットに対してクエリ条件を評価することは非常に高価になるか、max_size を超えたためインデックスが空であるため、インデックスが適用されない可能性があります。 - -### ブルームフィルタータイプ {#bloom-filter-types} - -*ブルームフィルター*は、わずかな確率の偽陽性のコストを伴い、効率的にセットメンバーシップをテストできるデータ構造です。スキップインデックスの場合、偽陽性はそれほど重要ではありません。なぜなら、唯一の欠点は、いくつかの不要なブロックを読み込むことだからです。しかし、偽陽性の可能性があるため、インデックス式は真であることが期待されるべきであり、そうでない場合は、有効なデータがスキップされる可能性があります。 - -ブルームフィルターは、大量の離散値をテストするのをより効率的に処理できるため、テストする値が増える条件式に適しています。特に、ブルームフィルターインデックスは配列に適用でき、配列のすべての値がテストされ、マップには mapKeys または mapValues 関数を使用してキーまたは値を配列に変換することによって適用できます。 - -ブルームフィルターに基づくデータスキッピングインデックスの種類は 3 つあります: - -* 基本的な **bloom_filter** は、0 および 1 の間の許容される「偽陽性」率の単一のオプションパラメータを取ります(指定されていない場合、.025 が使用されます)。 - -* 専門の **tokenbf_v1**。これは、ブルームフィルターを調整するための 3 つのパラメータを持っています:(1) バイト単位のフィルターのサイズ(大きいフィルターは偽陽性が少なくなりますが、ストレージにコストがかかります)、(2) 適用されるハッシュ関数の数(さらに、ハッシュフィルターが増えるほど偽陽性が減ります)、および (3) ブルームフィルターのハッシュ関数用のシード。これらのパラメータがブルームフィルターの機能にどのように影響するかについては、[こちら](https://hur.st/bloomfilter/)の計算機を参照してください。このインデックスは、String、FixedString、および Map データ型にのみ適用されます。入力式は、非英数字で区切られた文字列のシーケンスに分割されます。たとえば、`This is a candidate for a "full text" search` のカラム値には、トークン `This` `is` `a` `candidate` `for` `full` `text` `search` が含まれます。LIKE、EQUALS、IN、hasToken() および長い文字列内の単語や他の値を検索するための類似検索に用いることを意図しています。たとえば、可能な使用法の一つは、自由形式のアプリケーションログ行の列における少数のクラス名や行番号を検索することかもしれません。 - -* 専門の **ngrambf_v1**。このインデックスは token インデックスと同じように機能します。ブルームフィルター設定の前に 1 つの追加パラメーター、インデックス化される ngram のサイズを取ります。ngram は、任意の文字の長さ `n` の文字列です。したがって、`A short string` の ngram サイズ 4 では、次のようにインデックス化されます: - ```text - 'A sh', ' sho', 'shor', 'hort', 'ort ', 'rt s', 't st', ' str', 'stri', 'trin', 'ring' - ``` -このインデックスは、単語の区切りがない言語、たとえば中国語などのテキスト検索に特に役立つ場合があります。 - -## スキップインデックス関数 {#skip-index-functions} - -データスキッピングインデックスの核心的な目的は、一般的なクエリによって分析されるデータの量を制限することです。ClickHouse データの分析的性質を考慮すると、これらのクエリのパターンのほとんどは関数式を含みます。したがって、スキップインデックスは共通の関数と正しく相互作用しなければ効率的ではありません。これは次のいずれかで発生します: -* データが挿入され、インデックスが関数式として定義される(式の結果がインデックスファイルに格納される)、または -* クエリが処理され、式が格納されたインデックス値に適用されてブロックを除外するかどうかを決定します。 - -各タイプのスキップインデックスは、[こちら](/engines/table-engines/mergetree-family/mergetree/#functions-support)にリストされているインデックス実装に適した ClickHouse の利用可能な関数のサブセットで機能します。一般に、セットインデックスとブルームフィルターに基づくインデックス(別のタイプのセットインデックス)はともに無順序であり、したがって範囲では機能しません。それに対して、minmax インデックスは範囲に非常に適しており、範囲が交差するかどうかを決定する速度が非常に速いです。部分一致関数 LIKE、startsWith、endsWith、hasToken の有効性は、使用されるインデックスタイプ、インデックス式、およびデータの特定の形状に依存します。 - -## スキップインデックス設定 {#skip-index-settings} - -スキップインデックスに適用できる 2 つの設定があります。 - -* **use_skip_indexes** (0 または 1、デフォルトは 1)。すべてのクエリがスキップインデックスを効率的に使用できるわけではありません。特定のフィルタリング条件がほとんどのグラニュールを含む可能性がある場合、データスキッピングインデックスを適用することは不要な、時には重要なコストを伴います。スキップインデックスから利益を得る可能性が低いクエリには、0 に設定してください。 -* **force_data_skipping_indices** (カンマ区切りのインデックス名リスト)。この設定は、非効率的なクエリの一部を防ぐために使用できます。スキップインデックスなしではテーブルをクエリに必要以上のコストがかかる場合、この設定を 1 つ以上のインデックス名で使用すると、リストされたインデックスを使用しないいかなるクエリにも例外が返されます。これにより、悪意のあるクエリがサーバーリソースを消費するのを防ぐことができます。 - -## スキップベストプラクティス {#skip-best-practices} - -スキップインデックスは直感的ではなく、特に RDMS 
領域からのセカンダリ行ベースインデックスやドキュメントストアからの逆インデックスに慣れたユーザーにとってはそうです。利益を得るには、ClickHouse のデータスキッピングインデックスは、インデックス計算のコストを相殺するだけの十分なグラニュール読み取りを回避する必要があります。特に、ある値がインデックス化されたブロック内に1度でも出現する場合、そのブロック全体がメモリに読み込まれ評価される必要があるため、インデックスコストが不必要に発生します。 - -以下のデータ分布を考えてみましょう: - -Bad Skip - -主キー/順序付けキーが `timestamp` であり、`visitor_id` にインデックスがあると仮定します。次のクエリを考えてみましょう: - -```sql -SELECT timestamp, url FROM table WHERE visitor_id = 1001` -``` - -この種のデータ分布では、従来のセカンダリインデックスが非常に有利です。要求された visitor_id を持つ 5 行を見つけるためにすべての 32768 行を読む代わりに、セカンダリインデックスは単に 5 つの行の位置を含むだけで、その 5 行のみがディスクから読み取られます。ClickHouse のデータスキッピングインデックスではその正反対のことが当てはまります。いかなるスキップインデックスのタイプに関わらず、`visitor_id` カラム内のすべての 32768 値がテストされます。 - -したがって、主キーのカラムに単にインデックスを追加することで ClickHouse のクエリを高速化しようとする自然な衝動は、しばしば誤りです。この高度な機能は、主キーの変更([主キーの選び方](../best-practices/sparse-primary-indexes.md)を参照)、プロジェクションの使用、またはマテリアライズドビューの使用など、他の代替案を調査した後にのみ使用すべきです。データスキッピングインデックスが適切である場合でも、インデックスとテーブルの両方を慎重に調整する必要があることがよくあります。 - -ほとんどのケースでは、有用なスキップインデックスは主キーとターゲットにした非主キーのカラム/式の間に強い相関関係が必要です。相関がない場合(上の図のように)、数千の値のブロック内の少なくとも 1 つの行によってフィルタリング条件が満たされる可能性が高く、少なくとも多くのブロックがスキップされることはありません。対照的に、主キーの値の範囲(例えば、1日の時間)が潜在的なインデックスカラムの値(例えば、テレビ視聴者の年齢)と強く関連付けられている場合、minmax タイプのインデックスは有益である可能性が高いです。データを挿入する際に、主キーの順序付けキーに追加のカラムを含めるか、主キーに関連する値が挿入時にグループ化されるようにバッチ挿入することで、この相関を高めることが可能かもしれません。たとえば、特定の site_id のすべてのイベントをインジェストプロセスによって一緒にグループ化して挿入することができます。これは、多くの granules を生成し、特定の site_id 値を検索する際に多くのブロックをスキップできる結果となります。 - -スキップインデックスのもう一つの良い候補は、任意の値がデータ内で比較的スパースである高いカーディナリティの式です。たとえば、ある API リクエストにおけるエラーコードを追跡する可視化プラットフォームなどの例が考えられます。特定のエラーコードはデータ内で稀であっても、検索には特に重要かもしれません。エラーコードカラムのセットスキップインデックスを使用することで、エラーを含まない大部分のブロックをスキップし、エラー関連のクエリの性能を大幅に向上させることが可能です。 - -最後に、重要なベストプラクティスは、テストを繰り返すことです。再度、b-tree セカンダリインデックスやドキュメントを検索するための逆インデックスとは異なり、データスキッピングインデックスの動作は予測が容易ではありません。テーブルに追加すると、データの取り込みとインデックスから恩恵を受けないクエリの両方において意味のあるコストが発生します。実世界のデータ型で常にテストし、テストには型、グラニュラリティサイズやその他のパラメータのバリエーションを含むべきです。テストはしばしば、考察だけでは明らかでないパターンや落とし穴を明らかにします。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/guides/best-practices/skipping-indexes.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/guides/best-practices/skipping-indexes.md.hash deleted file mode 100644 index d18effe146d..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/guides/best-practices/skipping-indexes.md.hash +++ /dev/null @@ -1 +0,0 @@ -3e2cd02bc66b9544 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/guides/best-practices/sparse-primary-indexes.md b/i18n/jp/docusaurus-plugin-content-docs/current/guides/best-practices/sparse-primary-indexes.md deleted file mode 100644 index 7d4668db096..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/guides/best-practices/sparse-primary-indexes.md +++ /dev/null @@ -1,1469 +0,0 @@ ---- -sidebar_label: '主キーインデックス' -sidebar_position: 1 -description: 'このガイドでは、ClickHouseのインデックスに詳しく入ります。' -title: 'ClickHouseにおける主キーインデックスの実践的な紹介' -slug: '/guides/best-practices/sparse-primary-indexes' ---- - -import sparsePrimaryIndexes01 from '@site/static/images/guides/best-practices/sparse-primary-indexes-01.png'; -import sparsePrimaryIndexes02 from '@site/static/images/guides/best-practices/sparse-primary-indexes-02.png'; -import sparsePrimaryIndexes03a from '@site/static/images/guides/best-practices/sparse-primary-indexes-03a.png'; -import sparsePrimaryIndexes03b from '@site/static/images/guides/best-practices/sparse-primary-indexes-03b.png'; -import sparsePrimaryIndexes04 from '@site/static/images/guides/best-practices/sparse-primary-indexes-04.png'; -import sparsePrimaryIndexes05 from 
'@site/static/images/guides/best-practices/sparse-primary-indexes-05.png'; -import sparsePrimaryIndexes06 from '@site/static/images/guides/best-practices/sparse-primary-indexes-06.png'; -import sparsePrimaryIndexes07 from '@site/static/images/guides/best-practices/sparse-primary-indexes-07.png'; -import sparsePrimaryIndexes08 from '@site/static/images/guides/best-practices/sparse-primary-indexes-08.png'; -import sparsePrimaryIndexes09a from '@site/static/images/guides/best-practices/sparse-primary-indexes-09a.png'; -import sparsePrimaryIndexes09b from '@site/static/images/guides/best-practices/sparse-primary-indexes-09b.png'; -import sparsePrimaryIndexes09c from '@site/static/images/guides/best-practices/sparse-primary-indexes-09c.png'; -import sparsePrimaryIndexes10 from '@site/static/images/guides/best-practices/sparse-primary-indexes-10.png'; -import sparsePrimaryIndexes11 from '@site/static/images/guides/best-practices/sparse-primary-indexes-11.png'; -import sparsePrimaryIndexes12a from '@site/static/images/guides/best-practices/sparse-primary-indexes-12a.png'; -import sparsePrimaryIndexes12b1 from '@site/static/images/guides/best-practices/sparse-primary-indexes-12b-1.png'; -import sparsePrimaryIndexes12b2 from '@site/static/images/guides/best-practices/sparse-primary-indexes-12b-2.png'; -import sparsePrimaryIndexes12c1 from '@site/static/images/guides/best-practices/sparse-primary-indexes-12c-1.png'; -import sparsePrimaryIndexes12c2 from '@site/static/images/guides/best-practices/sparse-primary-indexes-12c-2.png'; -import sparsePrimaryIndexes13a from '@site/static/images/guides/best-practices/sparse-primary-indexes-13a.png'; -import sparsePrimaryIndexes14a from '@site/static/images/guides/best-practices/sparse-primary-indexes-14a.png'; -import sparsePrimaryIndexes14b from '@site/static/images/guides/best-practices/sparse-primary-indexes-14b.png'; -import sparsePrimaryIndexes15a from '@site/static/images/guides/best-practices/sparse-primary-indexes-15a.png'; -import sparsePrimaryIndexes15b from '@site/static/images/guides/best-practices/sparse-primary-indexes-15b.png'; -import Image from '@theme/IdealImage'; - - - -# ClickHouseにおける主キーインデックスの実用的な導入 -## はじめに {#introduction} - -このガイドでは、ClickHouseのインデックスについて詳しく掘り下げていきます。以下について詳細に説明し、議論します: -- [ClickHouseにおけるインデクシングが従来のリレーショナルデータベース管理システムとどのように異なるか](#an-index-design-for-massive-data-scales) -- [ClickHouseがテーブルのスパース主キーインデックスをどのように構築し使用しているか](#a-table-with-a-primary-key) -- [ClickHouseにおけるインデクシングのベストプラクティスは何か](#using-multiple-primary-indexes) - -このガイドに記載されているすべてのClickHouse SQLステートメントとクエリを自分のマシンで実行することもできます。 -ClickHouseのインストールと開始方法については、[クイックスタート](/quick-start.mdx)を参照してください。 - -:::note -このガイドはClickHouseのスパース主キーインデックスに焦点を当てています。 - -ClickHouseの[セカンダリーデータスキッピングインデックス](/engines/table-engines/mergetree-family/mergetree.md/#table_engine-mergetree-data_skipping-indexes)については、[チュートリアル](/guides/best-practices/skipping-indexes.md)を参照してください。 -::: -### データセット {#data-set} - -このガイドでは、サンプルの匿名化されたウェブトラフィックデータセットを使用します。 - -- サンプルデータセットから887万行(イベント)のサブセットを使用します。 -- 圧縮されていないデータサイズは887万イベントで約700MBです。ClickHouseに保存すると圧縮後は200MBになります。 -- サブセットの各行には、特定の時刻にURL(`URL`カラム)をクリックしたインターネットユーザー(`UserID`カラム)を示す3つのカラムがあります。 - -これら3つのカラムを使用して、次のような典型的なウェブ分析クエリをすでに策定できます: - -- 「特定のユーザーにとって最もクリックされた上位10のURLは何ですか?」 -- 「特定のURLを最も頻繁にクリックした上位10のユーザーは誰ですか?」 -- 「ユーザーが特定のURLをクリックする際の最も人気のある時間(例えば、曜日)は何ですか?」 -### テストマシン {#test-machine} - -このドキュメントに示すすべての実行時間数値は、Apple M1 Proチップを搭載したMacBook Pro上でClickHouse 22.2.1をローカルで実行したものです。 -### フルテーブルスキャン {#a-full-table-scan} - 
-主キーなしでデータセット上でクエリがどのように実行されるかを見るために、次のSQL DDLステートメントを実行してテーブル(MergeTreeテーブルエンジンを使用)を作成します: - -```sql -CREATE TABLE hits_NoPrimaryKey -( - `UserID` UInt32, - `URL` String, - `EventTime` DateTime -) -ENGINE = MergeTree -PRIMARY KEY tuple(); -``` - -次に、次のSQL挿入ステートメントを使用して、ヒットデータセットのサブセットをテーブルに挿入します。 -これは、クリックハウスのリモートホストにホストされているフルデータセットのサブセットをロードするために[URLテーブル関数](/sql-reference/table-functions/url.md)を使用します: - -```sql -INSERT INTO hits_NoPrimaryKey SELECT - intHash32(UserID) AS UserID, - URL, - EventTime -FROM url('https://datasets.clickhouse.com/hits/tsv/hits_v1.tsv.xz', 'TSV', 'WatchID UInt64, JavaEnable UInt8, Title String, GoodEvent Int16, EventTime DateTime, EventDate Date, CounterID UInt32, ClientIP UInt32, ClientIP6 FixedString(16), RegionID UInt32, UserID UInt64, CounterClass Int8, OS UInt8, UserAgent UInt8, URL String, Referer String, URLDomain String, RefererDomain String, Refresh UInt8, IsRobot UInt8, RefererCategories Array(UInt16), URLCategories Array(UInt16), URLRegions Array(UInt32), RefererRegions Array(UInt32), ResolutionWidth UInt16, ResolutionHeight UInt16, ResolutionDepth UInt8, FlashMajor UInt8, FlashMinor UInt8, FlashMinor2 String, NetMajor UInt8, NetMinor UInt8, UserAgentMajor UInt16, UserAgentMinor FixedString(2), CookieEnable UInt8, JavascriptEnable UInt8, IsMobile UInt8, MobilePhone UInt8, MobilePhoneModel String, Params String, IPNetworkID UInt32, TraficSourceID Int8, SearchEngineID UInt16, SearchPhrase String, AdvEngineID UInt8, IsArtifical UInt8, WindowClientWidth UInt16, WindowClientHeight UInt16, ClientTimeZone Int16, ClientEventTime DateTime, SilverlightVersion1 UInt8, SilverlightVersion2 UInt8, SilverlightVersion3 UInt32, SilverlightVersion4 UInt16, PageCharset String, CodeVersion UInt32, IsLink UInt8, IsDownload UInt8, IsNotBounce UInt8, FUniqID UInt64, HID UInt32, IsOldCounter UInt8, IsEvent UInt8, IsParameter UInt8, DontCountHits UInt8, WithHash UInt8, HitColor FixedString(1), UTCEventTime DateTime, Age UInt8, Sex UInt8, Income UInt8, Interests UInt16, Robotness UInt8, GeneralInterests Array(UInt16), RemoteIP UInt32, RemoteIP6 FixedString(16), WindowName Int32, OpenerName Int32, HistoryLength Int16, BrowserLanguage FixedString(2), BrowserCountry FixedString(2), SocialNetwork String, SocialAction String, HTTPError UInt16, SendTiming Int32, DNSTiming Int32, ConnectTiming Int32, ResponseStartTiming Int32, ResponseEndTiming Int32, FetchTiming Int32, RedirectTiming Int32, DOMInteractiveTiming Int32, DOMContentLoadedTiming Int32, DOMCompleteTiming Int32, LoadEventStartTiming Int32, LoadEventEndTiming Int32, NSToDOMContentLoadedTiming Int32, FirstPaintTiming Int32, RedirectCount Int8, SocialSourceNetworkID UInt8, SocialSourcePage String, ParamPrice Int64, ParamOrderID String, ParamCurrency FixedString(3), ParamCurrencyID UInt16, GoalsReached Array(UInt32), OpenstatServiceName String, OpenstatCampaignID String, OpenstatAdID String, OpenstatSourceID String, UTMSource String, UTMMedium String, UTMCampaign String, UTMContent String, UTMTerm String, FromTag String, HasGCLID UInt8, RefererHash UInt64, URLHash UInt64, CLID UInt32, YCLID UInt64, ShareService String, ShareURL String, ShareTitle String, ParsedParams Nested(Key1 String, Key2 String, Key3 String, Key4 String, Key5 String, ValueDouble Float64), IslandID FixedString(16), RequestNum UInt32, RequestTry UInt8') -WHERE URL != ''; -``` -応答は次のようになります: -```response -Ok. - -0 rows in set. Elapsed: 145.993 sec. Processed 8.87 million rows, 18.40 GB (60.78 thousand rows/s., 126.06 MB/s.) 
-``` - -ClickHouseクライアントの結果出力は、上記のステートメントがテーブルに887万行挿入されたことを示しています。 - -最後に、後の議論を簡素化し、図や結果を再現可能にするために、FINALキーワードを使用してテーブルを[最適化](/sql-reference/statements/optimize.md)します: - -```sql -OPTIMIZE TABLE hits_NoPrimaryKey FINAL; -``` - -:::note -一般的には、データをロードした後にすぐにテーブルを最適化することは必要も推奨もされません。この例でこれが必要な理由は明らかになります。 -::: - -次に、最初のウェブ分析クエリを実行します。以下は、ユーザーID 749927693を持つインターネットユーザーのために最もクリックされた上位10のURLを計算します: - -```sql -SELECT URL, count(URL) as Count -FROM hits_NoPrimaryKey -WHERE UserID = 749927693 -GROUP BY URL -ORDER BY Count DESC -LIMIT 10; -``` -応答は次のようになります: -```response -┌─URL────────────────────────────┬─Count─┐ -│ http://auto.ru/chatay-barana.. │ 170 │ -│ http://auto.ru/chatay-id=371...│ 52 │ -│ http://public_search │ 45 │ -│ http://kovrik-medvedevushku-...│ 36 │ -│ http://forumal │ 33 │ -│ http://korablitz.ru/L_1OFFER...│ 14 │ -│ http://auto.ru/chatay-id=371...│ 14 │ -│ http://auto.ru/chatay-john-D...│ 13 │ -│ http://auto.ru/chatay-john-D...│ 10 │ -│ http://wot/html?page/23600_m...│ 9 │ -└────────────────────────────────┴───────┘ - -10 rows in set. Elapsed: 0.022 sec. - -# highlight-next-line -Processed 8.87 million rows, -70.45 MB (398.53 million rows/s., 3.17 GB/s.) -``` - -ClickHouseクライアントの結果出力は、ClickHouseがフルテーブルスキャンを実行したことを示しています!私たちのテーブルの887万行の各行がClickHouseにストリームされました。これはスケールしません。 - -これを(大幅に)効率的かつ(はるかに)高速にするためには、適切な主キーを持つテーブルを使用する必要があります。これにより、ClickHouseは自動的に(主キーのカラムに基づいて)スパース主キーインデックスを作成し、それを使用して例のクエリの実行速度を大幅に向上させることができます。 -### 関連コンテンツ {#related-content} -- ブログ: [ClickHouseのクエリを超高速化する](https://clickhouse.com/blog/clickhouse-faster-queries-with-projections-and-primary-indexes) -## ClickHouseインデックス設計 {#clickhouse-index-design} -### 大規模データスケールのためのインデックス設計 {#an-index-design-for-massive-data-scales} - -従来のリレーショナルデータベース管理システムでは、主インデックスにはテーブル行ごとに1つのエントリが含まれます。これにより、主インデックスには887万エントリが含まれることになります。このようなインデックスは特定の行を迅速に特定することができるため、ルックアップクエリやポイントアップデートに対して高い効率をもたらします。`B(+)-Tree`データ構造でエントリを検索する平均時間計算量は`O(log n)`です;より正確には、`log_b n = log_2 n / log_2 b`であり、ここで`b`は`B(+)-Tree`の分岐因子、`n`はインデックスされた行の数です。通常、`b`は数百から数千の間にあるため、`B(+)-Trees`は非常に浅い構造であり、レコードを特定するために必要なディスクシークは少数です。887万行と分岐因子が1000の場合、平均して2.3回のディスクシークが必要です。この能力にはコストが伴います:新しい行をテーブルに追加し、インデックスにエントリを追加する際の追加的なディスクおよびメモリーオーバーヘッド、挿入コストの増加、時にはB-Treeの再バランス。 - -B-Treeインデックスに関連する課題を考慮すると、ClickHouseのテーブルエンジンは異なるアプローチを利用しています。ClickHouseの[MergeTreeエンジンファミリー](/engines/table-engines/mergetree-family/index.md)は、大量のデータボリュームを処理するために設計および最適化されています。これらのテーブルは、毎秒数百万の行挿入を受け取り、非常に大きな(100ペタバイト以上の)データボリュームを保存するように設計されています。データは、バックグラウンドで部分を結合するルールが適用されながら、テーブルに[部分ごとに](/engines/table-engines/mergetree-family/mergetree.md/#mergetree-data-storage)迅速に書き込まれます。ClickHouseでは、各部分にそれぞれの主インデックスがあります。部分がマージされると、マージされた部分の主インデックスもマージされます。ClickHouseが設計された非常に大きなスケールにおいて、ディスクとメモリーの効率が非常に重要です。したがって、すべての行をインデックスするのではなく、部分の主インデックスは行のグループ(「グラニュール」と呼ばれる)ごとに1つのインデックスエントリ(「マーク」と呼ばれる)を持ちます。このテクニックは**スパースインデックス**と呼ばれます。 - -スパースインデクシングが可能なのは、ClickHouseが部分の行を主キーのカラムに基づいてディスクに順序付けて保存しているためです。単一の行を直接特定する代わりに(B-Treeベースのインデックスのように)、スパース主インデックスはインデックスエントリのバイナリ検索を介して迅速に一致する可能性がある行のグループを特定できます。見つかった一致する可能性のある行のグループ(グラニュール)は、その後ClickHouseエンジンに並行してストリーミングされて一致を見つけます。このインデックス設計により、主インデックスは小さく(完全にメインメモリにフィットすることが可能であり、及びそれが必要です)、クエリ実行時間を大幅に短縮します:特にデータ分析のユースケースにおいて典型的な範囲クエリの場合に。 - -以下に、ClickHouseがスパース主インデックスを構築し使用する方法を詳しく示します。後のセクションでは、インデックスを構築するために使用されるテーブルカラム(主キーのカラム)の選択、削除、順序付けのベストプラクティスについて議論します。 -### 主キーを持つテーブル {#a-table-with-a-primary-key} - -UserIDとURLのキーカラムを持つ複合主キーのあるテーブルを作成します: - -```sql -CREATE TABLE hits_UserID_URL -( - `UserID` UInt32, - `URL` String, - `EventTime` DateTime -) -ENGINE = MergeTree --- 
highlight-next-line -PRIMARY KEY (UserID, URL) -ORDER BY (UserID, URL, EventTime) -SETTINGS index_granularity = 8192, index_granularity_bytes = 0, compress_primary_key = 0; -``` - -[//]: # (
) -
-**DDLステートメントの詳細**
-
-後の議論を簡素化し、図や結果を再現可能にするために、DDLステートメントは:
-
-- `ORDER BY` 句でテーブルの複合ソートキーを指定しています。
-- 以下の設定によって、主インデックスが持つインデックスエントリの数を明示的に制御しています:
-  - `index_granularity`:デフォルト値である8192を明示的に指定しています。
-  - `index_granularity_bytes`:0に設定して適応インデックスグラニュラリティを無効にしています(このガイドの議論を簡素化し、図や結果を再現可能にするためです)。
-  - `compress_primary_key`:0に設定して主インデックスの圧縮を無効にしています。
-
-上記のDDLステートメントの主キーは、指定された2つのキーカラムに基づいて主インデックスを作成する原因となります。
-
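-なお、作成されたテーブルに実際に設定された主キーとソートキーは、`system.tables` を参照して確認できます。以下は確認用のスケッチであり、このガイドの手順に必須のものではありません:
-
-```sql
---- 確認用スケッチ:hits_UserID_URL に定義された主キーとソートキーを表示する
-SELECT
-    name,
-    primary_key,
-    sorting_key
-FROM system.tables
-WHERE database = currentDatabase() AND name = 'hits_UserID_URL';
-```
-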
-次にデータを挿入します: - -```sql -INSERT INTO hits_UserID_URL SELECT - intHash32(UserID) AS UserID, - URL, - EventTime -FROM url('https://datasets.clickhouse.com/hits/tsv/hits_v1.tsv.xz', 'TSV', 'WatchID UInt64, JavaEnable UInt8, Title String, GoodEvent Int16, EventTime DateTime, EventDate Date, CounterID UInt32, ClientIP UInt32, ClientIP6 FixedString(16), RegionID UInt32, UserID UInt64, CounterClass Int8, OS UInt8, UserAgent UInt8, URL String, Referer String, URLDomain String, RefererDomain String, Refresh UInt8, IsRobot UInt8, RefererCategories Array(UInt16), URLCategories Array(UInt16), URLRegions Array(UInt32), RefererRegions Array(UInt32), ResolutionWidth UInt16, ResolutionHeight UInt16, ResolutionDepth UInt8, FlashMajor UInt8, FlashMinor UInt8, FlashMinor2 String, NetMajor UInt8, NetMinor UInt8, UserAgentMajor UInt16, UserAgentMinor FixedString(2), CookieEnable UInt8, JavascriptEnable UInt8, IsMobile UInt8, MobilePhone UInt8, MobilePhoneModel String, Params String, IPNetworkID UInt32, TraficSourceID Int8, SearchEngineID UInt16, SearchPhrase String, AdvEngineID UInt8, IsArtifical UInt8, WindowClientWidth UInt16, WindowClientHeight UInt16, ClientTimeZone Int16, ClientEventTime DateTime, SilverlightVersion1 UInt8, SilverlightVersion2 UInt8, SilverlightVersion3 UInt32, SilverlightVersion4 UInt16, PageCharset String, CodeVersion UInt32, IsLink UInt8, IsDownload UInt8, IsNotBounce UInt8, FUniqID UInt64, HID UInt32, IsOldCounter UInt8, IsEvent UInt8, IsParameter UInt8, DontCountHits UInt8, WithHash UInt8, HitColor FixedString(1), UTCEventTime DateTime, Age UInt8, Sex UInt8, Income UInt8, Interests UInt16, Robotness UInt8, GeneralInterests Array(UInt16), RemoteIP UInt32, RemoteIP6 FixedString(16), WindowName Int32, OpenerName Int32, HistoryLength Int16, BrowserLanguage FixedString(2), BrowserCountry FixedString(2), SocialNetwork String, SocialAction String, HTTPError UInt16, SendTiming Int32, DNSTiming Int32, ConnectTiming Int32, ResponseStartTiming Int32, ResponseEndTiming Int32, FetchTiming Int32, RedirectTiming Int32, DOMInteractiveTiming Int32, DOMContentLoadedTiming Int32, DOMCompleteTiming Int32, LoadEventStartTiming Int32, LoadEventEndTiming Int32, NSToDOMContentLoadedTiming Int32, FirstPaintTiming Int32, RedirectCount Int8, SocialSourceNetworkID UInt8, SocialSourcePage String, ParamPrice Int64, ParamOrderID String, ParamCurrency FixedString(3), ParamCurrencyID UInt16, GoalsReached Array(UInt32), OpenstatServiceName String, OpenstatCampaignID String, OpenstatAdID String, OpenstatSourceID String, UTMSource String, UTMMedium String, UTMCampaign String, UTMContent String, UTMTerm String, FromTag String, HasGCLID UInt8, RefererHash UInt64, URLHash UInt64, CLID UInt32, YCLID UInt64, ShareService String, ShareURL String, ShareTitle String, ParsedParams Nested(Key1 String, Key2 String, Key3 String, Key4 String, Key5 String, ValueDouble Float64), IslandID FixedString(16), RequestNum UInt32, RequestTry UInt8') -WHERE URL != ''; -``` -応答は次のようになります: -```response -0 rows in set. Elapsed: 149.432 sec. Processed 8.87 million rows, 18.40 GB (59.38 thousand rows/s., 123.16 MB/s.) -``` - - -
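-挿入が完了したことは、例えば次のような簡単なクエリで確認できます(任意の確認手順であり、このガイドの本筋には影響しません):
-
-```sql
---- 挿入された行数を確認する(約887万行になるはずです)
-SELECT formatReadableQuantity(count()) FROM hits_UserID_URL;
-```
-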
-テーブルを最適化します: - -```sql -OPTIMIZE TABLE hits_UserID_URL FINAL; -``` - -
-次のクエリを使用してテーブルのメタデータを取得できます: - -```sql -SELECT - part_type, - path, - formatReadableQuantity(rows) AS rows, - formatReadableSize(data_uncompressed_bytes) AS data_uncompressed_bytes, - formatReadableSize(data_compressed_bytes) AS data_compressed_bytes, - formatReadableSize(primary_key_bytes_in_memory) AS primary_key_bytes_in_memory, - marks, - formatReadableSize(bytes_on_disk) AS bytes_on_disk -FROM system.parts -WHERE (table = 'hits_UserID_URL') AND (active = 1) -FORMAT Vertical; -``` - -応答は次のようになります: - -```response -part_type: Wide -path: ./store/d9f/d9f36a1a-d2e6-46d4-8fb5-ffe9ad0d5aed/all_1_9_2/ -rows: 8.87 million -data_uncompressed_bytes: 733.28 MiB -data_compressed_bytes: 206.94 MiB -primary_key_bytes_in_memory: 96.93 KiB -marks: 1083 -bytes_on_disk: 207.07 MiB - - -1 rows in set. Elapsed: 0.003 sec. -``` - -ClickHouseクライアントの出力は次のことを示しています: - -- テーブルのデータは、ディスク上の特定のディレクトリに[広い形式](/engines/table-engines/mergetree-family/mergetree.md/#mergetree-data-storage)で保存されており、そのディレクトリ内にはテーブルカラムごとに1つのデータファイル(および1つのマークファイル)があります。 -- テーブルには887万行があります。 -- すべての行の圧縮されていないデータサイズは733.28MBです。 -- すべての行のディスク上の圧縮サイズは206.94MBです。 -- テーブルには1083エントリ(「マーク」と呼ばれる)の主インデックスがあり、そのインデックスのサイズは96.93KBです。 -- 合計で、テーブルのデータとマークファイル、および主インデックスファイルはディスク上で207.07MBを占めています。 -### データは主キーのカラムによって順序付けられてディスクに保存される {#data-is-stored-on-disk-ordered-by-primary-key-columns} - -上記で作成したテーブルは以下の特性を持っています: -- 複合[主キー](/engines/table-engines/mergetree-family/mergetree.md/#primary-keys-and-indexes-in-queries) `(UserID, URL)`と -- 複合[ソートキー](/engines/table-engines/mergetree-family/mergetree.md/#choosing-a-primary-key-that-differs-from-the-sorting-key) `(UserID, URL, EventTime)`。 - -:::note -- もしソートキーのみを指定していた場合、主キーは暗黙的にソートキーと等しいと定義されます。 -- メモリ効率を高めるために、クエリがフィルタリングするカラムのみを含む主キーを明示的に指定しました。主キーに基づく主インデックスは、完全にメインメモリにロードされています。 -- ガイドの図や情報の一貫性を確保し、圧縮率を最適化するため、すべてのテーブルカラムを含む別のソートキーを定義しました(同じカラムに類似のデータが近接すればするほど、例えばソートを行うことで、データはより良く圧縮されます)。 -- 両方が指定されている場合、主キーはソートキーのプレフィックスである必要があります。 -::: - -挿入された行は、主キーのカラム(およびソートキーの追加的な `EventTime` カラム)によって、ディスク上で辞書式順序(昇順)で保存されています。 - -:::note -ClickHouseは、同一の主キーのカラム値を持つ複数の行を挿入することを許可します。この場合(以下に図の行1と行2を参照)、最終的な順番は指定されたソートキーによって決まるため、`EventTime`カラムの値によって決まります。 -::: - -ClickHouseは列指向のデータベース管理システムです。以下の図に示すように -- ディスク上の表現では、各テーブルカラムに対して1つのデータファイル(*.bin)があり、すべての値は圧縮された形式で保存され、 -- 887万の行はディスク上で主キーのカラム(および追加のソートキーのカラム)によって辞書式昇順で保存されます。すなわち、この場合 - - 最初は `UserID`によって、 - - 次は `URL`によって、 - - 最後に `EventTime`によって: - -Sparse Primary Indices 01 - -`UserID.bin`、`URL.bin`、および `EventTime.bin` は、`UserID`、`URL`、および `EventTime`カラムの値が保存されるディスク上のデータファイルです。 - -:::note -- 主キーはディスク上の行の辞書式順序を定義するため、テーブルには1つの主キーしか持てません。 -- 行を0から始まる番号付けしているのは、ClickHouseの内部行番号付けスキームと一致させ、ログメッセージにも使用されるためです。 -::: -### データは並列データ処理のためにグラニュールに整理される {#data-is-organized-into-granules-for-parallel-data-processing} - -データ処理の目的のために、テーブルのカラムの値は論理的にグラニュールに分割されます。 -グラニュールはClickHouseにストリーミングされる最小の不可分なデータセットです。 -これにより、数個の行を読み取るのではなく、ClickHouseは常に行のグループ(グラニュール)全体をストリーミング方式かつ並行して読み取ります。 -:::note -カラムの値は物理的にグラニュール内に保存されるわけではありません:グラニュールはクエリ処理のためのカラム値の論理的な定義です。 -::: - -以下の図は、当テーブルの8.87百万行の(カラムの値)が、テーブルのDDLステートメントに`index_granularity`(デフォルト値の8192に設定)を含むことから、1083グラニュールに整理される様子を示しています。 - -Sparse Primary Indices 02 - -最初の(物理的なディスク上の順序に基づく)8192行(そのカラムの値)は論理的にグラニュール0に属し、その後の8192行(そのカラムの値)はグラニュール1に属します。 - -:::note -- 最後のグラニュール(グラニュール1082)は、8192行未満を「含む」ことがあります。 -- このガイドの冒頭で「DDLステートメントの詳細」において、私たちは[適応インデックスグラニュラティ](/whats-new/changelog/2019.md/#experimental-features-1)を無効にしたことに言及しました(ガイドの議論を簡素化し、図や結果を再現可能にするために)。 - - したがって、私たちの例のテーブルのすべてのグラニュール(最後のものを除く)のサイズは同じです。 - -- 
適応インデックスグラニュラティを持つテーブルの場合(index granularityは[デフォルトで適応的](/operations/settings/merge-tree-settings#index_granularity_bytes)であり)、一部のグラニュールのサイズは8192行より少なくなる場合があります。 - -- 私たちは主キーのカラム(`UserID`、`URL`)の一部のカラム値をオレンジでマーキングしています。 - これらのオレンジでマークされたカラム値は、各グラニュールの最初の行の主キーのカラム値になります。 - 以下で見ていくように、これらのオレンジでマークされたカラム値はテーブルの主インデックスのエントリになります。 - -- グラニュールには0から番号を付けており、ClickHouseの内部の番号付けスキームと一致させ、ログメッセージにも使用されます。 -::: -### 主インデックスはグラニュールごとに1つのエントリを持つ {#the-primary-index-has-one-entry-per-granule} - -主インデックスは、上記の図に示すグラニュールに基づいて作成されます。このインデックスは圧縮されていないフラットな配列ファイル(primary.idx)であり、0から始まるいわゆる数値インデックスマークを含みます。 - -以下の図は、インデックスが各グラニュールの最初の行の主キーのカラム値(上記の図でオレンジでマークされた値)を保存していることを示しています。 -言い換えれば:主インデックスは、テーブルのすべての8192行における主キーのカラム値を保存しています(物理的な行順序に基づいて主キーのカラムによって定義されます)。 -例えば、 -- 最初のインデックスエントリ(上の図で「マーク0」と呼ばれる)は、上の図でグラニュール0の最初の行のキーのカラム値を保存しています。 -- 2番目のインデックスエントリ(上の図で「マーク1」と呼ばれる)は、上の図でグラニュール1の最初の行のキーのカラム値を保存しています、そして続きます。 - -Sparse Primary Indices 03a - -私たちのテーブルには887万行と1083グラニュールがあるため、インデックスには合計1083エントリがあります: - -Sparse Primary Indices 03b - -:::note -- [適応インデックスグラニュラティ](/whats-new/changelog/2019.md/#experimental-features-1)を持つテーブルの場合、テーブルの最後の行の主キーのカラム値を記録する1つの「最終」の追加マークも主インデックスに保存されますが、適応インデックスグラニュラティを無効にしたため(このガイドの議論を簡素化し、図や結果を再現可能にするため)、私たちの例のテーブルのインデックスにはこの最終のマークは含まれていません。 - -- 主インデックスファイルは完全にメインメモリにロードされます。ファイルのサイズが利用可能な空きメモリのサイズを超える場合は、ClickHouseはエラーを発生させます。 -::: - -
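-主インデックスはメインメモリに常駐するため、各テーブルの主インデックスがどの程度メモリを消費しているかを把握しておくと便利です。以下は `system.parts` を使った確認用のスケッチです(使用しているカラムはこのガイドでも参照している実在のものですが、クエリ自体は説明用の一例です):
-
-```sql
---- アクティブなパートの主インデックスのメモリ使用量とマーク数をテーブル単位で集計する
-SELECT
-    database,
-    table,
-    formatReadableSize(sum(primary_key_bytes_in_memory)) AS primary_key_memory,
-    sum(marks) AS total_marks
-FROM system.parts
-WHERE active
-GROUP BY database, table
-ORDER BY sum(primary_key_bytes_in_memory) DESC;
-```
-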
-**主インデックスの内容を検査する**
-
-セルフマネージドのClickHouseクラスタ上で、以下の手順を踏むことで、例のテーブルの主インデックスのコンテンツを調査するために `file` テーブル関数を使用できます。
-
-そのために、まず、稼働中のクラスタのノードの `user_files_path` に主インデックスファイルをコピーする必要があります:
-
-- ステップ1:主インデックスファイルを含むパートのパスを取得します。
-  `SELECT path FROM system.parts WHERE table = 'hits_UserID_URL' AND active = 1`
-  はテストマシン上で `/Users/tomschreiber/Clickhouse/store/85f/85f4ee68-6e28-4f08-98b1-7d8affa1d88c/all_1_9_4` を返します。
-- ステップ2:`user_files_path` を取得します。
-  Linuxでのデフォルトの `user_files_path` は `/var/lib/clickhouse/user_files/` であり、変更されているかどうかは `$ grep user_files_path /etc/clickhouse-server/config.xml` で確認できます。テストマシン上のパスは `/Users/tomschreiber/Clickhouse/user_files/` です。
-- ステップ3:主インデックスファイルを `user_files_path` にコピーします。
-  `cp /Users/tomschreiber/Clickhouse/store/85f/85f4ee68-6e28-4f08-98b1-7d8affa1d88c/all_1_9_4/primary.idx /Users/tomschreiber/Clickhouse/user_files/primary-hits_UserID_URL.idx`
-
-これで、SQLを介して主インデックスの内容を検査できます:
-
-- エントリの数を取得します:
-  `SELECT count() FROM file('primary-hits_UserID_URL.idx', 'RowBinary', 'UserID UInt32, URL String');`
-  は `1083` を返します。
-- 最初の2つのインデックスマークを取得します:
-  `SELECT UserID, URL FROM file('primary-hits_UserID_URL.idx', 'RowBinary', 'UserID UInt32, URL String') LIMIT 0, 2;`
-  は `240923, http://showtopics.html%3...` と `4073710, http://mk.ru&pos=3_0` を返します。
-- 最後のインデックスマークを取得します:
-  `SELECT UserID, URL FROM file('primary-hits_UserID_URL.idx', 'RowBinary', 'UserID UInt32, URL String') LIMIT 1082, 1;`
-  は `4292714039 │ http://sosyal-mansetleri...` を返します。
-
-これは、私たちの例のテーブルの主インデックス内容の図と正確に一致します:
-
- -主キーエントリはインデックスマークと呼ばれます。なぜなら、各インデックスエントリが特定のデータ範囲の開始を示すためです。具体的には例のテーブルに関して: -- UserIDインデックスマーク: - - 主インデックスに保存された`UserID`の値は昇順にソートされています。
- 上記の図の「マーク1」は、グラニュール1のすべてのテーブル行の`UserID`の値、およびすべての後続のグラニュールの`UserID`の値が4.073.710以上であることを保証します。 - - [後で確認するように](#the-primary-index-is-used-for-selecting-granules)、このグローバルな順序により、ClickHouseはクエリが主キーの最初のカラムでフィルタリングされるときにインデックスマークに対してバイナリサーチアルゴリズムを使用することができるからです。 - -- URLインデックスマーク: - - 主キーのカラム`UserID`と`URL`の類似の基数により、一般的に主キーの最初のカラムの後に位置するすべてのキーカラムのインデックスマークは、前のキーのカラム値がグラニュール内のすべてのテーブル行で同じである限りデータ範囲を示します。
- 例えば、上記の図でマーク0とマーク1のUserID値が異なるため、ClickHouseはグラニュール0内のすべてのテーブル行のURLの値が`'http://showtopics.html%3...'`以上であるとは仮定できません。しかし、上記の図でマーク0とマーク1のUserID値が同じであれば(すなわち、UserIDの値がグラニュール0内のすべてのテーブル行で同じであれば)、ClickHouseはグラニュール0内のすべてのテーブル行のURLの値が`'http://showtopics.html%3...'`以上であると仮定できたでしょう。 - - これは、クエリ実行パフォーマンスに対しての影響について、後で詳しく説明します。 -``` -### 主キーはグラニュールを選択するために使用されます {#the-primary-index-is-used-for-selecting-granules} - -現在、主キーのサポートを使用してクエリを実行できます。 - -次のクエリは、UserID 749927693 の上位 10 件のクリックされた URL を計算します。 - -```sql -SELECT URL, count(URL) AS Count -FROM hits_UserID_URL -WHERE UserID = 749927693 -GROUP BY URL -ORDER BY Count DESC -LIMIT 10; -``` - -返答は次のようになります: - -```response -┌─URL────────────────────────────┬─Count─┐ -│ http://auto.ru/chatay-barana.. │ 170 │ -│ http://auto.ru/chatay-id=371...│ 52 │ -│ http://public_search │ 45 │ -│ http://kovrik-medvedevushku-...│ 36 │ -│ http://forumal │ 33 │ -│ http://korablitz.ru/L_1OFFER...│ 14 │ -│ http://auto.ru/chatay-id=371...│ 14 │ -│ http://auto.ru/chatay-john-D...│ 13 │ -│ http://auto.ru/chatay-john-D...│ 10 │ -│ http://wot/html?page/23600_m...│ 9 │ -└────────────────────────────────┴───────┘ - -10 行がセットにあります。経過時間: 0.005 秒。 - -# highlight-next-line -処理された行数: 8.19 千, -740.18 KB (1.53 百万行/s., 138.59 MB/s.) -``` - -ClickHouse クライアントの出力は、フルテーブルスキャンを実行する代わりに、8.19 千の行のみが ClickHouse にストリーミングされたことを示しています。 - -もし トレースログ が有効になっていると、ClickHouse サーバーログファイルは ClickHouse が 1083 の UserID インデックスマークに対して 二分探索 を実行して、`749927693` の UserID カラム値を持つ行を含んでいる可能性のあるグラニュールを特定したことを示しています。これには平均で `O(log2 n)` の時間計算量を必要とします: -```response -...Executor): Key condition: (column 0 in [749927693, 749927693]) - -# highlight-next-line -...Executor): Running binary search on index range for part all_1_9_2 (1083 marks) -...Executor): Found (LEFT) boundary mark: 176 -...Executor): Found (RIGHT) boundary mark: 177 -...Executor): Found continuous range in 19 steps -...Executor): Selected 1/1 parts by partition key, 1 parts by primary key, - -# highlight-next-line - 1/1083 marks by primary key, 1 marks to read from 1 ranges -...Reading ...approx. 8192 rows starting from 1441792 -``` - -上記のトレースログから、1083 の既存のマークのうち 1 つがクエリを満たしていることが分かります。 - -
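-このようなサーバーログは、`clickhouse client` でセッションの `send_logs_level` 設定を `'trace'` にすると、クライアント側にそのままストリーミングして確認することもできます(設定名は実在しますが、以下はあくまで説明用の一例です):
-
-```sql
---- サーバーログを trace レベルでクライアントにストリーミングしてからクエリを実行する
-SET send_logs_level = 'trace';
-
-SELECT URL, count(URL) AS Count
-FROM hits_UserID_URL
-WHERE UserID = 749927693
-GROUP BY URL
-ORDER BY Count DESC
-LIMIT 10;
-```
-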
- - トレースログの詳細 - -

- -マーク 176 が特定されました(「見つかった左境界マーク」は包含的で、「見つかった右境界マーク」は排他的です)、したがって、グラニュール 176 からのすべての 8192 行(これは行 1.441.792 から始まります - これは後でこのガイドで確認します)が ClickHouse にストリーミングされ、`749927693` の UserID カラム値を持つ実際の行が見つかります。 -

-
- -この例のクエリで EXPLAIN 句 を使用してこれを再現することもできます: -```sql -EXPLAIN indexes = 1 -SELECT URL, count(URL) AS Count -FROM hits_UserID_URL -WHERE UserID = 749927693 -GROUP BY URL -ORDER BY Count DESC -LIMIT 10; -``` - -返答は次のようになります: - -```response -┌─explain───────────────────────────────────────────────────────────────────────────────┐ -│ Expression (Projection) │ -│ Limit (preliminary LIMIT (without OFFSET)) │ -│ Sorting (Sorting for ORDER BY) │ -│ Expression (Before ORDER BY) │ -│ Aggregating │ -│ Expression (Before GROUP BY) │ -│ Filter (WHERE) │ -│ SettingQuotaAndLimits (Set limits and quota after reading from storage) │ -│ ReadFromMergeTree │ -│ Indexes: │ -│ PrimaryKey │ -│ Keys: │ -│ UserID │ -│ Condition: (UserID in [749927693, 749927693]) │ -│ Parts: 1/1 │ - -# highlight-next-line -│ Granules: 1/1083 │ -└───────────────────────────────────────────────────────────────────────────────────────┘ - -16 行がセットにあります。経過時間: 0.003 秒。 -``` -クライアントの出力は、1083 のグラニュールのうち 1 つが UserID カラム値 749927693 を持つ行を含んでいる可能性があるとして選択されたことを示しています。 - -:::note 結論 -クエリが複合キーの一部であり、最初のキー列であるカラムをフィルタリングする場合、ClickHouse はキー列のインデックスマークの上で二分探索アルゴリズムを実行します。 -::: - -
- -上記で述べたように、ClickHouse は自社のスパース主インデックスを使用して、クエリに一致する可能性のある行を含むグラニュールを迅速に(二分探索を介して)選択しています。 - -これは ClickHouse のクエリ実行の **第一段階(グラニュール選択)** です。 - -**第二段階(データ読み取り)** では、ClickHouse は選択したグラニュールを見つけて、それらのすべての行を ClickHouse エンジンにストリーミングして、クエリに実際に一致する行を見つけるために使用します。 - -この第二段階について、次のセクションで詳しく説明します。 -### マークファイルはグラニュールを特定するために使用されます {#mark-files-are-used-for-locating-granules} - -以下の図は、私たちのテーブルの主インデックスファイルの一部を示しています。 - -Sparse Primary Indices 04 - -上記で述べたように、インデックスの 1083 の UserID マークに対する二分探索を通じて、マーク 176 が特定されました。したがって、対応するグラニュール 176 はおそらく UserID カラム値 749.927.693 を持つ行を含んでいる可能性があります。 - -
- - グラニュール選択の詳細 - -

- -上記の図は、マーク 176 が関連グラニュール 176 の最小 UserID 値が 749.927.693 より小さく、次のマーク(マーク 177)のグラニュール 177 の最小 UserID 値がこの値より大きいという最初のインデックスエントリであることを示しています。したがって、マーク 176 に対応するグラニュール 176 のみが UserID カラム値が 749.927.693 を持つ行を含んでいる可能性があります。 -

-
- -グラニュール 176 の中に UserID カラム値が 749.927.693 を持つ行が含まれているかどうかを確認するためには、このグラニュールに属するすべての 8192 行を ClickHouse にストリーミングする必要があります。 - -これを達成するために、ClickHouse はグラニュール 176 の物理的位置を知る必要があります。 - -ClickHouse では、テーブルのすべてのグラニュールの物理的位置がマークファイルに格納されています。データファイルと同様に、カラムごとに 1 つのマークファイルがあります。 - -以下の図は、テーブルの `UserID`、`URL`、および `EventTime` カラムのグラニュールの物理位置を保存している 3 つのマークファイル `UserID.mrk`、`URL.mrk`、および `EventTime.mrk` を示しています。 - -Sparse Primary Indices 05 - -主インデックスが 0 から始まる番号を付けられたインデックスマークを含むフラットな未圧縮配列ファイル (primary.idx) であることを説明してきました。 - -同様に、マークファイルも 0 から始まる番号が付けられたマークを含むフラットな未圧縮配列ファイル (*.mrk) です。 - -ClickHouse がマッチする可能性のある行を含むグラニュールのインデックスマークを特定して選択した後、マークファイルにおいて位置配列のルックアップが実行され、そのグラニュールの物理位置を取得します。 - -特定のカラムの各マークファイルエントリは、オフセットの形式で 2 つの位置を保存しています。 - -- 最初のオフセット(上記の図の「block_offset」)は、選択されたグラニュールの圧縮バージョンを含む ブロック が、圧縮されたカラムデータファイルの中でどこにあるかを指し示しています。この圧縮ブロックは、おそらくいくつかの圧縮されたグラニュールを含んでいます。見つかった圧縮ファイルブロックは、読み込み時に主メモリに展開されます。 - -- マークファイルの 2 番目のオフセット(上記の図の「granule_offset」)は、非圧縮ブロックデータ内のグラニュールの位置を提供します。 - -その後、見つかった非圧縮グラニュールに属するすべての 8192 行が、さらなる処理のために ClickHouse にストリーミングされます。 - - -:::note - -- [ワイドフォーマット](/engines/table-engines/mergetree-family/mergetree.md/#mergetree-data-storage)のテーブルで、[適応インデックス粒度](/whats-new/changelog/2019.md/#experimental-features-1)がない場合、ClickHouse は上記のように視覚化された `.mrk` マークファイルを使用し、各エントリには 8 バイトのアドレスが 2 つ含まれています。これらのエントリは、同じサイズを持つすべてのグラニュールの物理位置です。 - -インデックス粒度は [デフォルトで適応式](/operations/settings/merge-tree-settings#index_granularity_bytes)ですが、例のために、適応インデックス粒度を無効にしました(このガイドでの議論を簡素化し、図や結果を再現しやすくするため)。私たちのテーブルは、データのサイズが [min_bytes_for_wide_part](/operations/settings/merge-tree-settings#min_bytes_for_wide_part) より大きいため、ワイドフォーマットを使用しています(これはセルフマネージドクラスターのデフォルトで 10 MB です)。 - -- ワイドフォーマットのテーブルで、適応インデックス粒度がある場合、ClickHouse は `.mrk2` マークファイルを使用し、`.mrk` マークファイルと似たエントリを持っていますが、各エントリに対して追加の 3 番目の値、すなわち現在のエントリに関連するグラニュールの行数があります。 - -- [コンパクトフォーマット](/engines/table-engines/mergetree-family/mergetree.md/#mergetree-data-storage)のテーブルでは、ClickHouse は `.mrk3` マークファイルを使用します。 - -::: - - -:::note マークファイルの理由 - -なぜ主インデックスは、インデックスマークに対応するグラニュールの物理位置を直接含まないのでしょうか? 
- -ClickHouse が設計されている非常に大規模なスケールにおいては、非常にディスクおよびメモリ効率が良いことが重要です。 - -主インデックスファイルは主メモリに収まる必要があります。 - -私たちの例のクエリでは、ClickHouse は主インデックスを使用しておそらくマッチする行を含むことができる単一のグラニュールを選択しました。その単一のグラニュールのためにのみ、ClickHouse は対応する行をストリーミングするための物理位置が必要です。 - -さらに、このオフセット情報は、クエリに使用されていないカラム(例えば `EventTime`)には必要ありません。 - -サンプルクエリの場合、ClickHouse は UserID データファイル (UserID.bin) のグラニュール 176 の 2 つの物理位置オフセットと、URL データファイル (URL.bin) のグラニュール 176 の 2 つの物理位置オフセットのみが必要です。 - -マークファイルによって提供される間接性は、すべての 1083 グラニュールの物理位置のエントリを主インデックスの中に直接格納することを避けることで、メインメモリ内に不要な(使用されていない)データを持つことを回避します。 -::: - -以下の図とその後のテキストは、例のクエリのために ClickHouse が UserID.bin データファイル内のグラニュール 176 をどのように特定するかを示しています。 - -Sparse Primary Indices 06 - -このガイドで以前に述べたように、ClickHouse は主インデックスマーク 176 を選択し、したがって私たちのクエリに一致する行を含む可能性のあるグラニュール 176 を選択しました。 - -ClickHouse は今、選択されたマーク番号 (176) を使用して、UserID.mrk マークファイル内で位置配列ルックアップを行って、グラニュール 176 の位置を特定するための 2 つのオフセットを取得します。 - -示されているように、最初のオフセットは、UserID.bin データファイル内でグラニュール 176 の圧縮ファイルブロックを特定しています。 - -見つかったファイルブロックが主メモリに展開されると、マークファイルからの 2 番目のオフセットを使って、非圧縮データ内のグラニュール 176 を特定できます。 - -ClickHouse は UserID.bin データファイルと URL.bin データファイルの両方からグラニュール 176 を特定し(すべての値をストリーミングする)、サンプルクエリ(UserID 749.927.693 のインターネットユーザーの上位 10 件のクリックされた URL)を実行する必要があります。 - -上記の図は、ClickHouse が UserID.bin データファイルのグラニュールを特定する方法を示しています。 - -並行して、ClickHouse は URL.bin データファイルのグラニュール 176 に対しても同様の処理を行います。対応する 2 つのグラニュールは整列して ClickHouse エンジンにストリーミングされ、UserID が 749.927.693 であるすべての行の URL 値をグループごとに集約およびカウントし、最終的に 10 の最大の URL グループを降順で出力します。 -## 複数の主インデックスを使用する {#using-multiple-primary-indexes} - - -### 二次キー列は(非効率的)である可能性がある {#secondary-key-columns-can-not-be-inefficient} - - -クエリが複合キーの一部であり、最初のキー列であるカラムをフィルタリングしている場合、[ClickHouse はキー列のインデックスマークに対して二分探索アルゴリズムを実行します](#the-primary-index-is-used-for-selecting-granules)。 - -しかし、クエリが複合キーの一部であるが最初のキー列ではないカラムをフィルタリングする場合に何が起こるでしょうか? - -:::note -ここでは、クエリが最初のキー列ではなく、二次キー列でフィルタリングしているシナリオについて議論します。 - -クエリが最初のキー列とその後の任意のキー列でフィルタリングしている場合、ClickHouse は最初のキー列のインデックスマークに対して二分探索を実行します。 -::: - -
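-この注記の内容を確かめる一例として、最初のキーカラム `UserID` と 2 番目のキーカラム `URL` の両方でフィルタリングするクエリを考えることができます(このガイドで使用してきた値を流用した説明用のスケッチです):
-
-```sql
---- UserID(最初のキーカラム)と URL(2番目のキーカラム)の両方でフィルタリング
---- この場合、ClickHouse は UserID のインデックスマークに対して二分探索を実行できる
-SELECT count(*)
-FROM hits_UserID_URL
-WHERE UserID = 749927693 AND URL = 'http://public_search';
-```
-
-`EXPLAIN indexes = 1` を付けて実行すれば、前述の例と同様に、選択されたグラニュール数を確認できます。
-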
-
- - -次のクエリを使用して、最も頻繁に「http://public_search」の URL をクリックした上位 10 人のユーザーを計算します: - -```sql -SELECT UserID, count(UserID) AS Count -FROM hits_UserID_URL -WHERE URL = 'http://public_search' -GROUP BY UserID -ORDER BY Count DESC -LIMIT 10; -``` - -返答は次のとおりです: -```response -┌─────UserID─┬─Count─┐ -│ 2459550954 │ 3741 │ -│ 1084649151 │ 2484 │ -│ 723361875 │ 729 │ -│ 3087145896 │ 695 │ -│ 2754931092 │ 672 │ -│ 1509037307 │ 582 │ -│ 3085460200 │ 573 │ -│ 2454360090 │ 556 │ -│ 3884990840 │ 539 │ -│ 765730816 │ 536 │ -└────────────┴───────┘ - -10 行がセットにあります。経過時間: 0.086 秒。 - -# highlight-next-line -処理された行数: 8.81 百万, -799.69 MB (102.11 百万行/s., 9.27 GB/s.) -``` - -クライアント出力は、ClickHouse が複合主キーの一部である [URL カラム](#a-table-with-a-primary-key) に対してほぼフルテーブルスキャンを実行したことを示しています! ClickHouse は 887 万行のテーブルから 881 万行を読み取ります。 - -もし [trace_logging](/operations/server-configuration-parameters/settings#logger) が有効になっている場合、ClickHouse サーバーログファイルは、ClickHouse が 1083 の URL インデックスマークに対して 一般的な除外検索 を使用して、「http://public_search」という URL カラム値を持つ行を含む可能性のあるグラニュールを特定したことを示しています: -```response -...Executor): Key condition: (column 1 in ['http://public_search', - 'http://public_search']) - -# highlight-next-line -...Executor): Used generic exclusion search over index for part all_1_9_2 - with 1537 steps -...Executor): Selected 1/1 parts by partition key, 1 parts by primary key, - -# highlight-next-line - 1076/1083 marks by primary key, 1076 marks to read from 5 ranges -...Executor): Reading approx. 8814592 rows with 10 streams -``` -上記のサンプルトレースログから、1076(マークによる)マークのうちの 1083 が、マッチする URL 値を持つ行を含んでいる可能性があるとして選択されたことがわかります。 - -その結果、ClickHouse エンジンのために 881 万行がストリーミングされ(10 ストリームを使用して並列で)、実際に「http://public_search」という URL 値が含まれている行を特定します。 - -しかし、後で見ますが、その選択した 1076 のグラニュールのうち、実際に一致する行を持つのは 39 のグラニュールだけです。 - -複合主キー(UserID、URL)に基づく主インデックスは、特定の UserID 値を持つ行のフィルタリングを迅速に行うためには非常に便利でしたが、特定の URL 値を持つ行のフィルタリングのクエリを迅速に行う際には大きな助けにはなっていません。 - -その理由は、URL カラムが最初のキー列ではないため、ClickHouse は URL カラムのインデックスマークに対して一般的な除外検索アルゴリズム(代わりに二分検索)を使用しており、**そのアルゴリズムの効果は、URL カラムとその前のキー列である UserID との間の基数の違いに依存します**。 - -これを説明するために、一般的な除外検索がどのように機能するかの詳細をいくつか示します。 - - -### 一般的な除外検索アルゴリズム {#generic-exclusion-search-algorithm} - -以下は、ClickHouse の一般的な除外検索アルゴリズムが、前のキー列が低いまたは高い基数を持つ二次列でグラニュールが選択されるときにどのように機能するかを示しています。 - -どちらのケースについても、次の仮定をします: -- URL 値 = "W3" の行を検索するクエリ。 -- UserID および URL の簡略値を持つ抽象バージョンのヒットテーブル。 -- インデックスの複合主キー(UserID、URL)。これは、行が最初に UserID 値で並べられ、同じ UserID 値を持つ行が URL で並べられていることを意味します。 -- グラニュールサイズは 2 です。すなわち、各グラニュールには 2 行が含まれています。 - -以下の図では、各グラニュールの最初のテーブル行のキー列値をオレンジ色でマークしています。 - -**前のキー列が低い基数を持つ場合** - -UserID に低い基数があると仮定してください。この場合、同じ UserID 値が複数のテーブル行およびグラニュール、したがってインデックスマークに広がっている可能性が高いです。同じ UserID のインデックスマークの URL 値は、昇順にソートされます(テーブル行は最初に UserID によって、次に URL で並べられるため)。これにより、効率的なフィルタリングが可能です。 - -Sparse Primary Indices 06 - -上の図には、抽象的なサンプルデータに基づくグラニュール選択プロセスの 3 つの異なるシナリオが示されています: - -1. **URL 値が W3 より小さく、次のインデックスマークの URL 値も W3 より小さいインデックスマーク 0** は、マーク 0 と 1 が同じ UserID 値を持っていますので除外できます。この除外前提条件により、グラニュール 0 はすべて U1 UserID 値で構成されていることが確認でき、ClickHouse はグラニュール 0 内の最大 URL 値も W3 より小さいと仮定し、グラニュールを除外できます。 - -2. **URL 値が W3 より小さい(または等しい)インデックスマーク 1 と直接後続のインデックスマークの URL 値が W3 より大きい(または等しい)場合は選択されます**。これはグラニュール 1 がおそらく URL W3 を含むことを意味します。 - -3. 
**URL 値が W3 より大きいインデックスマーク 2 および 3** は除外できます。なぜなら、プライマリインデックスのインデックスマークは、各グラニュールの最初のテーブル行のキー列値を保存しており、テーブル行はキー列値に基づいてディスクにソートされるため、グラニュール 2 および 3 では URL 値 W3 が存在できないためです。 - -**前のキー列が高い基数を持つ場合** - -UserID に高い基数がある場合、同じ UserID 値が複数のテーブル行およびグラニュールに広がる可能性は低くなります。これは、インデックスマークの URL 値が単調に増加しないことを意味します: - -Sparse Primary Indices 06 - -上記の図では、W3 よりも URL 値が小さいすべてのマークがその関連するグラニュールの行を ClickHouse エンジンにストリーミングするための選択を受けていることが示されています。 - -これは、図内のすべてのインデックスマークがシナリオ 1 に該当するが、示された除外前提条件を満たしていないためです。それは、*直接後続のインデックスマークが現在のマークと同じ UserID 値を持つ*ことから、除外できないからです。 - -例えば、**URL 値が W3 より小さいインデックスマーク 0** に注目すると、その直接後続のインデックスマーク 1 も W3 より小さいが、*マーク 1 の UserID 値は 0 と異なるため*除外できません。 - -これが最終的に、ClickHouse がグラニュール 0 の最大 URL 値についての仮定を行うことを妨げます。代わりに、ClickHouse はグラニュール 0 に行が存在する可能性があると仮定し、マーク 0 の選択を余儀なくされます。 - -同様のシナリオがマーク 1、2、および 3 に対しても当てはまります。 - -:::note 結論 -ClickHouse が一般的な除外検索アルゴリズムを使用するのは、前のキー列が低い基数を持つ場合において、特に効果的です。 -::: - -サンプルデータセットでは、両方のキー列(UserID、URL)が高い基数を持ち、説明されたように、一般的な除外検索アルゴリズムは、URL カラムの前のキー列が高い(または等しい)基数を持つ場合にはあまり効果的ではありません。 -### データスキップインデックスについての注意事項 {#note-about-data-skipping-index} - - -UserID と URL の基数が似て高いため、私たちの [URL でのフィルタリングクエリ](/guides/best-practices/sparse-primary-indexes#secondary-key-columns-can-not-be-inefficient) も、複合主キー (UserID、URL) の URL カラムに対する [二次データスキッピングインデックス](./skipping-indexes.md) 作成からあまり利益を得ることはできません。 - -例えば、次の 2 つのステートメントは、テーブルの URL カラムに対する [minmax](/engines/table-engines/mergetree-family/mergetree.md/#primary-keys-and-indexes-in-queries) データスキッピングインデックスを作成し、充填します: -```sql -ALTER TABLE hits_UserID_URL ADD INDEX url_skipping_index URL TYPE minmax GRANULARITY 4; -ALTER TABLE hits_UserID_URL MATERIALIZE INDEX url_skipping_index; -``` -ClickHouse は、4 つの連続する [グラニュール](#data-is-organized-into-granules-for-parallel-data-processing) のグループごとに最小および最大の URL 値を保存する追加のインデックスを作成しました(上記の `ALTER TABLE` ステートメントの `GRANULARITY 4` 句に注目)。 - -Sparse Primary Indices 13a - -最初のインデックスエントリ(上の図の「マーク 0」)は、テーブルの最初の 4 つのグラニュールに属する行の最小および最大の URL 値を保存しています。 - -2 番目のインデックスエントリ(「マーク 1」)は、テーブルの次の 4 つのグラニュールに属する行に対する最小および最大の URL 値を保存し、以下同様です。 - -(ClickHouse は、インデックスマークに関連付けられたグラニュールのグループを [特定](#mark-files-are-used-for-locating-granules)するための [特別なマークファイル](#mark-files-are-used-for-locating-granules) も作成しました。) - - -UserID と URL の基数が似て高いため、この二次データスキッピングインデックスは、私たちの [URL でのフィルタリングクエリ](/guides/best-practices/sparse-primary-indexes#secondary-key-columns-can-not-be-inefficient) が実行された場合にグラニュールの選択から除外するのに役立つことはありません。 - -クエリが探している特定の URL 値(すなわち 'http://public_search')は、インデックスがそれぞれのグラニュールグループに保存している最小値と最大値の間にある可能性が高く、そのため ClickHouse はグラニュールグループを選択せざるを得ません(それらがクエリと一致する行を含んでいる可能性があるため)。 -### 複数の主インデックスを使用する必要性 {#a-need-to-use-multiple-primary-indexes} - - -その結果、特定の URL を持つ行のためにサンプルクエリを大幅に高速化する必要がある場合、クエリに最適化された主インデックスを使用する必要があります。 - -さらに、特定の UserID を持つ行のためにサンプルクエリの良好なパフォーマンスを維持したい場合、複数の主インデックスを使用する必要があります。 - -これは、次のような方法で実現できます。 - - -### 追加の主インデックスを作成するオプション {#options-for-creating-additional-primary-indexes} - - -特定の UserID を持つ行をフィルタリングするサンプルクエリと特定の URL を持つ行をフィルタリングするサンプルクエリの両方を大幅に高速化したい場合、次の 3 つのオプションのいずれかを使用して、複数の主インデックスを使用する必要があります: - -- **異なる主キーを持つ第二のテーブルを作成する**。 -- **既存のテーブルにマテリアライズドビューを作成する**。 -- **既存のテーブルにプロジェクションを追加する**。 - -これら 3 つのオプションは、テーブルの主インデックスおよび行のソート順を再編成するために、サンプルデータを追加のテーブルに効果的に複製します。 - -しかし、3 つのオプションは、クエリのルーティングや挿入ステートメントに関して、ユーザーに対する追加のテーブルの透過性において異なります。 - -**異なる主キーを持つ第二のテーブル**を作成する場合、クエリはクエリに最適なテーブルバージョンに明示的に送信する必要があり、新しいデータは両方のテーブルに明示的に挿入されて、テーブルを同期する必要があります: - -Sparse Primary Indices 09a - -**マテリアライズドビュー**の場合、追加のテーブルは自動的に作成され、データは両方のテーブル間で自動的に同期されます: - -Sparse Primary Indices 09b - 
-そして、**プロジェクション**は最も透過的なオプションであり、暗黙的に作成された(そして隠された)追加のテーブルをデータの変更に基づいて自動的に同期させるだけでなく、ClickHouse はクエリに最も効果的なテーブルバージョンを自動的に選択します: - -Sparse Primary Indices 09c - -以下では、複数の主インデックスを作成して使用するための 3 つのオプションについて、さらに詳細に、実際の例と共に議論します。 - - -### Option 1: セカンダリテーブル {#option-1-secondary-tables} - - -プライマリキーのキーカラムの順序を元のテーブルと比較して入れ替えた新しい追加テーブルを作成します。 - -```sql -CREATE TABLE hits_URL_UserID -( - `UserID` UInt32, - `URL` String, - `EventTime` DateTime -) -ENGINE = MergeTree --- highlight-next-line -PRIMARY KEY (URL, UserID) -ORDER BY (URL, UserID, EventTime) -SETTINGS index_granularity = 8192, index_granularity_bytes = 0, compress_primary_key = 0; -``` - -元のテーブルからすべての 8.87百万行を追加テーブルに挿入します: - -```sql -INSERT INTO hits_URL_UserID -SELECT * from hits_UserID_URL; -``` - -レスポンスは次のようになります: - -```response -Ok. - -0 rows in set. Elapsed: 2.898 sec. Processed 8.87 million rows, 838.84 MB (3.06 million rows/s., 289.46 MB/s.) -``` - -最後にテーブルを最適化します: -```sql -OPTIMIZE TABLE hits_URL_UserID FINAL; -``` - -プライマリキーのカラムの順序を変更したため、挿入された行はディスクに異なる辞書順で保存され(元のテーブルと比較して)、そのテーブルの 1083 グラニュールも以前とは異なる値を含んでいます: - -Sparse Primary Indices 10 - -これが結果のプライマリキーです: - -Sparse Primary Indices 11 - -これを使用して、URL カラムでフィルタリングされた例のクエリの実行を大幅に高速化できます。これは、最も頻繁に「http://public_search」をクリックしたトップ 10 のユーザーを計算するためのクエリです: -```sql -SELECT UserID, count(UserID) AS Count --- highlight-next-line -FROM hits_URL_UserID -WHERE URL = 'http://public_search' -GROUP BY UserID -ORDER BY Count DESC -LIMIT 10; -``` - -レスポンスは次のようになります: - - -```response -┌─────UserID─┬─Count─┐ -│ 2459550954 │ 3741 │ -│ 1084649151 │ 2484 │ -│ 723361875 │ 729 │ -│ 3087145896 │ 695 │ -│ 2754931092 │ 672 │ -│ 1509037307 │ 582 │ -│ 3085460200 │ 573 │ -│ 2454360090 │ 556 │ -│ 3884990840 │ 539 │ -│ 765730816 │ 536 │ -└────────────┴───────┘ - -10 rows in set. Elapsed: 0.017 sec. - -# highlight-next-line -Processed 319.49 thousand rows, -11.38 MB (18.41 million rows/s., 655.75 MB/s.) -``` - -今や、[ほぼ全テーブルスキャンを行う代わりに](/guides/best-practices/sparse-primary-indexes#efficient-filtering-on-secondary-key-columns)、ClickHouse はそのクエリをはるかに効果的に実行しました。 - -元のテーブルのプライマリインデックスでは、UserID が最初で、URL が 2 番目のキーカラムでしたが、ClickHouse はクエリを実行するためにインデックスマークの上で [一般的な排他検索](/guides/best-practices/sparse-primary-indexes#generic-exclusion-search-algorithm) を使用し、UserID と URL の間の同様に高いカーディナリティにより、あまり効果的ではありませんでした。 - -URL をプライマリインデックスの最初のカラムとして使用することで、ClickHouse は現在、インデックスマークの上で二分探索を実行しています。 -ClickHouse サーバーログファイルの対応するトレースログがそれを確認しました: -```response -...Executor): Key condition: (column 0 in ['http://public_search', - 'http://public_search']) - -# highlight-next-line -...Executor): Running binary search on index range for part all_1_9_2 (1083 marks) -...Executor): Found (LEFT) boundary mark: 644 -...Executor): Found (RIGHT) boundary mark: 683 -...Executor): Found continuous range in 19 steps -...Executor): Selected 1/1 parts by partition key, 1 parts by primary key, - -# highlight-next-line - 39/1083 marks by primary key, 39 marks to read from 1 ranges -...Executor): Reading approx. 
319488 rows with 2 streams
-```
-ClickHouse は、一般的な排他検索を使用した際の 1076 個ではなく、わずか 39 個のインデックスマークを選択しました。
-
-この追加テーブルは、URL でフィルタリングされた例のクエリの実行を高速化するために最適化されています。
-
-元のテーブルでの[パフォーマンスの悪いクエリ](/guides/best-practices/sparse-primary-indexes#secondary-key-columns-can-not-be-inefficient)と同様に、`UserID` でフィルタリングする例のクエリは、この新しい追加テーブルではあまり効果的に実行されません。UserID がこのテーブルのプライマリインデックスの 2 番目のキーカラムになったため、ClickHouse はグラニュール選択に[一般的な排他検索](/guides/best-practices/sparse-primary-indexes#generic-exclusion-search-algorithm)を使用しますが、UserID と URL のカーディナリティが同じように高いため、これはあまり効果的ではないからです。
-
-詳細を知りたい場合は、詳細ボックスを開いてください。
-
- - UserIDs に対するフィルタリングのクエリのパフォーマンスは悪い - -

- -```sql -SELECT URL, count(URL) AS Count -FROM hits_URL_UserID -WHERE UserID = 749927693 -GROUP BY URL -ORDER BY Count DESC -LIMIT 10; -``` - -レスポンスは以下のようになります: - -```response -┌─URL────────────────────────────┬─Count─┐ -│ http://auto.ru/chatay-barana.. │ 170 │ -│ http://auto.ru/chatay-id=371...│ 52 │ -│ http://public_search │ 45 │ -│ http://kovrik-medvedevushku-...│ 36 │ -│ http://forumal │ 33 │ -│ http://korablitz.ru/L_1OFFER...│ 14 │ -│ http://auto.ru/chatay-id=371...│ 14 │ -│ http://auto.ru/chatay-john-D...│ 13 │ -│ http://auto.ru/chatay-john-D...│ 10 │ -│ http://wot/html?page/23600_m...│ 9 │ -└────────────────────────────────┴───────┘ - -10 rows in set. Elapsed: 0.024 sec. - -# highlight-next-line -Processed 8.02 million rows, -73.04 MB (340.26 million rows/s., 3.10 GB/s.) -``` - -サーバーログ: -```response -...Executor): Key condition: (column 1 in [749927693, 749927693]) - -# highlight-next-line -...Executor): Used generic exclusion search over index for part all_1_9_2 - with 1453 steps -...Executor): Selected 1/1 parts by partition key, 1 parts by primary key, - -# highlight-next-line - 980/1083 marks by primary key, 980 marks to read from 23 ranges -...Executor): Reading approx. 8028160 rows with 10 streams -``` -

-
- -私たちは現在、2 つのテーブルを所有しています。`UserIDs` に対するフィルタリングのクエリを高速化するために最適化され、URLs に対するクエリを高速化するために最適化されたテーブルです。 - -### Option 2: マテリアライズドビュウ {#option-2-materialized-views} - -既存のテーブルに対してマテリアライズドビューを作成します。 -```sql -CREATE MATERIALIZED VIEW mv_hits_URL_UserID -ENGINE = MergeTree() -PRIMARY KEY (URL, UserID) -ORDER BY (URL, UserID, EventTime) -POPULATE -AS SELECT * FROM hits_UserID_URL; -``` - -レスポンスは次のようになります: - -```response -Ok. - -0 rows in set. Elapsed: 2.935 sec. Processed 8.87 million rows, 838.84 MB (3.02 million rows/s., 285.84 MB/s.) -``` - -:::note -- ビューのプライマリキーのキーカラムの順序を(元のテーブルと比較して)入れ替えます -- マテリアライズドビューは、所定のプライマリキーディフィニションに基づいて、**暗黙的に作成されたテーブル**によってバックアップされています -- 暗黙的に作成されたテーブルは、`SHOW TABLES` クエリによってリスト表示され、名前は `.inner` で始まります -- マテリアライズドビューのバックアップテーブルを最初に明示的に作成し、その後、`TO [db].[table]` [句](/sql-reference/statements/create/view.md)を通じてそのテーブルをターゲットにすることも可能です -- `POPULATE` キーワードを使用して、元のテーブル [hits_UserID_URL](#a-table-with-a-primary-key) から 8.87 百万行すべてで暗黙的に作成されたテーブルを即座に埋めます -- 新しい行がソーステーブル hits_UserID_URL に挿入されると、その行は暗黙的に作成されたテーブルにも自動的に挿入されます -- 実際には、暗黙的に作成されたテーブルは、[セカンダリテーブルとして明示的に作成したテーブル](#option-1-secondary-tables) と同じ行の順序およびプライマリインデックスを持っています: - -Sparse Primary Indices 12b1 - -ClickHouse は、[カラムデータファイル](#data-is-stored-on-disk-ordered-by-primary-key-columns) (*.bin)、[マークファイル](#mark-files-are-used-for-locating-granules) (*.mrk2)、および暗黙的に作成されたテーブルの[プライマリインデックス](#the-primary-index-has-one-entry-per-granule) (primary.idx)を、ClickHouse サーバーディレクトリの特別なフォルダに保存します: - -Sparse Primary Indices 12b2 - -::: - -暗黙的に作成されたテーブル(およびそのプライマリインデックス)は、URL カラムでフィルタリングされた例のクエリの実行を大幅に高速化するために今や使用できます: -```sql -SELECT UserID, count(UserID) AS Count --- highlight-next-line -FROM mv_hits_URL_UserID -WHERE URL = 'http://public_search' -GROUP BY UserID -ORDER BY Count DESC -LIMIT 10; -``` - -レスポンスは次のようになります: - -```response -┌─────UserID─┬─Count─┐ -│ 2459550954 │ 3741 │ -│ 1084649151 │ 2484 │ -│ 723361875 │ 729 │ -│ 3087145896 │ 695 │ -│ 2754931092 │ 672 │ -│ 1509037307 │ 582 │ -│ 3085460200 │ 573 │ -│ 2454360090 │ 556 │ -│ 3884990840 │ 539 │ -│ 765730816 │ 536 │ -└────────────┴───────┘ - -10 rows in set. Elapsed: 0.026 sec. - -# highlight-next-line -Processed 335.87 thousand rows, -13.54 MB (12.91 million rows/s., 520.38 MB/s.) -``` - -実際に、プライマリインデックスのバックアップとして暗黙的に作成されたテーブルは、[セカンダリテーブルとして明示的に作成したテーブル](#option-1-secondary-tables) と同一のものであり、このためクエリは明示的に作成したテーブルと同じ効果的な方法で実行されます。 - -ClickHouse サーバーログファイルの対応するトレースログは、ClickHouse がインデックスマークの上で二分探索を実行していることを確認します: - -```response -...Executor): Key condition: (column 0 in ['http://public_search', - 'http://public_search']) - -# highlight-next-line -...Executor): Running binary search on index range ... -... -...Executor): Selected 4/4 parts by partition key, 4 parts by primary key, - -# highlight-next-line - 41/1083 marks by primary key, 41 marks to read from 4 ranges -...Executor): Reading approx. 
335872 rows with 4 streams -``` - -### Option 3: プロジェクション {#option-3-projections} - -既存のテーブルにプロジェクションを作成します: -```sql -ALTER TABLE hits_UserID_URL - ADD PROJECTION prj_url_userid - ( - SELECT * - ORDER BY (URL, UserID) - ); -``` - -そしてプロジェクションをマテリアライズします: -```sql -ALTER TABLE hits_UserID_URL - MATERIALIZE PROJECTION prj_url_userid; -``` - -:::note -- プロジェクションは、所定の `ORDER BY` 句に基づく行の順序とプライマリインデックスを持つ**隠れたテーブル**を作成します -- 隠れたテーブルは、`SHOW TABLES` クエリではリスト表示されません -- `MATERIALIZE` キーワードを使用して、元のテーブル [hits_UserID_URL](#a-table-with-a-primary-key) から 8.87 百万行すべてで隠れたテーブルを即座に埋めます -- 新しい行がソーステーブル hits_UserID_URL に挿入されると、その行は暗黙的に作成されたテーブルにも自動的に挿入されます -- クエリは常に(文法的に)ソーステーブル hits_UserID_URL をターゲットにしていますが、もし隠れたテーブルの行の順序とプライマリインデックスがより効果的なクエリ実行を可能にする場合、その隠れたテーブルが代わりに使用されます -- プロジェクションは、プロジェクションの ORDER BY ステートメントが一致していても、ORDER BY を使用するクエリがより効率的になるわけではありません (see https://github.com/ClickHouse/ClickHouse/issues/47333) -- 実際には、暗黙的に作成された隠れたテーブルは、[セカンダリテーブルとして明示的に作成したテーブル](#option-1-secondary-tables) と同じ行の順序およびプライマリインデックスを持っています: - -Sparse Primary Indices 12c1 - -ClickHouse は、[カラムデータファイル](#data-is-stored-on-disk-ordered-by-primary-key-columns) (*.bin)、[マークファイル](#mark-files-are-used-for-locating-granules) (*.mrk2)、および隠れたテーブルの[プライマリインデックス](#the-primary-index-has-one-entry-per-granule) (primary.idx) を、ソーステーブルのデータファイル、マークファイル、プライマリインデックスファイルの隣にある特別なフォルダ(下のスクリーンショットでオレンジ色でマーク)に保存します: - -Sparse Primary Indices 12c2 - -::: - -プロジェクションによって作成された隠れたテーブル(およびそのプライマリインデックス)は、URL カラムでフィルタリングされた例のクエリの実行を大幅に高速化するために今や使用できます。クエリは文法的にプロジェクションのソーステーブルをターゲットにしています。 -```sql -SELECT UserID, count(UserID) AS Count --- highlight-next-line -FROM hits_UserID_URL -WHERE URL = 'http://public_search' -GROUP BY UserID -ORDER BY Count DESC -LIMIT 10; -``` - -レスポンスは以下のようになります: - -```response -┌─────UserID─┬─Count─┐ -│ 2459550954 │ 3741 │ -│ 1084649151 │ 2484 │ -│ 723361875 │ 729 │ -│ 3087145896 │ 695 │ -│ 2754931092 │ 672 │ -│ 1509037307 │ 582 │ -│ 3085460200 │ 573 │ -│ 2454360090 │ 556 │ -│ 3884990840 │ 539 │ -│ 765730816 │ 536 │ -└────────────┴───────┘ - -10 rows in set. Elapsed: 0.029 sec. - -# highlight-next-line -Processed 319.49 thousand rows, -11.38 MB (11.05 million rows/s., 393.58 MB/s.) -``` - -実際に、プロジェクションによって作成された隠れたテーブル(およびそのプライマリインデックス)は、[セカンダリテーブルとして明示的に作成したテーブル](#option-1-secondary-tables) と同一であり、このためクエリは明示的に作成したテーブルと同じ効果的な方法で実行されます。 - -ClickHouse サーバーログファイルの対応するトレースログは、ClickHouse がインデックスマークの上で二分探索を実行していることを確認します: - -```response -...Executor): Key condition: (column 0 in ['http://public_search', - 'http://public_search']) - -# highlight-next-line -...Executor): Running binary search on index range for part prj_url_userid (1083 marks) -...Executor): ... - -# highlight-next-line -...Executor): Choose complete Normal projection prj_url_userid -...Executor): projection required columns: URL, UserID -...Executor): Selected 1/1 parts by partition key, 1 parts by primary key, - -# highlight-next-line - 39/1083 marks by primary key, 39 marks to read from 1 ranges -...Executor): Reading approx. 
319488 rows with 2 streams -``` - -### Summary {#summary} - -UserID と URL の複合プライマリキーを持つテーブルのプライマリインデックスは、[UserID に基づくクエリのフィルタリングを高速化する](#the-primary-index-is-used-for-selecting-granules)のに役立ちました。しかし、そのインデックスは、[URL に基づくクエリのフィルタリングを高速化する](#secondary-key-columns-can-not-be-inefficient)のにはあまり明確な助けは提供しません。URL カラムが複合プライマリキーの一部であってもですが。 - -そして、逆もまた然りです: -URL と UserID の複合プライマリキーを持つテーブルのプライマリインデックスは、[URL に基づくクエリのフィルタリングを高速化する](#secondary-key-columns-can-not-be-inefficient)のには役立ちましたが、[UserID に基づくクエリのフィルタリングに対してはあまり効果を提供しません](#the-primary-index-is-used-for-selecting-granules)。 - -UserID と URL のプライマリキーのカラムの同様に高いカーディナリティのため、2 番目のキーカラムでフィルタリングされるクエリは、[インデックスにある 2 番目のキーカラムからあまり恩恵を受けない](#generic-exclusion-search-algorithm)。 - -したがって、プライマリインデックスから 2 番目のキーカラムを削除し(インデックスのメモリ消費を少なくすることになります)、[複数のプライマリインデックスを使用する](#using-multiple-primary-indexes)方が理にかなっています。 - -ただし、複合プライマリキー内のキーカラムに大きなカーディナリティの違いがある場合、[クエリにとって有益](#generic-exclusion-search-algorithm)な処理を行うために、プライマリキーカラムを昇順にカーディナリティでソートすることの方が良いです。 - -キーカラム間のカーディナリティ差が大きいほど、それらのカラムの順序は重要となります。次のセクションでそのことを証明していきます。 - -## キーカラムを効率的に順序付ける {#ordering-key-columns-efficiently} - - - -複合プライマリキー内のキーカラムの順序は、次の両者に大きな影響を与えます: -- クエリ内のセカンダリキーカラムに対するフィルタリングの効率と、 -- テーブルのデータファイルの圧縮率。 - -これを実証するために、次の 3 つのカラムを持つ、インターネットの「ユーザー」(`UserID` カラム) が URL (`URL` カラム) にアクセスした際にボットトラフィックとしてマークされたかを示すサンプルデータセットを使用します。 -- 特定の URL へのトラフィックのうち、ボットによるものがどれくらい (パーセント) なのか -- 特定のユーザーが (ボットでない) かどうかの信頼度 (そのユーザーからのトラフィックのうち、どのくらいがボットトラフィックでないと見なされるか) - -上記の 3 つのカラムをキーカラムとして使用する複合プライマリキーのカーディナリティを計算するため、このクエリを使用します(注意: TSV データをローカルテーブルを作成することなく、即席でクエリするために [URL テーブル関数](/sql-reference/table-functions/url.md) を使用しています)。以下のクエリを `clickhouse client` で実行します: -```sql -SELECT - formatReadableQuantity(uniq(URL)) AS cardinality_URL, - formatReadableQuantity(uniq(UserID)) AS cardinality_UserID, - formatReadableQuantity(uniq(IsRobot)) AS cardinality_IsRobot -FROM -( - SELECT - c11::UInt64 AS UserID, - c15::String AS URL, - c20::UInt8 AS IsRobot - FROM url('https://datasets.clickhouse.com/hits/tsv/hits_v1.tsv.xz') - WHERE URL != '' -) -``` -レスポンスは次のようになります: -```response -┌─cardinality_URL─┬─cardinality_UserID─┬─cardinality_IsRobot─┐ -│ 2.39 million │ 119.08 thousand │ 4.00 │ -└─────────────────┴────────────────────┴─────────────────────┘ - -1 row in set. Elapsed: 118.334 sec. Processed 8.87 million rows, 15.88 GB (74.99 thousand rows/s., 134.21 MB/s.) -``` - -私たちは、`URL` と `IsRobot` カラムの間で特にカーディナリティに大きな違いがあることを確認できます。したがって、複合プライマリキーでこれらのカラムの順序は、これらのカラムのフィルタリングの効率を高め、テーブルのカラムデータファイルの最適な圧縮比を達成するために重要です。 - -このことを示すために、私たちはボットトラフィック分析データ用に 2 つのテーブルバージョンを作成します: -- `(URL, UserID, IsRobot)` の複合プライマリキーを持つテーブル `hits_URL_UserID_IsRobot` -- `(IsRobot, UserID, URL)` の複合プライマリキーを持つテーブル `hits_IsRobot_UserID_URL` - -`hits_URL_UserID_IsRobot` テーブルを `(URL, UserID, IsRobot)` の複合プライマリキーで作成します: -```sql -CREATE TABLE hits_URL_UserID_IsRobot -( - `UserID` UInt32, - `URL` String, - `IsRobot` UInt8 -) -ENGINE = MergeTree --- highlight-next-line -PRIMARY KEY (URL, UserID, IsRobot); -``` - -そして、8.87 百万行で埋め込みます: -```sql -INSERT INTO hits_URL_UserID_IsRobot SELECT - intHash32(c11::UInt64) AS UserID, - c15 AS URL, - c20 AS IsRobot -FROM url('https://datasets.clickhouse.com/hits/tsv/hits_v1.tsv.xz') -WHERE URL != ''; -``` -レスポンスは次のようになります: -```response -0 rows in set. Elapsed: 104.729 sec. Processed 8.87 million rows, 15.88 GB (84.73 thousand rows/s., 151.64 MB/s.) 
-``` - -次に、`hits_IsRobot_UserID_URL` テーブルを `(IsRobot, UserID, URL)` の複合プライマリキーで作成します: -```sql -CREATE TABLE hits_IsRobot_UserID_URL -( - `UserID` UInt32, - `URL` String, - `IsRobot` UInt8 -) -ENGINE = MergeTree --- highlight-next-line -PRIMARY KEY (IsRobot, UserID, URL); -``` -そして、前のテーブルを埋めるために使用したのと同じ 8.87 百万行で埋め込みます: -```sql -INSERT INTO hits_IsRobot_UserID_URL SELECT - intHash32(c11::UInt64) AS UserID, - c15 AS URL, - c20 AS IsRobot -FROM url('https://datasets.clickhouse.com/hits/tsv/hits_v1.tsv.xz') -WHERE URL != ''; -``` -レスポンスは次のようになります: -```response -0 rows in set. Elapsed: 95.959 sec. Processed 8.87 million rows, 15.88 GB (92.48 thousand rows/s., 165.50 MB/s.) -``` - -### セカンダリキーカラムの効率的なフィルタリング {#efficient-filtering-on-secondary-key-columns} - -クエリが複合キーの一部であるカラムでフィルタリングし、かつそれが最初のキーカラムである場合、[ClickHouse はインデックスマークの上でバイナリ検索アルゴリズムを実行します](#the-primary-index-is-used-for-selecting-granules)。 - -クエリが複合キーの一部であるカラムでのみフィルタリングしているが、それが最初のキーカラムでない場合、[ClickHouse はインデックスマークの上で一般的な排他検索アルゴリズムを使用します](/guides/best-practices/sparse-primary-indexes#secondary-key-columns-can-not-be-inefficient)。 - -第二のケースでは、複合プライマリキー内でのキーカラムの順序は、[一般的な排他検索アルゴリズム](https://github.com/ClickHouse/ClickHouse/blob/22.3/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp#L1444)の効果に影響を与えます。 - -これは、キーカラム `(URL, UserID, IsRobot)` の順序をカーディナリティに降順にしたテーブルの `UserID` カラムでフィルタリングしているクエリです: -```sql -SELECT count(*) -FROM hits_URL_UserID_IsRobot -WHERE UserID = 112304 -``` -レスポンスは次のようになります: -```response -┌─count()─┐ -│ 73 │ -└─────────┘ - -1 row in set. Elapsed: 0.026 sec. - -# highlight-next-line -Processed 7.92 million rows, -31.67 MB (306.90 million rows/s., 1.23 GB/s.) -``` - -次に、キーカラム `(IsRobot, UserID, URL)` の順序をカーディナリティに昇順にしたテーブルに対して同じクエリを実行します: -```sql -SELECT count(*) -FROM hits_IsRobot_UserID_URL -WHERE UserID = 112304 -``` -レスポンスは次のようになります: -```response -┌─count()─┐ -│ 73 │ -└─────────┘ - -1 row in set. Elapsed: 0.003 sec. - -# highlight-next-line -Processed 20.32 thousand rows, -81.28 KB (6.61 million rows/s., 26.44 MB/s.) -``` - -テーブルでのキーカラムの順序をカーディナリティに降順にした場合と比較して、迅速性が非常に大きく効果的であることがわかります。 - -その理由は、[一般的な排他検索アルゴリズム](https://github.com/ClickHouse/ClickHouse/blob/22.3/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp#L1444)が、前のキーカラムが低いカーディナリティである場合に、セカンダリキーカラムを介してグラニュールが選択されるとうまく機能するからです。このことについては、ガイドの[前のセクション](#generic-exclusion-search-algorithm)で詳しく説明しました。 - -### データファイルの最適圧縮率 {#optimal-compression-ratio-of-data-files} - -次のクエリは、上記で作成した 2 つのテーブルの `UserID` カラムの圧縮率を比較します: - -```sql -SELECT - table AS Table, - name AS Column, - formatReadableSize(data_uncompressed_bytes) AS Uncompressed, - formatReadableSize(data_compressed_bytes) AS Compressed, - round(data_uncompressed_bytes / data_compressed_bytes, 0) AS Ratio -FROM system.columns -WHERE (table = 'hits_URL_UserID_IsRobot' OR table = 'hits_IsRobot_UserID_URL') AND (name = 'UserID') -ORDER BY Ratio ASC -``` -レスポンスは以下のようになります: -```response -┌─Table───────────────────┬─Column─┬─Uncompressed─┬─Compressed─┬─Ratio─┐ -│ hits_URL_UserID_IsRobot │ UserID │ 33.83 MiB │ 11.24 MiB │ 3 │ -│ hits_IsRobot_UserID_URL │ UserID │ 33.83 MiB │ 877.47 KiB │ 39 │ -└─────────────────────────┴────────┴──────────────┴────────────┴───────┘ - -2 rows in set. Elapsed: 0.006 sec. 
-``` - -`UserID` カラムの圧縮率は、カーディナリティに昇順にソートされたテーブルの方が非常に高いことがわかります。 - -両方のテーブルに正確に同じデータが保存されているにも関わらず(両方のテーブルに同じ 8.87 百万行を挿入しました)、複合プライマリキー内のキーカラムの順序は、テーブルの[カラムデータファイル](#data-is-stored-on-disk-ordered-by-primary-key-columns)内の圧縮データが必要とするディスクスペースの大きさに大きな影響を与えています: -- 複合プライマリキーが `(URL, UserID, IsRobot)` でキーカラムの順序がカーディナリティに降順の場合、`UserID.bin` データファイルのディスクスペースは **11.24 MiB** です -- 複合プライマリキーが `(IsRobot, UserID, URL)` でキーカラムの順序がカーディナリティに昇順の場合、`UserID.bin` データファイルのディスクスペースは **877.47 KiB** です - -ディスク上のテーブルのカラムに対して良好な圧縮率を持つことは、ディスクスペースを節約するだけでなく、当該カラムからのデータをメインメモリ(オペレーティングシステムのファイルキャッシュ)に移動するために必要な入出力が少なくなるため、(特に分析用の)クエリがより高速になります。 - -次のセクションで、テーブルのカラムに対する圧縮率を最適化するためにプライマリキーのカラムを昇順にソートすることがいかに有益であるかを説明します。 - -以下の図は、カーディナリティによって昇順に並べられたプライマリキーの行がディスク上での順序を示しています: - -Sparse Primary Indices 14a - -私たちは、[テーブルの行データがプライマリキーのカラムに沿ってディスクに保存される](#data-is-stored-on-disk-ordered-by-primary-key-columns)ことを確認しました。 - -上記の図では、テーブルの行(そのカラム値がディスク上)はまずその `cl` 値によってオーダーされ、同じ `cl` 値を持つ行はその `ch` 値によってオーダーされます。そして、最初のキーカラム `cl` が低いカーディナリティであるため、同じ `cl` 値を持つ行がある可能性が高く、これにより `ch` 値がローカルでオーダーされる可能性が高いのです。 - -もしデータが似たようなものだと近くに配置されている場合(例えば、ソートによって)、そのデータはよりよく圧縮されます。 -一般的に、圧縮アルゴリズムは、データのランレングスが多いほど(データが多ければ多いほど圧縮にとって良いことです)および局所性(データが似たようなものであるほど圧縮率が良いことです)に利益を得ます。 - -上記の図と対照的に、下記の図は、カーディナリティに降順で順序付けられたプライマリキーのディスク上での行を示しています: - -Sparse Primary Indices 14b - -ここでは、テーブルの行はまずその `ch` 値によってオーダーされ、同じ `ch` 値を持つ行はその `cl` 値によってオーダーされます。 -しかし、最初のキーカラム `ch` が高いカーディナリティであるため、同じ `ch` 値を持つ行が存在する可能性は低く、これにより `cl` 値がローカルでオーダーされる可能性も低くなります。 - -したがって、`cl` 値は最も可能性としてランダムな順序にあり、したがって局所性や圧縮率が悪くなります。 - -### Summary {#summary-1} - -クエリにおけるセカンダリキーカラムの効率的フィルタリングとテーブルのカラムデータファイルの圧縮率の両方に対して、プライマリキー内のカラムの順序をカーディナリティに沿って昇順に並べることが有益です。 - -### 関連コンテンツ {#related-content-1} -- ブログ: [ClickHouse のクエリをスーパーチャージする](https://clickhouse.com/blog/clickhouse-faster-queries-with-projections-and-primary-indexes) - -## 単一行の特定を効率的に行う {#identifying-single-rows-efficiently} - -一般的に、ClickHouse にとっての最良の使用ケースではありませんが、時々 ClickHouse 上に構築されたアプリケーションは、ClickHouse テーブルの単一行を特定する必要があります。 - -その直感的な解決策は、各行にユニークな値を持つ [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier) カラムを使用し、そのカラムをプライマリキーとして使用して行を迅速に取得することです。 - -最も迅速に取得するためには、UUID カラムは[最初のキーカラムである必要があります](#the-primary-index-is-used-for-selecting-granules)。 - -私たちは[ClickHouse テーブルの行データがディスクに保存され、プライマリキーのカラムによって並べられている](#data-is-stored-on-disk-ordered-by-primary-key-columns)ため、非常に高いカーディナリティのカラム(UUID カラムのような)をプライマリキーまたは複合プライマリキー内の低いカーディナリティのカラムの前に置くことは、テーブルの他のカラムの圧縮率に悪影響を及ぼします。 - -最も迅速に取得することと、データ圧縮を最適化することとの妥協案は、複合プライマリキーを使用し、UUIDを最後のキーカラム、低(または)カーディナリティのキーカラムの後に配置することです。 -### A concrete example {#a-concrete-example} - -一つの具体例は、Alexey Milovidov が開発し、[ブログに書いた](https://clickhouse.com/blog/building-a-paste-service-with-clickhouse/)プレーンテキストペーストサービス [https://pastila.nl](https://pastila.nl) です。 - -テキストエリアの変更があるたびに、データは自動的に ClickHouse テーブルの行に保存されます(変更ごとに一行)。 - -ペーストされたコンテンツの(特定のバージョンの)識別と取得の方法の一つは、コンテンツのハッシュをそのコンテンツを含むテーブル行の UUID として使用することです。 - -以下の図は -- コンテンツが変更されるときの行の挿入順(例えば、テキストエリアにテキストを入力するキーストロークによる)と -- `PRIMARY KEY (hash)` が使用される場合の挿入された行からのデータのディスク上の順序を示しています: - -Sparse Primary Indices 15a - -`hash` カラムが主キー列として使用されるため -- 特定の行を [非常に速く](#the-primary-index-is-used-for-selecting-granules) 取得できますが、 -- テーブルの行(そのカラムデータ)はディスク上に(ユニークでランダムな)ハッシュ値によって昇順に保存されます。したがって、コンテンツカラムの値もランダム順で保存され、データの局所性がないため、**コンテンツカラムデータファイルの最適でない圧縮比**をもたらします。 - -コンテンツカラムの圧縮比を大幅に改善しつつ、特定の行の迅速な取得を実現するために、pastila.nl は特定の行を識別するために二つのハッシュ(および複合主キー)を使用しています: -- 上述の通り、異なるデータに対して異なるハッシュであるコンテンツのハッシュと、 -- 小さなデータの変更で**変わらない** 
[局所感度ハッシュ(フィンガープリント)](https://en.wikipedia.org/wiki/Locality-sensitive_hashing) です。 - -以下の図は -- コンテンツが変更されるときの行の挿入順(例えば、テキストエリアにテキストを入力するキーストロークによる)と -- 複合 `PRIMARY KEY (fingerprint, hash)` が使用される場合の挿入された行からのデータのディスク上の順序を示しています: - -Sparse Primary Indices 15b - -今やディスク上の行はまず `fingerprint` によって順序付けられ、同じフィンガープリント値を持つ行においては、その `hash` 値が最終的な順序を決定します。 - -データが小さな変更のみで異なる場合でも同じフィンガープリント値が付与されるため、今や似たデータはコンテンツカラム上で近くに保存されます。これは、圧縮アルゴリズムが一般的にデータの局所性から恩恵を受けるため、コンテンツカラムの圧縮比を非常に良くします(データがより似ているほど、圧縮比は良くなります)。 - -妥協点は、複合 `PRIMARY KEY (fingerprint, hash)` から得られる主インデックスを最適に利用するために特定の行を取得するには二つのフィールド(`fingerprint` と `hash`)が必要であることです。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/guides/best-practices/sparse-primary-indexes.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/guides/best-practices/sparse-primary-indexes.md.hash deleted file mode 100644 index d0b6c4aea11..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/guides/best-practices/sparse-primary-indexes.md.hash +++ /dev/null @@ -1 +0,0 @@ -2cba29367644f609 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/guides/creating-tables.md b/i18n/jp/docusaurus-plugin-content-docs/current/guides/creating-tables.md deleted file mode 100644 index b23551c9ec8..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/guides/creating-tables.md +++ /dev/null @@ -1,66 +0,0 @@ ---- -sidebar_position: 1 -sidebar_label: 'ClickHouseでのテーブル作成' -title: 'ClickHouseでのテーブル作成' -slug: '/guides/creating-tables' -description: 'ClickHouse でのテーブル作成について学びます' ---- - - - - -# ClickHouseでのテーブル作成 - -ほとんどのデータベースと同様に、ClickHouseはテーブルを**データベース**に論理的にグループ化します。新しいデータベースをClickHouseに作成するには、`CREATE DATABASE` コマンドを使用します: - -```sql -CREATE DATABASE IF NOT EXISTS helloworld -``` - -同様に、`CREATE TABLE` を使用して新しいテーブルを定義します。データベース名を指定しない場合、テーブルは `default` データベースに作成されます。 - -次の `my_first_table` という名前のテーブルは、`helloworld` データベースに作成されます: - -```sql -CREATE TABLE helloworld.my_first_table -( - user_id UInt32, - message String, - timestamp DateTime, - metric Float32 -) -ENGINE = MergeTree() -PRIMARY KEY (user_id, timestamp) -``` - -上記の例では、`my_first_table` は4つのカラムを持つ `MergeTree` テーブルです: - -- `user_id`: 32ビットの符号なし整数 -- `message`: `String` データ型で、他のデータベースシステムにおける `VARCHAR`、`BLOB`、`CLOB` などの型を置き換えます -- `timestamp`: 時間の瞬間を表す `DateTime` 値 -- `metric`: 32ビットの浮動小数点数 - -:::note -テーブルエンジンは次のことを決定します: -- データがどのように、どこに保存されるか -- サポートされるクエリの種類 -- データがレプリケートされるかどうか - -選択肢が豊富なエンジンがありますが、単一ノードのClickHouseサーバーでのシンプルなテーブルには、[MergeTree](/engines/table-engines/mergetree-family/mergetree.md)が適しているでしょう。 -::: - -## 主キーの簡単な紹介 {#a-brief-intro-to-primary-keys} - -さらに進む前に、ClickHouseにおける主キーの仕組みを理解することが重要です(主キーの実装は予想外かもしれません!): - -- ClickHouseの主キーは、テーブル内の各行で**_一意ではありません_**。 - -ClickHouseテーブルの主キーは、ディスクに書き込む際にデータがどのようにソートされるかを決定します。8,192行または10MBのデータ(**インデックスの粒度**と呼ばれます)ごとに、主キーインデックスファイルにエントリが作成されます。この粒度の概念は、メモリに簡単に収まる**スパースインデックス**を生成し、グラニュールは `SELECT` クエリ中に処理される最小のカラムデータのストライプを表します。 - -主キーは `PRIMARY KEY` パラメータを使用して定義できます。 `PRIMARY KEY` を指定せずにテーブルを定義した場合、キーは `ORDER BY` 句で指定されたタプルになります。 `PRIMARY KEY` と `ORDER BY` の両方を指定すると、主キーはソート順のプレフィックスでなければなりません。 - -主キーはまたソーティングキーでもあり、`(user_id, timestamp)` のタプルです。したがって、各カラムファイルに保存されるデータは `user_id` でソートされ、その後 `timestamp` でソートされます。 - -:::tip -詳細については、ClickHouse Academyの[データモデリングトレーニングモジュール](https://learn.clickhouse.com/visitor_catalog_class/show/1328860/?utm_source=clickhouse&utm_medium=docs)を参照してください。 -::: diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/guides/creating-tables.md.hash 
b/i18n/jp/docusaurus-plugin-content-docs/current/guides/creating-tables.md.hash deleted file mode 100644 index fafce972029..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/guides/creating-tables.md.hash +++ /dev/null @@ -1 +0,0 @@ -e3e8153db2c85077 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/guides/developer/_category_.yml b/i18n/jp/docusaurus-plugin-content-docs/current/guides/developer/_category_.yml deleted file mode 100644 index 1936f3258a8..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/guides/developer/_category_.yml +++ /dev/null @@ -1,8 +0,0 @@ -position: 2 -label: 'Developer Guides' -collapsible: true -collapsed: true -link: - type: generated-index - title: Developer Guides - slug: /guides/developer diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/guides/developer/alternative-query-languages.md b/i18n/jp/docusaurus-plugin-content-docs/current/guides/developer/alternative-query-languages.md deleted file mode 100644 index bfa0b6f5ecc..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/guides/developer/alternative-query-languages.md +++ /dev/null @@ -1,80 +0,0 @@ ---- -slug: '/guides/developer/alternative-query-languages' -sidebar_label: '代替クエリ言語' -title: '代替クエリ言語' -description: 'ClickHouseで代替クエリ言語を使用する' ---- - -import ExperimentalBadge from '@theme/badges/ExperimentalBadge'; - -ClickHouseは、標準SQL以外にもさまざまな代替クエリ言語をデータのクエリにサポートしています。 - -現在サポートされているダイアレクトは以下の通りです: -- `clickhouse`: ClickHouseのデフォルトの[SQLダイアレクト](../../chdb/reference/sql-reference.md) -- `prql`: [Pipelined Relational Query Language (PRQL)](https://prql-lang.org/) -- `kusto`: [Kusto Query Language (KQL)](https://learn.microsoft.com/en-us/azure/data-explorer/kusto/query) - -使用するクエリ言語は、`dialect`を設定することで制御されます。 - -## Standard SQL {#standard-sql} - -Standard SQLはClickHouseのデフォルトのクエリ言語です。 - -```sql -SET dialect = 'clickhouse' -``` - -## Pipelined Relational Query Language (PRQL) {#pipelined-relational-query-language-prql} - - - -PRQLを有効にするには: - -```sql -SET allow_experimental_prql_dialect = 1; -- このSET文はClickHouseのバージョンが>= v25.1の場合のみ必要です -SET dialect = 'prql' -``` - -PRQLのクエリの例: - -```prql -from trips -aggregate { - ct = count this - total_days = sum days -} -``` - -内部的に、ClickHouseはPRQLをSQLにトランスパイルしてPRQLクエリを実行します。 - -## Kusto Query Language (KQL) {#kusto-query-language-kql} - - - -KQLを有効にするには: - -```sql -SET allow_experimental_kusto_dialect = 1; -- このSET文はClickHouseのバージョンが>= 25.1の場合のみ必要です -SET dialect = 'kusto' -``` - -```kql title="Query" -numbers(10) | project number -``` - -```response title="Response" -┌─number─┐ -│ 0 │ -│ 1 │ -│ 2 │ -│ 3 │ -│ 4 │ -│ 5 │ -│ 6 │ -│ 7 │ -│ 8 │ -│ 9 │ -└────────┘ -``` - -KQLクエリは、ClickHouseで定義されたすべての関数にアクセスできない場合があることに注意してください。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/guides/developer/alternative-query-languages.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/guides/developer/alternative-query-languages.md.hash deleted file mode 100644 index 0389a064406..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/guides/developer/alternative-query-languages.md.hash +++ /dev/null @@ -1 +0,0 @@ -f625c965ebdaf7af diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/guides/developer/cascading-materialized-views.md b/i18n/jp/docusaurus-plugin-content-docs/current/guides/developer/cascading-materialized-views.md deleted file mode 100644 index af91291e35f..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/guides/developer/cascading-materialized-views.md +++ /dev/null @@ 
-1,370 +0,0 @@ ---- -slug: '/guides/developer/cascading-materialized-views' -title: 'Cascading Materialized Views' -description: 'ソーステーブルから複数のマテリアライズドビューを使用する方法。' -keywords: -- 'materialized view' -- 'aggregation' ---- - - - - -# カスケーディングマテリアライズドビュー - -この例では、マテリアライズドビューを作成し、次に、最初のマテリアライズドビューにカスケードする2番目のマテリアライズドビューを作成する方法を示します。このページでは、その方法、さまざまな可能性、および制限について説明します。さまざまなユースケースは、2番目のマテリアライズドビューをソースとして使用して、マテリアライズドビューを作成することで対応できます。 - - - -
- -例: - -ドメイン名のグループに対する1時間ごとのビュー数を持つ架空のデータセットを使用します。 - -私たちの目標 - -1. 各ドメイン名ごとに月ごとに集約されたデータが必要です。 -2. 各ドメイン名ごとに年ごとに集約されたデータが必要です。 - -これらのオプションのいずれかを選ぶことができます: - -- SELECTリクエスト中にデータを読み取って集約するクエリを書く -- データを新しい形式で取り込む時点で準備する -- 特定の集約に対してデータを取り込む時点で準備する。 - -マテリアライズドビューを使用してデータを準備することで、ClickHouseが実行する必要のあるデータと計算の量を制限でき、SELECTリクエストが高速化されます。 - -## マテリアライズドビューのソーステーブル {#source-table-for-the-materialized-views} - -データを集約したものを報告することが目標であるため、個々の行ではなくソーステーブルを作成します。これにより、情報をマテリアライズドビューに渡し、実際の入力データを破棄することができます。これにより目標が達成され、ストレージの節約にもなりますので、`Null`テーブルエンジンを使用します。 - -```sql -CREATE DATABASE IF NOT EXISTS analytics; -``` - -```sql -CREATE TABLE analytics.hourly_data -( - `domain_name` String, - `event_time` DateTime, - `count_views` UInt64 -) -ENGINE = Null -``` - -:::note -Nullテーブルにマテリアライズドビューを作成できます。したがって、テーブルに書き込まれたデータはビューに影響しますが、元の生データは依然として破棄されます。 -::: - -## 月単位の集約テーブルとマテリアライズドビュー {#monthly-aggregated-table-and-materialized-view} - -最初のマテリアライズドビューのために、`Target`テーブルを作成する必要があります。この例では、`analytics.monthly_aggregated_data`とし、月単位およびドメイン名ごとにビューの合計を保存します。 - -```sql -CREATE TABLE analytics.monthly_aggregated_data -( - `domain_name` String, - `month` Date, - `sumCountViews` AggregateFunction(sum, UInt64) -) -ENGINE = AggregatingMergeTree -ORDER BY (domain_name, month) -``` - -ターゲットテーブルにデータを転送するマテリアライズドビューは次のようになります: - -```sql -CREATE MATERIALIZED VIEW analytics.monthly_aggregated_data_mv -TO analytics.monthly_aggregated_data -AS -SELECT - toDate(toStartOfMonth(event_time)) AS month, - domain_name, - sumState(count_views) AS sumCountViews -FROM analytics.hourly_data -GROUP BY - domain_name, - month -``` - -## 年単位の集約テーブルとマテリアライズドビュー {#yearly-aggregated-table-and-materialized-view} - -次に、前のターゲットテーブル`monthly_aggregated_data`にリンクされた2番目のマテリアライズドビューを作成します。 - -まず、各ドメイン名ごとに年単位で集約されたビューの合計を保存する新しいターゲットテーブルを作成します。 - -```sql -CREATE TABLE analytics.year_aggregated_data -( - `domain_name` String, - `year` UInt16, - `sumCountViews` UInt64 -) -ENGINE = SummingMergeTree() -ORDER BY (domain_name, year) -``` - -このステップでカスケードが定義されます。`FROM`ステートメントは`monthly_aggregated_data`テーブルを使用します。これはデータのフローが次のようになることを意味します: - -1. データが`hourly_data`テーブルに送られます。 -2. ClickHouseは受信したデータを最初のマテリアライズドビュー`monthly_aggregated_data`テーブルに転送します。 -3. 最後に、ステップ2で受信したデータが`year_aggregated_data`に転送されます。 - -```sql -CREATE MATERIALIZED VIEW analytics.year_aggregated_data_mv -TO analytics.year_aggregated_data -AS -SELECT - toYear(toStartOfYear(month)) AS year, - domain_name, - sumMerge(sumCountViews) as sumCountViews -FROM analytics.monthly_aggregated_data -GROUP BY - domain_name, - year -``` - -:::note -マテリアライズドビューを操作する際の一般的な誤解は、データがテーブルから読み取られるというものです。`マテリアライズドビュー`は、挿入されたブロックのデータを転送するものであり、テーブル内の最終結果ではありません。 - -この例で`monthly_aggregated_data`に使用されるエンジンがCollapsingMergeTreeであると仮定すると、私たちの2番目のマテリアライズドビュー`year_aggregated_data_mv`に転送されるデータは、圧縮されたテーブルの最終結果ではなく、むしろ`SELECT ... GROUP BY`で定義されたフィールドを持つデータのブロックが転送されます。 - -CollapsingMergeTree、ReplacingMergeTree、またはSummingMergeTreeを使用している場合で、カスケードマテリアライズドビューを作成する予定がある場合は、ここで説明されている制限を理解する必要があります。 -::: - -## サンプルデータ {#sample-data} - -今、データを挿入してカスケードマテリアライズドビューをテストする時が来ました: - -```sql -INSERT INTO analytics.hourly_data (domain_name, event_time, count_views) -VALUES ('clickhouse.com', '2019-01-01 10:00:00', 1), - ('clickhouse.com', '2019-02-02 00:00:00', 2), - ('clickhouse.com', '2019-02-01 00:00:00', 3), - ('clickhouse.com', '2020-01-01 00:00:00', 6); -``` - -`analytics.hourly_data`の内容をSELECTすると、テーブルエンジンが`Null`であるため、次のように表示されますが、データは処理されました。 - -```sql -SELECT * FROM analytics.hourly_data -``` - -```response -Ok. - -0 rows in set. 
Elapsed: 0.002 sec. -``` - -小さなデータセットを使用しているため、結果を追跡し、期待されるものと比較できます。フローが小さなデータセットで正常であれば、大規模なデータに移動できます。 - -## 結果 {#results} - -ターゲットテーブルで`sumCountViews`フィールドを選択してクエリを実行すると、バイナリ表現が表示されます(いくつかの端末では)。値が数としてではなく、AggregateFunction型として保存されているためです。集約の最終結果を取得するには、`-Merge`サフィックスを使用する必要があります。 - -このクエリでAggregateFunctionに保存されている特殊文字を確認できます: - -```sql -SELECT sumCountViews FROM analytics.monthly_aggregated_data -``` - -```response -┌─sumCountViews─┐ -│ │ -│ │ -│ │ -└───────────────┘ - -3 rows in set. Elapsed: 0.003 sec. -``` - -代わりに、`Merge`サフィックスを使用して`sumCountViews`の値を取得してみます: - -```sql -SELECT - sumMerge(sumCountViews) as sumCountViews -FROM analytics.monthly_aggregated_data; -``` - -```response -┌─sumCountViews─┐ -│ 12 │ -└───────────────┘ - -1 row in set. Elapsed: 0.003 sec. -``` - -`AggregatingMergeTree`では`AggregateFunction`を`sum`として定義しましたので、`sumMerge`を使用できます。`AggregateFunction`の`avg`を使用するときは、`avgMerge`を使用します。 - -```sql -SELECT - month, - domain_name, - sumMerge(sumCountViews) as sumCountViews -FROM analytics.monthly_aggregated_data -GROUP BY - domain_name, - month -``` - -これで、マテリアライズドビューが定義した目標にきちんと応じていることが確認できます。 - -ターゲットテーブル`monthly_aggregated_data`にデータが保存されたので、各ドメイン名ごとに月単位で集約されたデータを取得できます: - -```sql -SELECT - month, - domain_name, - sumMerge(sumCountViews) as sumCountViews -FROM analytics.monthly_aggregated_data -GROUP BY - domain_name, - month -``` - -```response -┌──────month─┬─domain_name────┬─sumCountViews─┐ -│ 2020-01-01 │ clickhouse.com │ 6 │ -│ 2019-01-01 │ clickhouse.com │ 1 │ -│ 2019-02-01 │ clickhouse.com │ 5 │ -└────────────┴────────────────┴───────────────┘ - -3 rows in set. Elapsed: 0.004 sec. -``` - -年単位で各ドメイン名ごとに集約されたデータ: - -```sql -SELECT - year, - domain_name, - sum(sumCountViews) -FROM analytics.year_aggregated_data -GROUP BY - domain_name, - year -``` - -```response -┌─year─┬─domain_name────┬─sum(sumCountViews)─┐ -│ 2019 │ clickhouse.com │ 6 │ -│ 2020 │ clickhouse.com │ 6 │ -└──────┴────────────────┴────────────────────┘ - -2 rows in set. Elapsed: 0.004 sec. 
-``` - - -## 複数のソーステーブルを単一のターゲットテーブルに結合する {#combining-multiple-source-tables-to-single-target-table} - -マテリアライズドビューは、複数のソーステーブルを同じ宛先テーブルに結合するためにも使用できます。これは、`UNION ALL`ロジックに似たマテリアライズドビューを作成するのに役立ちます。 - -まず、異なるメトリックセットを表す2つのソーステーブルを作成します: - -```sql -CREATE TABLE analytics.impressions -( - `event_time` DateTime, - `domain_name` String -) ENGINE = MergeTree ORDER BY (domain_name, event_time) -; - -CREATE TABLE analytics.clicks -( - `event_time` DateTime, - `domain_name` String -) ENGINE = MergeTree ORDER BY (domain_name, event_time) -; -``` - -次に、メトリックの結合セットを持つ`Target`テーブルを作成します: - -```sql -CREATE TABLE analytics.daily_overview -( - `on_date` Date, - `domain_name` String, - `impressions` SimpleAggregateFunction(sum, UInt64), - `clicks` SimpleAggregateFunction(sum, UInt64) -) ENGINE = AggregatingMergeTree ORDER BY (on_date, domain_name) -``` - -同じ`Target`テーブルを指す2つのマテリアライズドビューを作成します。欠落している列を明示的に含める必要はありません: - -```sql -CREATE MATERIALIZED VIEW analytics.daily_impressions_mv -TO analytics.daily_overview -AS -SELECT - toDate(event_time) AS on_date, - domain_name, - count() AS impressions, - 0 clicks ---<<<--- これを省略すると同じ0になります -FROM - analytics.impressions -GROUP BY - toDate(event_time) AS on_date, - domain_name -; - -CREATE MATERIALIZED VIEW analytics.daily_clicks_mv -TO analytics.daily_overview -AS -SELECT - toDate(event_time) AS on_date, - domain_name, - count() AS clicks, - 0 impressions ---<<<--- これを省略すると同じ0になります -FROM - analytics.clicks -GROUP BY - toDate(event_time) AS on_date, - domain_name -; -``` - -値を挿入すると、それらの値は`Target`テーブルのそれぞれの列に集約されます: - -```sql -INSERT INTO analytics.impressions (domain_name, event_time) -VALUES ('clickhouse.com', '2019-01-01 00:00:00'), - ('clickhouse.com', '2019-01-01 12:00:00'), - ('clickhouse.com', '2019-02-01 00:00:00'), - ('clickhouse.com', '2019-03-01 00:00:00') -; - -INSERT INTO analytics.clicks (domain_name, event_time) -VALUES ('clickhouse.com', '2019-01-01 00:00:00'), - ('clickhouse.com', '2019-01-01 12:00:00'), - ('clickhouse.com', '2019-03-01 00:00:00') -; -``` - -`Target`テーブルには、印象とクリックが結合されています: - -```sql -SELECT - on_date, - domain_name, - sum(impressions) AS impressions, - sum(clicks) AS clicks -FROM - analytics.daily_overview -GROUP BY - on_date, - domain_name -; -``` - -このクエリは次のような結果を出力するはずです: - -```response -┌────on_date─┬─domain_name────┬─impressions─┬─clicks─┐ -│ 2019-01-01 │ clickhouse.com │ 2 │ 2 │ -│ 2019-03-01 │ clickhouse.com │ 1 │ 1 │ -│ 2019-02-01 │ clickhouse.com │ 1 │ 0 │ -└────────────┴────────────────┴─────────────┴────────┘ - -3 rows in set. Elapsed: 0.018 sec. 
-``` diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/guides/developer/cascading-materialized-views.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/guides/developer/cascading-materialized-views.md.hash deleted file mode 100644 index cc1fe633320..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/guides/developer/cascading-materialized-views.md.hash +++ /dev/null @@ -1 +0,0 @@ -5b0d0dd541852dc1 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/guides/developer/debugging-memory-issues.md b/i18n/jp/docusaurus-plugin-content-docs/current/guides/developer/debugging-memory-issues.md deleted file mode 100644 index d1477cc645f..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/guides/developer/debugging-memory-issues.md +++ /dev/null @@ -1,83 +0,0 @@ ---- -slug: '/guides/developer/debugging-memory-issues' -sidebar_label: 'メモリのデバッグ' -sidebar_position: 1 -description: 'メモリの問題をデバッグするためのクエリ。' -keywords: -- 'memory issues' -title: 'メモリのデバッグ' ---- - - - - -# メモリ問題のデバッグ {#debugging-memory-issues} - -メモリの問題やメモリリークに遭遇した際に、どのクエリやリソースが大量のメモリを消費しているかを知ることは役立ちます。以下には、最適化できるクエリ、データベース、テーブルを見つけるためにメモリ問題をデバッグするのに役立つクエリがあります。 - -## ピークメモリ使用量による現在実行中のプロセスのリスト {#list-currently-running-processes-by-peak-memory} - -```sql -SELECT - initial_query_id, - query, - elapsed, - formatReadableSize(memory_usage), - formatReadableSize(peak_memory_usage), -FROM system.processes -ORDER BY peak_memory_usage DESC -LIMIT 100; -``` - -## メモリ使用量のメトリクスのリスト {#list-metrics-for-memory-usage} - -```sql -SELECT - metric, description, formatReadableSize(value) size -FROM - system.asynchronous_metrics -WHERE - metric like '%Cach%' - or metric like '%Mem%' -order by - value desc; -``` - -## 現在のメモリ使用量によるテーブルのリスト {#list-tables-by-current-memory-usage} - -```sql -SELECT - database, - name, - formatReadableSize(total_bytes) -FROM system.tables -WHERE engine IN ('Memory','Set','Join'); -``` - -## マージによって使用される総メモリの出力 {#output-total-memory-used-by-merges} - -```sql -SELECT formatReadableSize(sum(memory_usage)) FROM system.merges; -``` - -## 現在実行中のプロセスによって使用される総メモリの出力 {#output-total-memory-used-by-currently-running-processes} - -```sql -SELECT formatReadableSize(sum(memory_usage)) FROM system.processes; -``` - -## 辞書によって使用される総メモリの出力 {#output-total-memory-used-by-dictionaries} - -```sql -SELECT formatReadableSize(sum(bytes_allocated)) FROM system.dictionaries; -``` - -## 主キーによって使用される総メモリの出力 {#output-total-memory-used-by-primary-keys} - -```sql -SELECT - sumIf(data_uncompressed_bytes, part_type = 'InMemory') as memory_parts, - formatReadableSize(sum(primary_key_bytes_in_memory)) AS primary_key_bytes_in_memory, - formatReadableSize(sum(primary_key_bytes_in_memory_allocated)) AS primary_key_bytes_in_memory_allocated -FROM system.parts; -``` diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/guides/developer/debugging-memory-issues.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/guides/developer/debugging-memory-issues.md.hash deleted file mode 100644 index 9f6346e176c..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/guides/developer/debugging-memory-issues.md.hash +++ /dev/null @@ -1 +0,0 @@ -6922a935d709913b diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/guides/developer/deduplicating-inserts-on-retries.md b/i18n/jp/docusaurus-plugin-content-docs/current/guides/developer/deduplicating-inserts-on-retries.md deleted file mode 100644 index d5157f281e5..00000000000 --- 
a/i18n/jp/docusaurus-plugin-content-docs/current/guides/developer/deduplicating-inserts-on-retries.md +++ /dev/null @@ -1,562 +0,0 @@ ---- -slug: '/guides/developer/deduplicating-inserts-on-retries' -title: 'リトライ時の挿入重複の排除' -description: '挿入操作をリトライする際に重複データを防ぐ方法' -keywords: -- 'deduplication' -- 'deduplicate' -- 'insert retries' -- 'inserts' ---- - - - -Insert operations can sometimes fail due to errors such as timeouts. When inserts fail, data may or may not have been successfully inserted. This guide covers how to enable deduplication on insert retries such that the same data does not get inserted more than once. - -データを ClickHouse に挿入する際には、タイムアウトなどのエラーにより挿入操作が失敗することがあります。挿入が失敗した場合、データが正常に挿入されているかは不明です。このガイドでは、同じデータが二重に挿入されないように、挿入のリトライ時にデデュプリケーションを有効にする方法について説明します。 - -When an insert is retried, ClickHouse tries to determine whether the data has already been successfully inserted. If the inserted data is marked as a duplicate, ClickHouse does not insert it into the destination table. However, the user will still receive a successful operation status as if the data had been inserted normally. - -挿入がリトライされると、ClickHouse はデータがすでに正常に挿入されているかどうかを判断しようとします。挿入されたデータが重複としてマークされている場合、ClickHouse はそれを宛先テーブルに挿入しません。しかし、ユーザーにはデータが正常に挿入されたかのように、操作成功のステータスが表示されます。 - -## Enabling insert deduplication on retries {#enabling-insert-deduplication-on-retries} - -## リトライ時の挿入デデュプリケーションの有効化 {#enabling-insert-deduplication-on-retries} - -### Insert deduplication for tables {#insert-deduplication-for-tables} - -### テーブルのための挿入デデュプリケーション {#insert-deduplication-for-tables} - -**Only `*MergeTree` engines support deduplication on insertion.** - -**挿入時のデデュプリケーションをサポートするのは `*MergeTree` エンジンのみです。** - -For `*ReplicatedMergeTree` engines, insert deduplication is enabled by default and is controlled by the [`replicated_deduplication_window`](/operations/settings/merge-tree-settings#replicated_deduplication_window) and [`replicated_deduplication_window_seconds`](/operations/settings/merge-tree-settings#replicated_deduplication_window_seconds) settings. For non-replicated `*MergeTree` engines, deduplication is controlled by the [`non_replicated_deduplication_window`](/operations/settings/merge-tree-settings#non_replicated_deduplication_window) setting. - -`*ReplicatedMergeTree` エンジンの場合、挿入デデュプリケーションはデフォルトで有効になっており、[`replicated_deduplication_window`](/operations/settings/merge-tree-settings#replicated_deduplication_window) および [`replicated_deduplication_window_seconds`](/operations/settings/merge-tree-settings#replicated_deduplication_window_seconds) 設定によって制御されます。非レプリケーションの `*MergeTree` エンジンの場合、デデュプリケーションは [`non_replicated_deduplication_window`](/operations/settings/merge-tree-settings#non_replicated_deduplication_window) 設定によって制御されます。 - -The settings above determine the parameters of the deduplication log for a table. The deduplication log stores a finite number of `block_id`s, which determine how deduplication works (see below). - -上記の設定は、テーブルのデデュプリケーションログのパラメータを決定します。デデュプリケーションログは有限の数の `block_id` を保存し、デデュプリケーションがどのように機能するかを決定します(以下参照)。 - -### Query-level insert deduplication {#query-level-insert-deduplication} - -### クエリレベルの挿入デデュプリケーション {#query-level-insert-deduplication} - -The setting `insert_deduplicate=1` enables deduplication at the query level. Note that if you insert data with `insert_deduplicate=0`, that data cannot be deduplicated even if you retry an insert with `insert_deduplicate=1`. This is because the `block_id`s are not written for blocks during inserts with `insert_deduplicate=0`. 
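As a minimal illustration of this behaviour — a sketch only, where the table name `dst` and the sample row are assumptions mirroring the tables created in the examples further down this guide:

```sql
-- Sketch only: assumes a MergeTree table `dst` (key Int64, value String)
-- like the ones defined in the examples below.
SET insert_deduplicate = 0;
INSERT INTO dst VALUES (1, 'A');   -- no block_id is recorded for this block

SET insert_deduplicate = 1;
INSERT INTO dst VALUES (1, 'A');   -- the retry is NOT deduplicated: there is no stored block_id to match against
```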
- -設定 `insert_deduplicate=1` は、クエリレベルでのデデュプリケーションを有効にします。`insert_deduplicate=0` でデータを挿入した場合、そのデータは `insert_deduplicate=1` で挿入をリトライしてもデデュプリケートできません。これは、`insert_deduplicate=0` による挿入時には、ブロックに対して `block_id` が書き込まれないためです。 - -## How insert deduplication works {#how-insert-deduplication-works} - -## 挿入デデュプリケーションの動作方法 {#how-insert-deduplication-works} - -When data is inserted into ClickHouse, it splits data into blocks based on the number of rows and bytes. - -データが ClickHouse に挿入されると、行数とバイト数に基づいてデータをブロックに分割します。 - -For tables using `*MergeTree` engines, each block is assigned a unique `block_id`, which is a hash of the data in that block. This `block_id` is used as a unique key for the insert operation. If the same `block_id` is found in the deduplication log, the block is considered a duplicate and is not inserted into the table. - -`*MergeTree` エンジンを使用しているテーブルの場合、各ブロックにはユニークな `block_id` が割り当てられ、これはそのブロック内のデータのハッシュです。この `block_id` は挿入操作のユニークキーとして使用されます。同じ `block_id` がデデュプリケーションログに見つかった場合、そのブロックは重複と見なされ、テーブルには挿入されません。 - -This approach works well for cases where inserts contain different data. However, if the same data is inserted multiple times intentionally, you need to use the `insert_deduplication_token` setting to control the deduplication process. This setting allows you to specify a unique token for each insert, which ClickHouse uses to determine whether the data is a duplicate. - -このアプローチは、挿入するデータが異なる場合にうまく機能します。しかし、同じデータが意図的に複数回挿入される場合には、`insert_deduplication_token` 設定を使用してデデュプリケーションプロセスを制御する必要があります。この設定を使用すると、各挿入に対してユニークなトークンを指定でき、ClickHouse はそれを使用してデータが重複しているかどうかを判断します。 - -For `INSERT ... VALUES` queries, splitting the inserted data into blocks is deterministic and is determined by settings. Therefore, users should retry insertions with the same settings values as the initial operation. - -`INSERT ... VALUES` クエリの場合、挿入されたデータをブロックに分割することは決定論的であり、設定によって決まります。したがって、ユーザーは初期操作と同じ設定値で挿入をリトライするべきです。 - -For `INSERT ... SELECT` queries, it is important that the `SELECT` part of the query returns the same data in the same order for each operation. Note that this is hard to achieve in practical usage. To ensure stable data order on retries, define a precise `ORDER BY` section in the `SELECT` part of the query. Keep in mind that it is possible that the selected table could be updated between retries: the result data could have changed and deduplication will not occur. Additionally, in situations where you are inserting large amounts of data, it is possible that the number of blocks after inserts can overflow the deduplication log window, and ClickHouse won't know to deduplicate the blocks. - -`INSERT ... SELECT` クエリの場合、クエリの `SELECT` 部分が各操作で同じデータを同じ順序で返すことが重要です。これは実際の使用で達成するのが難しい点に注意してください。リトライ時にデータ順序が安定することを保証するために、クエリの `SELECT` 部分に正確な `ORDER BY` セクションを定義してください。リトライの間に選択されたテーブルが更新される可能性があることにも留意してください: 結果データが変更されてデデュプリケーションが行われない可能性があります。また、大量のデータを挿入している場合、挿入後のブロック数がデデュプリケーションログウィンドウをオーバーフローする可能性があり、ClickHouse はブロックをデデュプリケートする方法を知りません。 - -## Insert deduplication with materialized views {#insert-deduplication-with-materialized-views} - -## マテリアライズドビューを使用した挿入デデュプリケーション {#insert-deduplication-with-materialized-views} - -When a table has one or more materialized views, the inserted data is also inserted into the destination of those views with the defined transformations. The transformed data is also deduplicated on retries. ClickHouse performs deduplications for materialized views in the same way it deduplicates data inserted into the target table. 
- -テーブルに1つ以上のマテリアライズドビューがある場合、挿入されたデータは定義された変換とともにそれらのビューの宛先にも挿入されます。変換されたデータもリトライ時にデデュプリケートされます。ClickHouse は、マテリアライズドビューに対してデデュプリケーションを実行する際、ターゲットテーブルに挿入されたデータのデデュプリケーションと同じように処理します。 - -You can control this process using the following settings for the source table: - -このプロセスは、ソーステーブルに対して以下の設定を使用して制御できます。 - -- [`replicated_deduplication_window`](/operations/settings/merge-tree-settings#replicated_deduplication_window) -- [`replicated_deduplication_window_seconds`](/operations/settings/merge-tree-settings#replicated_deduplication_window_seconds) -- [`non_replicated_deduplication_window`](/operations/settings/merge-tree-settings#non_replicated_deduplication_window) - -You can also use the user profile setting [`deduplicate_blocks_in_dependent_materialized_views`](/operations/settings/settings#deduplicate_blocks_in_dependent_materialized_views). - -ユーザープロファイル設定 [`deduplicate_blocks_in_dependent_materialized_views`](/operations/settings/settings#deduplicate_blocks_in_dependent_materialized_views) を使用することもできます。 - -When inserting blocks into tables under materialized views, ClickHouse calculates the `block_id` by hashing a string that combines the `block_id`s from the source table and additional identifiers. This ensures accurate deduplication within materialized views, allowing data to be distinguished based on its original insertion, regardless of any transformations applied before reaching the destination table under the materialized view. - -マテリアライズドビューの下にあるテーブルにブロックを挿入する際、ClickHouse はソーステーブルからの `block_id` および追加の識別子を組み合わせた文字列をハッシュ化して `block_id` を計算します。これにより、マテリアライズドビュー内での正確なデデュプリケーションが保証され、どのような変換が行われる前に元の挿入に基づいてデータを区別できるようになります。 - -## Examples {#examples} - -## 例 {#examples} - -### Identical blocks after materialized view transformations {#identical-blocks-after-materialized-view-transformations} - -### マテリアライズドビューの変換後の同一ブロック {#identical-blocks-after-materialized-view-transformations} - -Identical blocks, which have been generated during transformation inside a materialized view, are not deduplicated because they are based on different inserted data. - -マテリアライズドビュー内での変換中に生成された同一ブロックは、異なる挿入データに基づいているためデデュプリケートされません。 - -Here is an example: - -以下が例です: - -```sql -CREATE TABLE dst -( - `key` Int64, - `value` String -) -ENGINE = MergeTree -ORDER BY tuple() -SETTINGS non_replicated_deduplication_window=1000; - -CREATE MATERIALIZED VIEW mv_dst -( - `key` Int64, - `value` String -) -ENGINE = MergeTree -ORDER BY tuple() -SETTINGS non_replicated_deduplication_window=1000 -AS SELECT - 0 AS key, - value AS value -FROM dst; -``` - -```sql -SET max_block_size=1; -SET min_insert_block_size_rows=0; -SET min_insert_block_size_bytes=0; -``` - -The settings above allow us to select from a table with a series of blocks containing only one row. These small blocks are not squashed and remain the same until they are inserted into a table. - -上記の設定により、1行だけを含む一連のブロックからテーブルを選択できるようになります。これらの小さなブロックは圧縮されず、テーブルに挿入されるまでそのまま残ります。 - -```sql -SET deduplicate_blocks_in_dependent_materialized_views=1; -``` - -We need to enable deduplication in materialized view: - -マテリアライズドビューでのデデュプリケーションを有効にする必要があります: - -```sql -INSERT INTO dst SELECT - number + 1 AS key, - IF(key = 0, 'A', 'B') AS value -FROM numbers(2); - -SELECT - *, - _part -FROM dst -ORDER by all; - -┌─key─┬─value─┬─_part─────┐ -│ 1 │ B │ all_0_0_0 │ -│ 2 │ B │ all_1_1_0 │ -└─────┴───────┴───────────┘ -``` - -Here we see that two parts have been inserted into the `dst` table. 2 blocks from select -- 2 parts on insert. The parts contains different data. 
- -ここでは、`dst` テーブルに2つのパーツが挿入されたことがわかります。select からの2ブロック--挿入時の2パーツ。パーツには異なるデータが含まれています。 - -```sql -SELECT - *, - _part -FROM mv_dst -ORDER by all; - -┌─key─┬─value─┬─_part─────┐ -│ 0 │ B │ all_0_0_0 │ -│ 0 │ B │ all_1_1_0 │ -└─────┴───────┴───────────┘ -``` - -Here we see that 2 parts have been inserted into the `mv_dst` table. That parts contain the same data, however they are not deduplicated. - -ここでは、`mv_dst` テーブルに2つのパーツが挿入されたことがわかります。そのパーツは同じデータを含んでいますが、デデュプリケートされていません。 - -```sql -INSERT INTO dst SELECT - number + 1 AS key, - IF(key = 0, 'A', 'B') AS value -FROM numbers(2); - -SELECT - *, - _part -FROM dst -ORDER by all; - -┌─key─┬─value─┬─_part─────┐ -│ 1 │ B │ all_0_0_0 │ -│ 2 │ B │ all_1_1_0 │ -└─────┴───────┴───────────┘ - -SELECT - *, - _part -FROM mv_dst -ORDER by all; - -┌─key─┬─value─┬─_part─────┐ -│ 0 │ B │ all_0_0_0 │ -│ 0 │ B │ all_1_1_0 │ -└─────┴───────┴───────────┘ -``` - -Here we see that when we retry the inserts, all data is deduplicated. Deduplication works for both the `dst` and `mv_dst` tables. - -ここでは、リトライ時にすべてのデータがデデュプリケートされていることがわかります。デデュプリケーションは、`dst` と `mv_dst` テーブルの両方で機能します。 - -### Identical blocks on insertion {#identical-blocks-on-insertion} - -### 挿入時の同一ブロック {#identical-blocks-on-insertion} - -```sql -CREATE TABLE dst -( - `key` Int64, - `value` String -) -ENGINE = MergeTree -ORDER BY tuple() -SETTINGS non_replicated_deduplication_window=1000; - -SET max_block_size=1; -SET min_insert_block_size_rows=0; -SET min_insert_block_size_bytes=0; -``` - -Insertion: - -挿入: - -```sql -INSERT INTO dst SELECT - 0 AS key, - 'A' AS value -FROM numbers(2); - -SELECT - 'from dst', - *, - _part -FROM dst -ORDER by all; - -┌─'from dst'─┬─key─┬─value─┬─_part─────┐ -│ from dst │ 0 │ A │ all_0_0_0 │ -└────────────┴─────┴───────┴───────────┘ -``` - -With the settings above, two blocks result from select– as a result, there should be two blocks for insertion into table `dst`. However, we see that only one block has been inserted into table `dst`. This occurred because the second block has been deduplicated. It has the same data and the key for deduplication `block_id` which is calculated as a hash from the inserted data. This behaviour is not what was expected. Such cases are a rare occurrence, but theoretically is possible. In order to handle such cases correctly, the user has to provide a `insert_deduplication_token`. 
Let's fix this with the following examples: - -上記の設定では、select から2ブロックが生成されるため、`dst` テーブルに挿入するためには2ブロック必要です。しかし、`dst` テーブルには1つのブロックしか挿入されていないことがわかります。これは、2番目のブロックがデデュプリケートされたためです。これは同じデータが含まれており、重複のための `block_id` は挿入データのハッシュとして計算されたためです。この動作は期待されるものではありません。このようなケースは稀ですが、理論的には可能です。このようなケースを適切に処理するためには、ユーザーが `insert_deduplication_token` を提供する必要があります。以下の例で修正してみましょう: - -### Identical blocks in insertion with `insert_deduplication_token` {#identical-blocks-in-insertion-with-insert-deduplication_token} - -### `insert_deduplication_token` を使用した挿入時の同一ブロック {#identical-blocks-in-insertion-with-insert_deduplication_token} - -```sql -CREATE TABLE dst -( - `key` Int64, - `value` String -) -ENGINE = MergeTree -ORDER BY tuple() -SETTINGS non_replicated_deduplication_window=1000; - -SET max_block_size=1; -SET min_insert_block_size_rows=0; -SET min_insert_block_size_bytes=0; -``` - -Insertion: - -挿入: - -```sql -INSERT INTO dst SELECT - 0 AS key, - 'A' AS value -FROM numbers(2) -SETTINGS insert_deduplication_token='some_user_token'; - -SELECT - 'from dst', - *, - _part -FROM dst -ORDER by all; - -┌─'from dst'─┬─key─┬─value─┬─_part─────┐ -│ from dst │ 0 │ A │ all_2_2_0 │ -│ from dst │ 0 │ A │ all_3_3_0 │ -└────────────┴─────┴───────┴───────────┘ -``` - -Two identical blocks have been inserted as expected. - -予想通り、2つの同一ブロックが挿入されました。 - -```sql -select 'second attempt'; - -INSERT INTO dst SELECT - 0 AS key, - 'A' AS value -FROM numbers(2) -SETTINGS insert_deduplication_token='some_user_token'; - -SELECT - 'from dst', - *, - _part -FROM dst -ORDER by all; - -┌─'from dst'─┬─key─┬─value─┬─_part─────┐ -│ from dst │ 0 │ A │ all_2_2_0 │ -│ from dst │ 0 │ A │ all_3_3_0 │ -└────────────┴─────┴───────┴───────────┘ -``` - -Retried insertion is deduplicated as expected. - -リトライした挿入も予想通りデデュプリケートされました。 - -```sql -select 'third attempt'; - -INSERT INTO dst SELECT - 1 AS key, - 'b' AS value -FROM numbers(2) -SETTINGS insert_deduplication_token='some_user_token'; - -SELECT - 'from dst', - *, - _part -FROM dst -ORDER by all; - -┌─'from dst'─┬─key─┬─value─┬─_part─────┐ -│ from dst │ 0 │ A │ all_2_2_0 │ -│ from dst │ 0 │ A │ all_3_3_0 │ -└────────────┴─────┴───────┴───────────┘ -``` - -That insertion is also deduplicated even though it contains different inserted data. Note that `insert_deduplication_token` has higher priority: ClickHouse does not use the hash sum of data when `insert_deduplication_token` is provided. 
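The converse also follows from the token taking priority: re-sending identical data under a different token is treated as new data. The sketch below reuses the `dst` table from the example above; the token string is arbitrary and purely illustrative:

```sql
-- Sketch: same data as before, but a different deduplication token.
-- The blocks are NOT treated as duplicates, because the deduplication key is
-- derived from the token rather than from a hash of the inserted data.
INSERT INTO dst SELECT
    0 AS key,
    'A' AS value
FROM numbers(2)
SETTINGS insert_deduplication_token='another_user_token';
```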
- -その挿入も異なる挿入データが含まれていてもデデュプリケートされます。`insert_deduplication_token` が優先されることに注意してください: `insert_deduplication_token` が提供されている場合、ClickHouse はデータのハッシュ合計を使用しません。 - -### Different insert operations generate the same data after transformation in the underlying table of the materialized view {#different-insert-operations-generate-the-same-data-after-transformation-in-the-underlying-table-of-the-materialized-view} - -### マテリアライズドビューの基盤となるテーブルでの変換後に異なる挿入操作が同じデータを生成する {#different-insert-operations-generate-the-same-data-after-transformation-in-the-underlying-table-of-the-materialized-view} - -```sql -CREATE TABLE dst -( - `key` Int64, - `value` String -) -ENGINE = MergeTree -ORDER BY tuple() -SETTINGS non_replicated_deduplication_window=1000; - -CREATE MATERIALIZED VIEW mv_dst -( - `key` Int64, - `value` String -) -ENGINE = MergeTree -ORDER BY tuple() -SETTINGS non_replicated_deduplication_window=1000 -AS SELECT - 0 AS key, - value AS value -FROM dst; - -SET deduplicate_blocks_in_dependent_materialized_views=1; - -select 'first attempt'; - -INSERT INTO dst VALUES (1, 'A'); - -SELECT - 'from dst', - *, - _part -FROM dst -ORDER by all; - -┌─'from dst'─┬─key─┬─value─┬─_part─────┐ -│ from dst │ 1 │ A │ all_0_0_0 │ -└────────────┴─────┴───────┴───────────┘ - -SELECT - 'from mv_dst', - *, - _part -FROM mv_dst -ORDER by all; - -┌─'from mv_dst'─┬─key─┬─value─┬─_part─────┐ -│ from mv_dst │ 0 │ A │ all_0_0_0 │ -└───────────────┴─────┴───────┴───────────┘ - -select 'second attempt'; - -INSERT INTO dst VALUES (2, 'A'); - -SELECT - 'from dst', - *, - _part -FROM dst -ORDER by all; - -┌─'from dst'─┬─key─┬─value─┬─_part─────┐ -│ from dst │ 1 │ A │ all_0_0_0 │ -│ from dst │ 2 │ A │ all_1_1_0 │ -└────────────┴─────┴───────┴───────────┘ - -SELECT - 'from mv_dst', - *, - _part -FROM mv_dst -ORDER by all; - -┌─'from mv_dst'─┬─key─┬─value─┬─_part─────┐ -│ from mv_dst │ 0 │ A │ all_0_0_0 │ -│ from mv_dst │ 0 │ A │ all_1_1_0 │ -└───────────────┴─────┴───────┴───────────┘ -``` - -We insert different data each time. However, the same data is inserted into the `mv_dst` table. Data is not deduplicated because the source data was different. 
- -毎回異なるデータを挿入しています。しかし、同じデータが `mv_dst` テーブルに挿入されています。ソースデータが異なるため、データはデデュプリケートされません。 - -### Different materialized view inserts into one underlying table with equivalent data {#different-materialized-view-inserts-into-one-underlying-table-with-equivalent-data} - -### 同一データに対して1つの基盤となるテーブルに異なるマテリアライズドビューの挿入 {#different-materialized-view-inserts-into-one-underlying-table-with-equivalent-data} - -```sql -CREATE TABLE dst -( - `key` Int64, - `value` String -) -ENGINE = MergeTree -ORDER BY tuple() -SETTINGS non_replicated_deduplication_window=1000; - -CREATE TABLE mv_dst -( - `key` Int64, - `value` String -) -ENGINE = MergeTree -ORDER BY tuple() -SETTINGS non_replicated_deduplication_window=1000; - -CREATE MATERIALIZED VIEW mv_first -TO mv_dst -AS SELECT - 0 AS key, - value AS value -FROM dst; - -CREATE MATERIALIZED VIEW mv_second -TO mv_dst -AS SELECT - 0 AS key, - value AS value -FROM dst; - -SET deduplicate_blocks_in_dependent_materialized_views=1; - -select 'first attempt'; - -INSERT INTO dst VALUES (1, 'A'); - -SELECT - 'from dst', - *, - _part -FROM dst -ORDER by all; - -┌─'from dst'─┬─key─┬─value─┬─_part─────┐ -│ from dst │ 1 │ A │ all_0_0_0 │ -└────────────┴─────┴───────┴───────────┘ - -SELECT - 'from mv_dst', - *, - _part -FROM mv_dst -ORDER by all; - -┌─'from mv_dst'─┬─key─┬─value─┬─_part─────┐ -│ from mv_dst │ 0 │ A │ all_0_0_0 │ -│ from mv_dst │ 0 │ A │ all_1_1_0 │ -└───────────────┴─────┴───────┴───────────┘ -``` - -Two equal blocks inserted to the table `mv_dst` (as expected). - -期待通り、`mv_dst` テーブルに等しい2つのブロックが挿入されました。 - -```sql -select 'second attempt'; - -INSERT INTO dst VALUES (1, 'A'); - -SELECT - 'from dst', - *, - _part -FROM dst -ORDER by all; - -┌─'from dst'─┬─key─┬─value─┬─_part─────┐ -│ from dst │ 1 │ A │ all_0_0_0 │ -└────────────┴─────┴───────┴───────────┘ - -SELECT - 'from mv_dst', - *, - _part -FROM mv_dst -ORDER by all; - -┌─'from mv_dst'─┬─key─┬─value─┬─_part─────┐ -│ from mv_dst │ 0 │ A │ all_0_0_0 │ -│ from mv_dst │ 0 │ A │ all_1_1_0 │ -└───────────────┴─────┴───────┴───────────┘ -``` - -That retry operation is deduplicated on both tables `dst` and `mv_dst`. 
- -そのリトライ操作は、`dst` テーブルと `mv_dst` テーブルの両方でデデュプリケートされます。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/guides/developer/deduplicating-inserts-on-retries.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/guides/developer/deduplicating-inserts-on-retries.md.hash deleted file mode 100644 index 84e5a225edb..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/guides/developer/deduplicating-inserts-on-retries.md.hash +++ /dev/null @@ -1 +0,0 @@ -05b579ce63235c92 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/guides/developer/deduplication.md b/i18n/jp/docusaurus-plugin-content-docs/current/guides/developer/deduplication.md deleted file mode 100644 index e4aebd54af8..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/guides/developer/deduplication.md +++ /dev/null @@ -1,345 +0,0 @@ ---- -slug: '/guides/developer/deduplication' -sidebar_label: '重複排除戦略' -sidebar_position: 3 -description: '頻繁なupsert、更新、削除を行う場合に、重複排除を使用します。' -title: '重複排除戦略' ---- - -import deduplication from '@site/static/images/guides/developer/de_duplication.png'; -import Image from '@theme/IdealImage'; - - - -# 重複排除戦略 - -**重複排除**とは、***データセットの重複行を削除するプロセス***を指します。OLTPデータベースでは、各行に一意の主キーがあるため、これを簡単に行うことができますが、挿入が遅くなるという代償があります。挿入されたすべての行は、まず検索され、もし見つかった場合には置き換えられる必要があります。 - -ClickHouseはデータ挿入の速度を考慮して構築されています。ストレージファイルは不変であり、ClickHouseは行を挿入する前に既存の主キーをチェックしないため、重複排除には少し余分な労力が必要です。これはまた、重複排除が即時に行われないことを意味します - **最終的**に行われるものであり、いくつかの副作用があります: - -- いつでも、テーブルには重複(同じソートキーを持つ行)が存在する可能性があります -- 重複行の実際の削除はパーツのマージ中に発生します -- クエリは重複の可能性を考慮する必要があります - -
- -||| -|------|----| -|重複排除のロゴ|ClickHouseは、重複排除やその他多くのトピックに関する無料トレーニングを提供しています。 [データの削除と更新のトレーニングモジュール](https://learn.clickhouse.com/visitor_catalog_class/show/1328954/?utm_source=clickhouse&utm_medium=docs)は、始めるのに良い場所です。| - -
- -## 重複排除のオプション {#options-for-deduplication} - -重複排除は、以下のテーブルエンジンを使用してClickHouseで実装されています。 - -1. `ReplacingMergeTree`テーブルエンジン:このテーブルエンジンでは、同じソートキーを持つ重複行がマージ中に削除されます。`ReplacingMergeTree`は、クエリが最後に挿入された行を返すようにしたい場合に、upsertの動作を模倣するのに良い選択です。 - -2. 行の崩壊:`CollapsingMergeTree`および`VersionedCollapsingMergeTree`テーブルエンジンは、既存の行が「キャンセル」され、新しい行が挿入されるというロジックを使用します。これらは`ReplacingMergeTree`よりも実装が複雑ですが、データがまだマージされているかどうかを気にせずに、クエリと集約を簡単に記述できます。これらの2つのテーブルエンジンは、データを頻繁に更新する必要がある場合に便利です。 - -以下に、これらのテクニックの両方を説明します。詳細については、無料のオンデマンド[データの削除と更新のトレーニングモジュール](https://learn.clickhouse.com/visitor_catalog_class/show/1328954/?utm_source=clickhouse&utm_medium=docs)をチェックしてください。 - -## ReplacingMergeTreeを使用したUpserts {#using-replacingmergetree-for-upserts} - -テーブルがHacker Newsのコメントを含み、viewsカラムがコメントが閲覧された回数を示しているシンプルな例を見てみましょう。記事が公開されたときに新しい行を挿入し、もし値が増加した場合は、毎日合計閲覧数で新しい行をupsertするとします: - -```sql -CREATE TABLE hackernews_rmt ( - id UInt32, - author String, - comment String, - views UInt64 -) -ENGINE = ReplacingMergeTree -PRIMARY KEY (author, id) -``` - -2行を挿入しましょう: - -```sql -INSERT INTO hackernews_rmt VALUES - (1, 'ricardo', 'This is post #1', 0), - (2, 'ch_fan', 'This is post #2', 0) -``` - -`views`カラムを更新するためには、同じ主キーで新しい行を挿入します(`views`カラムの新しい値に注意してください): - -```sql -INSERT INTO hackernews_rmt VALUES - (1, 'ricardo', 'This is post #1', 100), - (2, 'ch_fan', 'This is post #2', 200) -``` - -現在、テーブルには4行あります: - -```sql -SELECT * -FROM hackernews_rmt -``` - -```response -┌─id─┬─author──┬─comment─────────┬─views─┐ -│ 2 │ ch_fan │ This is post #2 │ 0 │ -│ 1 │ ricardo │ This is post #1 │ 0 │ -└────┴─────────┴─────────────────┴───────┘ -┌─id─┬─author──┬─comment─────────┬─views─┐ -│ 2 │ ch_fan │ This is post #2 │ 200 │ -│ 1 │ ricardo │ This is post #1 │ 100 │ -└────┴─────────┴─────────────────┴───────┘ -``` - -出力の上部の別々のボックスは、背後での2つのパーツを示しています - このデータはまだマージされていないため、重複行はまだ削除されていません。クエリ結果の論理的なマージを行うために、`SELECT`クエリで`FINAL`キーワードを使用しましょう: - -```sql -SELECT * -FROM hackernews_rmt -FINAL -``` - -```response -┌─id─┬─author──┬─comment─────────┬─views─┐ -│ 2 │ ch_fan │ This is post #2 │ 200 │ -│ 1 │ ricardo │ This is post #1 │ 100 │ -└────┴─────────┴─────────────────┴───────┘ -``` - -結果には2行のみがあり、最後に挿入された行が返されます。 - -:::note -`FINAL`を使用することは少量のデータであれば良好ですが、大量のデータを処理する場合、`FINAL`を使用することはお勧めできません。列の最新値を見つけるためのより良い選択肢を議論しましょう... 
-::: - -### FINALの回避 {#avoiding-final} - -ユニークな行の両方の`views`カラムを更新しましょう: - -```sql -INSERT INTO hackernews_rmt VALUES - (1, 'ricardo', 'This is post #1', 150), - (2, 'ch_fan', 'This is post #2', 250) -``` - -現在、テーブルには6行あり、実際のマージはまだ行われておらず(`FINAL`を使用した際のクエリ時間のマージのみ)、 - -```sql -SELECT * -FROM hackernews_rmt -``` - -```response -┌─id─┬─author──┬─comment─────────┬─views─┐ -│ 2 │ ch_fan │ This is post #2 │ 200 │ -│ 1 │ ricardo │ This is post #1 │ 100 │ -└────┴─────────┴─────────────────┴───────┘ -┌─id─┬─author──┬─comment─────────┬─views─┐ -│ 2 │ ch_fan │ This is post #2 │ 0 │ -│ 1 │ ricardo │ This is post #1 │ 0 │ -└────┴─────────┴─────────────────┴───────┘ -┌─id─┬─author──┬─comment─────────┬─views─┐ -│ 2 │ ch_fan │ This is post #2 │ 250 │ -│ 1 │ ricardo │ This is post #1 │ 150 │ -└────┴─────────┴─────────────────┴───────┘ -``` - -`FINAL`を使用する代わりに、ビジネスロジックを利用しましょう - `views`カラムは常に増加していると知っているので、希望するカラムでグループ化した後、`max`関数を使用して最大値を持つ行を選択します: - -```sql -SELECT - id, - author, - comment, - max(views) -FROM hackernews_rmt -GROUP BY (id, author, comment) -``` - -```response -┌─id─┬─author──┬─comment─────────┬─max(views)─┐ -│ 2 │ ch_fan │ This is post #2 │ 250 │ -│ 1 │ ricardo │ This is post #1 │ 150 │ -└────┴─────────┴─────────────────┴────────────┘ -``` - -上記のクエリのようにグループ化することは、実際には`FINAL`キーワードを使用するよりも効率的(クエリ性能の観点から)です。 - -私たちの[データの削除と更新のトレーニングモジュール](https://learn.clickhouse.com/visitor_catalog_class/show/1328954/?utm_source=clickhouse&utm_medium=docs)では、この例を拡張し、`ReplacingMergeTree`で`version`カラムを使用する方法を含めます。 - -## columnsを頻繁に更新するためのCollapsingMergeTreeの使用 {#using-collapsingmergetree-for-updating-columns-frequently} - -カラムを更新することは、既存の行を削除し、新しい値で置き換えることを含みます。すでに見たように、ClickHouseではこのタイプの変異は _最終的に_ 発生します - マージの際に。更新する行が多い場合、`ALTER TABLE..UPDATE`を避けて、既存のデータとともに新しいデータを挿入する方が実際には効率的であることがあります。データが古いか新しいかを示すカラムを追加することができ... 
実際には、この動作を非常にうまく実装しているテーブルエンジンがあり、古いデータは自動的に削除されます。どのように機能するか見てみましょう。 - -外部システムを使用してHacker Newsのコメントの閲覧数を追跡し、数時間ごとにデータをClickHouseにプッシュするとしましょう。古い行を削除し、新しい行が各Hacker Newsのコメントの新しい状態を表すようにしたいと考えています。この動作を実装するために`CollapsingMergeTree`を使用できます。 - -閲覧数を保存するためのテーブルを定義しましょう: - -```sql -CREATE TABLE hackernews_views ( - id UInt32, - author String, - views UInt64, - sign Int8 -) -ENGINE = CollapsingMergeTree(sign) -PRIMARY KEY (id, author) -``` - -`hackernews_views`テーブルには`Int8`型のsignというカラムがあります。これは**sign**カラムと呼ばれます。signカラムの名前は任意ですが、`Int8`データ型が必要であり、signカラムの名前は`CollapsingMergeTree`テーブルのコンストラクタに渡されました。 - -`CollapsingMergeTree`テーブルのsignカラムとは何でしょうか?それは行の_状態_ を表し、signカラムは1または-1のみ可能です。動作は次のとおりです: - -- 二つの行が同じ主キー(または、主キーが異なる場合はソート順)を持ち、signカラムの値が異なる場合、+1で挿入された最後の行が状態行となり、他の行が互いにキャンセルされます。 -- 互いにキャンセルされる行はマージの際に削除されます。 -- 対になる行を持たない行は保持されます。 - -では、`hackernews_views`テーブルに行を追加しましょう。それがこの主キーの唯一の行なので、状態を1に設定します: - -```sql -INSERT INTO hackernews_views VALUES - (123, 'ricardo', 0, 1) -``` - -次に、`views`カラムを変更したいとします。既存の行をキャンセルする行と、その行の新しい状態を含む行の2行を挿入します: - -```sql -INSERT INTO hackernews_views VALUES - (123, 'ricardo', 0, -1), - (123, 'ricardo', 150, 1) -``` - -テーブルには、主キー`(123, 'ricardo')`で3行があります: - -```sql -SELECT * -FROM hackernews_views -``` - -```response -┌──id─┬─author──┬─views─┬─sign─┐ -│ 123 │ ricardo │ 0 │ -1 │ -│ 123 │ ricardo │ 150 │ 1 │ -└─────┴─────────┴───────┴──────┘ -┌──id─┬─author──┬─views─┬─sign─┐ -│ 123 │ ricardo │ 0 │ 1 │ -└─────┴─────────┴───────┴──────┘ -``` - -`FINAL`を加えると、現在の状態行が返されます: - -```sql -SELECT * -FROM hackernews_views -FINAL -``` - -```response -┌──id─┬─author──┬─views─┬─sign─┐ -│ 123 │ ricardo │ 150 │ 1 │ -└─────┴─────────┴───────┴──────┘ -``` - -しかし、大きなテーブルに対して`FINAL`を使用することは推奨されません。 - -:::note -私たちの例で挿入した`views`カラムの値は実際には必要ありませんし、古い行の`views`の現在の値と一致する必要もありません。実際には、主キーと-1だけで行をキャンセルできます: - -```sql -INSERT INTO hackernews_views(id, author, sign) VALUES - (123, 'ricardo', -1) -``` -::: - -## 複数スレッドからのリアルタイム更新 {#real-time-updates-from-multiple-threads} - -`CollapsingMergeTree`テーブルでは、行がsignカラムを使って互いにキャンセルされ、行の状態は最後に挿入された行によって決まります。しかし、異なるスレッドから行を挿入している場合、行が順序を無視して挿入される可能性があるため、これは問題になることがあります。「最後の」行を使用することは、この状況では機能しません。 - -ここで`VersionedCollapsingMergeTree`が便利です - これは`CollapsingMergeTree`のように行を崩しますが、最後に挿入された行ではなく、指定したバージョンカラムの最大値を持つ行を保持します。 - -例を見てみましょう。Hacker Newsのコメントの閲覧数を追跡したいとし、データが頻繁に更新されるとます。レポートには、強制的にマージを待つことなく最新の値を使用することを望みます。`CollapsedMergeTree`に類似したテーブルから始め、行の状態のバージョンを保存するためのカラムを追加しましょう: - -```sql -CREATE TABLE hackernews_views_vcmt ( - id UInt32, - author String, - views UInt64, - sign Int8, - version UInt32 -) -ENGINE = VersionedCollapsingMergeTree(sign, version) -PRIMARY KEY (id, author) -``` - -テーブルは`VersionedCollapsingMergeTree`をエンジンとして使用し、**signカラム**と**versionカラム**を渡しています。テーブルの動作は次の通りです: - -- 同じ主キーとバージョンを持ち、異なるsignを持つ行のペアが削除されます。 -- 行が挿入された順序は重要ではありません。 -- バージョンカラムが主キーの一部でない場合、ClickHouseはそれを暗黙的に主キーの最後のフィールドとして追加します。 - -クエリを書くときも同様のロジックを使用します - 主キーでグループ化し、キャンセルされているがまだ削除されていない行を避けるための巧妙なロジックを使用します。`hackernews_views_vcmt`テーブルに行を追加しましょう: - -```sql -INSERT INTO hackernews_views_vcmt VALUES - (1, 'ricardo', 0, 1, 1), - (2, 'ch_fan', 0, 1, 1), - (3, 'kenny', 0, 1, 1) -``` - -次に、2行を更新し、そのうちの1行を削除します。行をキャンセルするためには、以前のバージョン番号を含めることを確認してください(それも主キーの一部であるため): - -```sql -INSERT INTO hackernews_views_vcmt VALUES - (1, 'ricardo', 0, -1, 1), - (1, 'ricardo', 50, 1, 2), - (2, 'ch_fan', 0, -1, 1), - (3, 'kenny', 0, -1, 1), - (3, 'kenny', 1000, 1, 2) -``` - -以前のように、signカラムに基づいて値を増加させたり減少させたりするクエリを実行します: - -```sql -SELECT - id, - author, - sum(views * sign) -FROM hackernews_views_vcmt 
-GROUP BY (id, author) -HAVING sum(sign) > 0 -ORDER BY id ASC -``` - -結果は2行です: - -```response -┌─id─┬─author──┬─sum(multiply(views, sign))─┐ -│ 1 │ ricardo │ 50 │ -│ 3 │ kenny │ 1000 │ -└────┴─────────┴────────────────────────────┘ -``` - -テーブルのマージを強制します: - -```sql -OPTIMIZE TABLE hackernews_views_vcmt -``` - -結果には2行だけが表示されるはずです: - -```sql -SELECT * -FROM hackernews_views_vcmt -``` - -```response -┌─id─┬─author──┬─views─┬─sign─┬─version─┐ -│ 1 │ ricardo │ 50 │ 1 │ 2 │ -│ 3 │ kenny │ 1000 │ 1 │ 2 │ -└────┴─────────┴───────┴──────┴─────────┘ -``` - -`VersionedCollapsingMergeTree`テーブルは、複数のクライアントおよび/またはスレッドから行を挿入しながら重複排除を実装したい場合に非常に便利です。 - -## なぜ行が重複排除されないのか? {#why-arent-my-rows-being-deduplicated} - -挿入された行が重複排除されない理由の一つは、`INSERT`文に非冪等関数または式を使用している場合です。例えば、`createdAt DateTime64(3) DEFAULT now()`というカラムを持つ行を挿入している場合、各行には`createdAt`カラムの一意のデフォルト値があるため、行は確実に一意です。MergeTree / ReplicatedMergeTreeテーブルエンジンは、挿入された各行が一意のチェックサムを生成するため、行を重複排除することはできません。 - -この場合、同じバッチの行が複数回挿入されても同じ行が再挿入されないように、各バッチごとに独自の`insert_deduplication_token`を指定できます。この設定の使用方法についての詳細は、[`insert_deduplication_token`に関するドキュメント](/operations/settings/settings#insert_deduplication_token)を参照してください。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/guides/developer/deduplication.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/guides/developer/deduplication.md.hash deleted file mode 100644 index 051dc8745c8..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/guides/developer/deduplication.md.hash +++ /dev/null @@ -1 +0,0 @@ -263ae6e68394b5ef diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/guides/developer/index.md b/i18n/jp/docusaurus-plugin-content-docs/current/guides/developer/index.md deleted file mode 100644 index cde2d93a9ef..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/guides/developer/index.md +++ /dev/null @@ -1,25 +0,0 @@ ---- -slug: '/guides/developer/overview' -sidebar_label: '高度なガイドの概要' -description: '高度なガイドの概要' -title: 'Advanced Guides' ---- - - - - -# 高度なガイド - -このセクションには、次の高度なガイドが含まれています。 - -| ガイド | 説明 | -|------------------------------------------------------------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| [Alternative Query Languages](../developer/alternative-query-languages) | サポートされている代替の方言とそれを使用する方法に関するガイド。各方言のクエリの例を提供します。 | -| [Cascading Materialized Views](../developer/cascading-materialized-views) | Materialized View を作成し、それらをカスケードさせて、複数のソーステーブルを単一のデスティネーションテーブルに統合する方法に関するガイド。ドメイン名のグループに対して、月と年ごとにデータを集計するためにカスケードする Materialized Views を使用する例を含みます。 | -| [Debugging memory issues](../developer/debugging-memory-issues) | ClickHouse 内のメモリ問題をデバッグする方法に関するガイド。 | -| [Deduplicating Inserts on Retries](../developer/deduplicating-inserts-on-retries) | 失敗した挿入をリトライする可能性がある状況を処理する方法に関するガイド。 | -| [Deduplication Strategies](../developer/deduplication) | データの重複排除に関するガイドであり、データベースから重複行を削除するための手法です。OLTP システムにおける主キーによる重複排除との違い、ClickHouse の重複排除のアプローチ、および ClickHouse のクエリ内で重複データシナリオを処理する方法について説明します。 | -| [Filling gaps in time-series data](../developer/time-series-filling-gaps) | 時系列データを扱う ClickHouse の機能に関するガイドで、データのギャップを埋める技術を含み、時系列情報のより完全で連続した表現を作成します。 | -| [Manage Data with TTL (Time-to-live)](../developer/ttl) | `WITH FILL` 
句を使用して時系列データのギャップを埋める方法について説明するガイド。ゼロ値でギャップを埋める方法、ギャップを埋めるための開始点の指定方法、特定の終了点までギャップを埋める方法、累積計算のために値を補間する方法について説明します。 | -| [Understanding Query Execution with the Analyzer](../developer/understanding-query-execution-with-the-analyzer) | アナライザーツールを紹介して ClickHouse のクエリ実行を解き明かすガイド。アナライザーがクエリを一連のステップに分解し、最適なパフォーマンスのために全体の実行プロセスを視覚化し、トラブルシューティングできるようにします。 | -| [Using JOINs in ClickHouse](../joining-tables) | ClickHouse におけるテーブル結合を簡素化するガイド。さまざまな結合タイプ(`INNER`、`LEFT`、`RIGHT` など)をカバーし、効率的な結合のためのベストプラクティス(小さいテーブルを右側に配置するなど)を探り、複雑なデータ関係のためにクエリを最適化するのに役立つ ClickHouse の内部結合アルゴリズムについての洞察を提供します。 | diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/guides/developer/index.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/guides/developer/index.md.hash deleted file mode 100644 index 59cd6b92436..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/guides/developer/index.md.hash +++ /dev/null @@ -1 +0,0 @@ -99adb7825ee79957 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/guides/developer/lightweight-delete.md b/i18n/jp/docusaurus-plugin-content-docs/current/guides/developer/lightweight-delete.md deleted file mode 100644 index a9555f65921..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/guides/developer/lightweight-delete.md +++ /dev/null @@ -1,11 +0,0 @@ ---- -slug: '/guides/developer/lightweight-delete' -title: '論理削除' -keywords: -- 'lightweight delete' -description: 'ClickHouse における論理削除の概要を提供します' ---- - -import Content from '@site/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/delete.md'; - - diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/guides/developer/lightweight-delete.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/guides/developer/lightweight-delete.md.hash deleted file mode 100644 index 6cd13ef8875..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/guides/developer/lightweight-delete.md.hash +++ /dev/null @@ -1 +0,0 @@ -52ce7986ce4e0ee7 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/guides/developer/lightweight-update.md b/i18n/jp/docusaurus-plugin-content-docs/current/guides/developer/lightweight-update.md deleted file mode 100644 index 7d5a324edf2..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/guides/developer/lightweight-update.md +++ /dev/null @@ -1,95 +0,0 @@ ---- -slug: '/guides/developer/lightweight-update' -sidebar_label: '軽量更新' -title: '軽量更新' -keywords: -- 'lightweight update' -description: '軽量更新の説明を提供します' ---- - - - -## Lightweight Update {#lightweight-update} - -軽量更新が有効になると、更新された行はすぐに更新されたとしてマークされ、その後の `SELECT` クエリは自動的に変更された値を返します。軽量更新が無効の場合、変更された値を見るためには、バックグラウンドプロセスを介して変更が適用されるのを待つ必要があります。 - -軽量更新は、クエリレベルの設定 `apply_mutations_on_fly` を有効にすることで、 `MergeTree` 系のテーブルに対して有効にすることができます。 - -```sql -SET apply_mutations_on_fly = 1; -``` - -## Example {#example} - -テーブルを作成し、いくつかの変更を実行してみましょう: -```sql -CREATE TABLE test_on_fly_mutations (id UInt64, v String) -ENGINE = MergeTree ORDER BY id; - --- 軽量更新が無効な場合のデフォルトの動作を示すために --- 変更のバックグラウンドマテリアライズを無効にします -SYSTEM STOP MERGES test_on_fly_mutations; -SET mutations_sync = 0; - --- 新しいテーブルに行をいくつか挿入します -INSERT INTO test_on_fly_mutations VALUES (1, 'a'), (2, 'b'), (3, 'c'); - --- 行の値を更新します -ALTER TABLE test_on_fly_mutations UPDATE v = 'd' WHERE id = 1; -ALTER TABLE test_on_fly_mutations DELETE WHERE v = 'd'; -ALTER TABLE test_on_fly_mutations UPDATE v = 'e' WHERE id = 2; -ALTER TABLE test_on_fly_mutations DELETE WHERE v = 'e'; -``` - -`SELECT` クエリを介して更新の結果を確認してみましょう: -```sql --- 明示的に軽量更新を無効にします -SET 
apply_mutations_on_fly = 0; - -SELECT id, v FROM test_on_fly_mutations ORDER BY id; -``` - -新しいテーブルをクエリしたときに、行の値はまだ更新されていないことに注意してください: - -```response -┌─id─┬─v─┐ -│ 1 │ a │ -│ 2 │ b │ -│ 3 │ c │ -└────┴───┘ -``` - -次に、軽量更新を有効にしたときに何が起こるか見てみましょう: - -```sql --- 軽量更新を有効にします -SET apply_mutations_on_fly = 1; - -SELECT id, v FROM test_on_fly_mutations ORDER BY id; -``` - -`SELECT` クエリは、変更が適用されるのを待たずに即座に正しい結果を返します: - -```response -┌─id─┬─v─┐ -│ 3 │ c │ -└────┴───┘ -``` - -## Performance Impact {#performance-impact} - -軽量更新が有効な場合、変更はすぐにはマテリアライズされず、 `SELECT` クエリの実行中のみ適用されます。ただし、バックグラウンドで非同期的に変更がマテリアライズされることに注意してください。これは重いプロセスです。 - -提出された変更の数が、一定の時間間隔でバックグラウンドで処理される変更の数を常に超える場合、適用する必要がある未マテリアライズの変更のキューは増大し続けます。これにより、 `SELECT` クエリのパフォーマンスが最終的に低下します。 - -無限に成長する未マテリアライズの変更を制限するために、 `apply_mutations_on_fly` 設定を `number_of_mutations_to_throw` や `number_of_mutations_to_delay` などの他の `MergeTree` レベルの設定とともに有効にすることをお勧めします。 - -## Support for subqueries and non-deterministic functions {#support-for-subqueries-and-non-deterministic-functions} - -軽量更新は、サブクエリや非決定的関数に対するサポートが限られています。結果が合理的なサイズのスカラサブクエリのみ(設定 `mutations_max_literal_size_to_replace` によって制御される)がサポートされています。定数の非決定的関数のみがサポートされています(例:関数 `now()`)。 - -これらの動作は次の設定によって制御されます: - -- `mutations_execute_nondeterministic_on_initiator` - true の場合、非決定的関数はイニシエーターのレプリカで実行され、`UPDATE` および `DELETE` クエリ内でリテラルとして置き換えられます。デフォルト値:`false`。 -- `mutations_execute_subqueries_on_initiator` - true の場合、スカラサブクエリはイニシエーターのレプリカで実行され、`UPDATE` および `DELETE` クエリ内でリテラルとして置き換えられます。デフォルト値:`false`。 - - `mutations_max_literal_size_to_replace` - `UPDATE` および `DELETE` クエリで置き換えるシリアル化されたリテラルの最大サイズ(バイト)。デフォルト値:`16384` (16 KiB)。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/guides/developer/lightweight-update.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/guides/developer/lightweight-update.md.hash deleted file mode 100644 index ce568fde54b..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/guides/developer/lightweight-update.md.hash +++ /dev/null @@ -1 +0,0 @@ -f8cb2a2928fc4276 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/guides/developer/mutations.md b/i18n/jp/docusaurus-plugin-content-docs/current/guides/developer/mutations.md deleted file mode 100644 index 07d9f5ccfe4..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/guides/developer/mutations.md +++ /dev/null @@ -1,108 +0,0 @@ ---- -slug: '/guides/developer/mutations' -sidebar_label: 'データの更新と削除' -sidebar_position: 1 -keywords: -- 'update' -- 'delete' -- 'mutation' -title: 'ClickHouseデータの更新と削除' -description: 'ClickHouseでの更新および削除操作の方法について説明します' ---- - - - - -# ClickHouseデータの更新と削除 - -ClickHouseは高ボリュームの分析ワークロード向けに設計されていますが、特定の状況で既存のデータを変更または削除することも可能です。これらの操作は「ミューテーション」と呼ばれ、`ALTER TABLE` コマンドを使用して実行されます。また、ClickHouseの軽量削除機能を使用して行を `DELETE` することもできます。 - -:::tip -頻繁に更新を行う必要がある場合は、[重複排除](../developer/deduplication.md)機能を使用することを検討してください。この機能により、ミューテーションイベントを生成することなく、行を更新および/または削除できます。 -::: - -## データの更新 {#updating-data} - -テーブルの行を更新するには、`ALTER TABLE...UPDATE` コマンドを使用します: - -```sql -ALTER TABLE [.] UPDATE = WHERE -``` - -`` は `` が満たされるカラムの新しい値です。`` はカラムと同じデータ型である必要があるか、または `CAST` 演算子を使用して同じデータ型に変換可能である必要があります。`` はデータの各行に対して `UInt8`(ゼロまたは非ゼロ)の値を返す必要があります。複数の `UPDATE ` ステートメントは、カンマで区切って単一の `ALTER TABLE` コマンドに結合できます。 - -**例**: - -1. このようなミューテーションでは、辞書lookupを使用して `visitor_ids` を新しいものに置き換えて更新できます: - - ```sql - ALTER TABLE website.clicks - UPDATE visitor_id = getDict('visitors', 'new_visitor_id', visitor_id) - WHERE visit_date < '2022-01-01' - ``` - -2. 
1つのコマンドで複数の値を変更することは、複数のコマンドを使用するよりも効率的です: - - ```sql - ALTER TABLE website.clicks - UPDATE url = substring(url, position(url, '://') + 3), visitor_id = new_visit_id - WHERE visit_date < '2022-01-01' - ``` - -3. ミューテーションはシャード化されたテーブルに対して `ON CLUSTER` で実行できます: - - ```sql - ALTER TABLE clicks ON CLUSTER main_cluster - UPDATE click_count = click_count / 2 - WHERE visitor_id ILIKE '%robot%' - ``` - -:::note -主キーまたはソートキーの一部であるカラムの更新はできません。 -::: - -## データの削除 {#deleting-data} - -行を削除するには、`ALTER TABLE` コマンドを使用します: - -```sql -ALTER TABLE [.]
DELETE WHERE -``` - -`` はデータの各行に対して `UInt8` 値を返す必要があります。 - -**例** - -1. 列が値の配列に含まれるレコードを削除します: - ```sql - ALTER TABLE website.clicks DELETE WHERE visitor_id in (253, 1002, 4277) - ``` - -2. このクエリは何を変更しますか? - ```sql - ALTER TABLE clicks ON CLUSTER main_cluster DELETE WHERE visit_date < '2022-01-02 15:00:00' AND page_id = '573' - ``` - -:::note -テーブル内のデータをすべて削除するには、`TRUNCATE TABLE [.]
` コマンドを使用する方が効率的です。このコマンドも `ON CLUSTER` で実行できます。 -::: - -詳細については、[`DELETE` ステートメント](/sql-reference/statements/delete.md)のドキュメントページを参照してください。 - -## 軽量削除 {#lightweight-deletes} - -行を削除するもう1つのオプションは、**軽量削除**と呼ばれる `DELETE FROM` コマンドを使用することです。削除された行は即座に削除済みとしてマークされ、その後のすべてのクエリから自動的にフィルタリングされるため、パーツのマージを待つ必要はなく、`FINAL` キーワードを使用する必要もありません。データのクリーンアップはバックグラウンドで非同期的に行われます。 - -``` sql -DELETE FROM [db.]table [ON CLUSTER cluster] [WHERE expr] -``` - -たとえば、次のクエリは `Title` 列に `hello` というテキストが含まれる `hits` テーブルのすべての行を削除します: - -```sql -DELETE FROM hits WHERE Title LIKE '%hello%'; -``` - -軽量削除に関するいくつかの注意点: -- この機能は、`MergeTree` テーブルエンジンファミリーにのみ利用可能です。 -- 軽量削除はデフォルトで非同期です。`mutations_sync` を 1 に設定すると、1つのレプリカがステートメントを処理するのを待機し、`mutations_sync` を 2 に設定すると、すべてのレプリカを待機します。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/guides/developer/mutations.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/guides/developer/mutations.md.hash deleted file mode 100644 index b30e19ab830..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/guides/developer/mutations.md.hash +++ /dev/null @@ -1 +0,0 @@ -8f90fcd2e02cd194 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/guides/developer/replacing-merge-tree.md b/i18n/jp/docusaurus-plugin-content-docs/current/guides/developer/replacing-merge-tree.md deleted file mode 100644 index 8c1ec6b9141..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/guides/developer/replacing-merge-tree.md +++ /dev/null @@ -1,350 +0,0 @@ ---- -slug: '/guides/replacing-merge-tree' -title: 'ReplacingMergeTree' -description: 'ClickHouse で ReplacingMergeTree エンジンを使用する' -keywords: -- 'replacingmergetree' -- 'inserts' -- 'deduplication' ---- - -import postgres_replacingmergetree from '@site/static/images/migrations/postgres-replacingmergetree.png'; -import Image from '@theme/IdealImage'; - -While transactional databases are optimized for transactional update and delete workloads, OLAP databases offer reduced guarantees for such operations. Instead, they optimize for immutable data inserted in batches for the benefit of significantly faster analytical queries. While ClickHouse offers update operations through mutations, as well as a lightweight means of deleting rows, its column-orientated structure means these operations should be scheduled with care, as described above. These operations are handled asynchronously, processed with a single thread, and require (in the case of updates) data to be rewritten on disk. They should thus not be used for high numbers of small changes. In order to process a stream of update and delete rows while avoiding the above usage patterns, we can use the ClickHouse table engine ReplacingMergeTree. - -## Automatic upserts of inserted rows {#automatic-upserts-of-inserted-rows} - -The [ReplacingMergeTree table engine](/engines/table-engines/mergetree-family/replacingmergetree) allows update operations to be applied to rows, without needing to use inefficient `ALTER` or `DELETE` statements, by offering the ability for users to insert multiple copies of the same row and denote one as the latest version. A background process, in turn, asynchronously removes older versions of the same row, efficiently imitating an update operation through the use of immutable inserts. This relies on the ability of the table engine to identify duplicate rows. This is achieved using the `ORDER BY` clause to determine uniqueness, i.e., if two rows have the same values for the columns specified in the `ORDER BY`, they are considered duplicates. 
A `version` column, specified when defining the table, allows the latest version of a row to be retained when two rows are identified as duplicates i.e. the row with the highest version value is kept. We illustrate this process in the example below. Here, the rows are uniquely identified by the A column (the `ORDER BY` for the table). We assume these rows have been inserted as two batches, resulting in the formation of two data parts on disk. Later, during an asynchronous background process, these parts are merged together. - -ReplacingMergeTree additionally allows a deleted column to be specified. This can contain either 0 or 1, where a value of 1 indicates that the row (and its duplicates) has been deleted and zero is used otherwise. **Note: Deleted rows will not be removed at merge time.** - -During this process, the following occurs during part merging: - -- The row identified by the value 1 for column A has both an update row with version 2 and a delete row with version 3 (and a deleted column value of 1). The latest row, marked as deleted, is therefore retained. -- The row identified by the value 2 for column A has two update rows. The latter row is retained with a value of 6 for the price column. -- The row identified by the value 3 for column A has a row with version 1 and a delete row with version 2. This delete row is retained. - -As a result of this merge process, we have four rows representing the final state: - -
- - - -
- -Note that deleted rows are never removed. They can be forcibly deleted with an `OPTIMIZE table FINAL CLEANUP`. This requires the experimental setting `allow_experimental_replacing_merge_with_cleanup=1`. This should only be issued under the following conditions: - -1. You can be sure that no rows with old versions (for those that are being deleted with the cleanup) will be inserted after the operation is issued. If these are inserted, they will be incorrectly retained, as the deleted rows will no longer be present. -2. Ensure all replicas are in sync prior to issuing the cleanup. This can be achieved with the command: - -
- -```sql -SYSTEM SYNC REPLICA table -``` - -We recommend pausing inserts once (1) is guaranteed and until this command and the subsequent cleanup are complete. - -> Handling deletes with the ReplacingMergeTree is only recommended for tables with a low to moderate number of deletes (less than 10%) unless periods can be scheduled for cleanup with the above conditions. - -> Tip: Users may also be able to issue `OPTIMIZE FINAL CLEANUP` against selective partitions no longer subject to changes. - -## Choosing a primary/deduplication key {#choosing-a-primarydeduplication-key} - -Above, we highlighted an important additional constraint that must also be satisfied in the case of the ReplacingMergeTree: the values of columns of the `ORDER BY` uniquely identify a row across changes. If migrating from a transactional database like Postgres, the original Postgres primary key should thus be included in the ClickHouse `ORDER BY` clause. - -Users of ClickHouse will be familiar with choosing the columns in their tables' `ORDER BY` clause to [optimize for query performance](/data-modeling/schema-design#choosing-an-ordering-key). Generally, these columns should be selected based on your [frequent queries and listed in order of increasing cardinality](/guides/best-practices/sparse-primary-indexes#an-index-design-for-massive-data-scales). Importantly, the ReplacingMergeTree imposes an additional constraint - these columns must be immutable, i.e., if replicating from Postgres, only add columns to this clause if they do not change in the underlying Postgres data. While other columns can change, these are required to be consistent for unique row identification. For analytical workloads, the Postgres primary key is generally of little use as users will rarely perform point row lookups. Given we recommend that columns be ordered in order of increasing cardinality, as well as the fact that matches on [columns listed earlier in the ORDER BY will usually be faster](/guides/best-practices/sparse-primary-indexes#ordering-key-columns-efficiently), the Postgres primary key should be appended to the end of the `ORDER BY` (unless it has analytical value). In the case that multiple columns form a primary key in Postgres, they should be appended to the `ORDER BY`, respecting cardinality and the likelihood of query value. Users may also wish to generate a unique primary key using a concatenation of values via a `MATERIALIZED` column. - -Consider the posts table from the Stack Overflow dataset.
- -```sql -CREATE TABLE stackoverflow.posts_updateable -( - `Version` UInt32, - `Deleted` UInt8, - `Id` Int32 CODEC(Delta(4), ZSTD(1)), - `PostTypeId` Enum8('Question' = 1, 'Answer' = 2, 'Wiki' = 3, 'TagWikiExcerpt' = 4, 'TagWiki' = 5, 'ModeratorNomination' = 6, 'WikiPlaceholder' = 7, 'PrivilegeWiki' = 8), - `AcceptedAnswerId` UInt32, - `CreationDate` DateTime64(3, 'UTC'), - `Score` Int32, - `ViewCount` UInt32 CODEC(Delta(4), ZSTD(1)), - `Body` String, - `OwnerUserId` Int32, - `OwnerDisplayName` String, - `LastEditorUserId` Int32, - `LastEditorDisplayName` String, - `LastEditDate` DateTime64(3, 'UTC') CODEC(Delta(8), ZSTD(1)), - `LastActivityDate` DateTime64(3, 'UTC'), - `Title` String, - `Tags` String, - `AnswerCount` UInt16 CODEC(Delta(2), ZSTD(1)), - `CommentCount` UInt8, - `FavoriteCount` UInt8, - `ContentLicense` LowCardinality(String), - `ParentId` String, - `CommunityOwnedDate` DateTime64(3, 'UTC'), - `ClosedDate` DateTime64(3, 'UTC') -) -ENGINE = ReplacingMergeTree(Version, Deleted) -PARTITION BY toYear(CreationDate) -ORDER BY (PostTypeId, toDate(CreationDate), CreationDate, Id) -``` - -We use an `ORDER BY` key of `(PostTypeId, toDate(CreationDate), CreationDate, Id)`. The `Id` column, unique for each post, ensures rows can be deduplicated. A `Version` and `Deleted` column are added to the schema as required. - -## Querying ReplacingMergeTree {#querying-replacingmergetree} - -At merge time, the ReplacingMergeTree identifies duplicate rows, using the values of the `ORDER BY` columns as a unique identifier, and either retains only the highest version or removes all duplicates if the latest version indicates a delete. This, however, offers eventual correctness only - it does not guarantee rows will be deduplicated, and you should not rely on it. Queries can, therefore, produce incorrect answers due to update and delete rows being considered in queries. - -To obtain correct answers, users will need to complement background merges with query time deduplication and deletion removal. This can be achieved using the `FINAL` operator. - -Consider the posts table above. We can use the normal method of loading this dataset, but additionally specify a deleted and version column, each with a value of 0. For example purposes, we load 10000 rows only. - -```sql -INSERT INTO stackoverflow.posts_updateable SELECT 0 AS Version, 0 AS Deleted, * -FROM s3('https://datasets-documentation.s3.eu-west-3.amazonaws.com/stackoverflow/parquet/posts/*.parquet') WHERE AnswerCount > 0 LIMIT 10000 - -0 rows in set. Elapsed: 1.980 sec. Processed 8.19 thousand rows, 3.52 MB (4.14 thousand rows/s., 1.78 MB/s.) -``` - -Let's confirm the number of rows: - -```sql -SELECT count() FROM stackoverflow.posts_updateable - -┌─count()─┐ -│ 10000 │ -└─────────┘ - -1 row in set. Elapsed: 0.002 sec. -``` - -We now update our post-answer statistics. Rather than updating these values, we insert new copies of 5000 rows and add one to their version number (this means 15,000 rows will exist in the table).
We can simulate this with a simple `INSERT INTO SELECT`: - -```sql -INSERT INTO posts_updateable SELECT - Version + 1 AS Version, - Deleted, - Id, - PostTypeId, - AcceptedAnswerId, - CreationDate, - Score, - ViewCount, - Body, - OwnerUserId, - OwnerDisplayName, - LastEditorUserId, - LastEditorDisplayName, - LastEditDate, - LastActivityDate, - Title, - Tags, - AnswerCount, - CommentCount, - FavoriteCount, - ContentLicense, - ParentId, - CommunityOwnedDate, - ClosedDate -FROM posts_updateable --select 100 random rows -WHERE (Id % toInt32(floor(randUniform(1, 11)))) = 0 -LIMIT 5000 - -0 rows in set. Elapsed: 4.056 sec. Processed 1.42 million rows, 2.20 GB (349.63 thousand rows/s., 543.39 MB/s.) -``` - -In addition, we delete 1000 random posts by reinserting the rows but with a deleted column value of 1. Again, this can be simulated with a simple `INSERT INTO SELECT`. - -```sql -INSERT INTO posts_updateable SELECT - Version + 1 AS Version, - 1 AS Deleted, - Id, - PostTypeId, - AcceptedAnswerId, - CreationDate, - Score, - ViewCount, - Body, - OwnerUserId, - OwnerDisplayName, - LastEditorUserId, - LastEditorDisplayName, - LastEditDate, - LastActivityDate, - Title, - Tags, - AnswerCount + 1 AS AnswerCount, - CommentCount, - FavoriteCount, - ContentLicense, - ParentId, - CommunityOwnedDate, - ClosedDate -FROM posts_updateable --select 100 random rows -WHERE (Id % toInt32(floor(randUniform(1, 11)))) = 0 AND AnswerCount > 0 -LIMIT 1000 - -0 rows in set. Elapsed: 0.166 sec. Processed 135.53 thousand rows, 212.65 MB (816.30 thousand rows/s., 1.28 GB/s.) -``` - -The result of the above operations is 16,000 physical rows, i.e. 10,000 + 5,000 + 1,000. The correct logical total, however, is only 1,000 rows fewer than our original total, i.e. 10,000 - 1,000 = 9,000. - -```sql -SELECT count() -FROM posts_updateable - -┌─count()─┐ -│ 10000 │ -└─────────┘ -1 row in set. Elapsed: 0.002 sec. -``` - -Your results will vary here depending on the merges that have occurred. We can see the total here is not yet correct, as duplicate and deleted rows are still being counted. Applying `FINAL` to the table delivers the correct result. - -```sql -SELECT count() -FROM posts_updateable -FINAL - -┌─count()─┐ -│ 9000 │ -└─────────┘ - -1 row in set. Elapsed: 0.006 sec. Processed 11.81 thousand rows, 212.54 KB (2.14 million rows/s., 38.61 MB/s.) -Peak memory usage: 8.14 MiB. -``` - -## FINAL performance {#final-performance} - -The `FINAL` operator will have a performance overhead on queries despite ongoing improvements. This will be most appreciable when queries are not filtering on primary key columns, causing more data to be read and increasing the deduplication overhead. If users filter on key columns using a `WHERE` condition, the data loaded and passed for deduplication will be reduced. - -If the `WHERE` condition does not use a key column, ClickHouse does not currently utilize the `PREWHERE` optimization when using `FINAL`. This optimization aims to reduce the rows read for non-filtered columns. Examples of emulating this `PREWHERE` and thus potentially improving performance can be found [here](https://clickhouse.com/blog/clickhouse-postgresql-change-data-capture-cdc-part-1#final-performance). - -## Exploiting partitions with ReplacingMergeTree {#exploiting-partitions-with-replacingmergetree} - -Merging of data in ClickHouse occurs at a partition level. When using ReplacingMergeTree, we recommend users partition their table according to best practices, provided users can ensure this **partitioning key does not change for a row**.
This will ensure updates pertaining to the same row will be sent to the same ClickHouse partition. You may reuse the same partition key as Postgres provided you adhere to the best practices outlined here. - -Assuming this is the case, users can use the setting `do_not_merge_across_partitions_select_final=1` to improve `FINAL` query performance. This setting causes partitions to be merged and processed independently when using FINAL. - -Consider the following posts table, where we use no partitioning: - -```sql -CREATE TABLE stackoverflow.posts_no_part -( - `Version` UInt32, - `Deleted` UInt8, - `Id` Int32 CODEC(Delta(4), ZSTD(1)), - ... -) -ENGINE = ReplacingMergeTree -ORDER BY (PostTypeId, toDate(CreationDate), CreationDate, Id) - -INSERT INTO stackoverflow.posts_no_part SELECT 0 AS Version, 0 AS Deleted, * -FROM s3('https://datasets-documentation.s3.eu-west-3.amazonaws.com/stackoverflow/parquet/posts/*.parquet') - -0 rows in set. Elapsed: 182.895 sec. Processed 59.82 million rows, 38.07 GB (327.07 thousand rows/s., 208.17 MB/s.) -``` - -To ensure `FINAL` is required to do some work, we update 1m rows - incrementing their `AnswerCount` by inserting duplicate rows. - -```sql -INSERT INTO posts_no_part SELECT Version + 1 AS Version, Deleted, Id, PostTypeId, AcceptedAnswerId, CreationDate, Score, ViewCount, Body, OwnerUserId, OwnerDisplayName, LastEditorUserId, LastEditorDisplayName, LastEditDate, LastActivityDate, Title, Tags, AnswerCount + 1 AS AnswerCount, CommentCount, FavoriteCount, ContentLicense, ParentId, CommunityOwnedDate, ClosedDate -FROM posts_no_part -LIMIT 1000000 -``` - -Computing the sum of answers per year with `FINAL`: - -```sql -SELECT toYear(CreationDate) AS year, sum(AnswerCount) AS total_answers -FROM posts_no_part -FINAL -GROUP BY year -ORDER BY year ASC - -┌─year─┬─total_answers─┐ -│ 2008 │ 371480 │ -... -│ 2024 │ 127765 │ -└──────┴───────────────┘ - -17 rows in set. Elapsed: 2.338 sec. Processed 122.94 million rows, 1.84 GB (52.57 million rows/s., 788.58 MB/s.) -Peak memory usage: 2.09 GiB. -``` - -Repeating these same steps for a table partitioning by year, and repeating the above query with `do_not_merge_across_partitions_select_final=1`. - -```sql -CREATE TABLE stackoverflow.posts_with_part -( - `Version` UInt32, - `Deleted` UInt8, - `Id` Int32 CODEC(Delta(4), ZSTD(1)), - ... -) -ENGINE = ReplacingMergeTree -PARTITION BY toYear(CreationDate) -ORDER BY (PostTypeId, toDate(CreationDate), CreationDate, Id) - -// populate & update omitted - -SELECT toYear(CreationDate) AS year, sum(AnswerCount) AS total_answers -FROM posts_with_part -FINAL -GROUP BY year -ORDER BY year ASC - -┌─year─┬─total_answers─┐ -│ 2008 │ 387832 │ -│ 2009 │ 1165506 │ -│ 2010 │ 1755437 │ -... -│ 2023 │ 787032 │ -│ 2024 │ 127765 │ -└──────┴───────────────┘ - -17 rows in set. Elapsed: 0.994 sec. Processed 64.65 million rows, 983.64 MB (65.02 million rows/s., 989.23 MB/s.) -``` - -As shown, partitioning has significantly improved query performance in this case by allowing the deduplication process to occur at a partition level in parallel. - -## Merge Behavior Considerations {#merge-behavior-considerations} - -ClickHouse's merge selection mechanism goes beyond simple merging of parts. Below, we examine this behavior in the context of ReplacingMergeTree, including configuration options for enabling more aggressive merging of older data and considerations for larger parts. 
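Before looking at the merge selection logic in detail, the following is a minimal sketch of how the two merge settings discussed in the subsections below (`min_age_to_force_merge_seconds` and `min_age_to_force_merge_on_partition_only`) might be applied to the `posts_updateable` table used earlier. The one-hour threshold is purely an illustrative assumption, not a recommendation, and, as the warning under the recommended settings notes, this kind of tuning should be validated (ideally with ClickHouse support) before being used in production:

```sql
-- Illustrative sketch only: the 3600-second threshold is an assumed value.
-- Force older parts to be merged, but only once every part in the partition
-- has reached the age threshold.
ALTER TABLE stackoverflow.posts_updateable
    MODIFY SETTING
        min_age_to_force_merge_seconds = 3600,
        min_age_to_force_merge_on_partition_only = 1;
```

The same settings can equally be declared in the table's `SETTINGS` clause at creation time.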
- -### Merge Selection Logic {#merge-selection-logic} - -While merging aims to minimize the number of parts, it also balances this goal against the cost of write amplification. Consequently, some ranges of parts are excluded from merging if they would lead to excessive write amplification, based on internal calculations. This behavior helps prevent unnecessary resource usage and extends the lifespan of storage components. - -### Merging Behavior on Large Parts {#merging-behavior-on-large-parts} - -The ReplacingMergeTree engine in ClickHouse is optimized for managing duplicate rows by merging data parts, keeping only the latest version of each row based on a specified unique key. However, when a merged part reaches the max_bytes_to_merge_at_max_space_in_pool threshold, it will no longer be selected for further merging, even if min_age_to_force_merge_seconds is set. As a result, automatic merges can no longer be relied upon to remove duplicates that may accumulate with ongoing data insertion. - -To address this, users can invoke OPTIMIZE FINAL to manually merge parts and remove duplicates. Unlike automatic merges, OPTIMIZE FINAL bypasses the max_bytes_to_merge_at_max_space_in_pool threshold, merging parts based solely on available resources, particularly disk space, until a single part remains in each partition. However, this approach can be memory-intensive on large tables and may require repeated execution as new data is added. - -For a more sustainable solution that maintains performance, partitioning the table is recommended. This can help prevent data parts from reaching the maximum merge size and reduces the need for ongoing manual optimizations. - -### Partitioning and Merging Across Partitions {#partitioning-and-merging-across-partitions} - -As discussed in Exploiting Partitions with ReplacingMergeTree, we recommend partitioning tables as a best practice. Partitioning isolates data for more efficient merges and avoids merging across partitions, particularly during query execution. This behavior is enhanced in versions from 23.12 onward: if the partition key is a prefix of the sorting key, merging across partitions is not performed at query time, leading to faster query performance. - -### Tuning Merges for Better Query Performance {#tuning-merges-for-better-query-performance} - -By default, min_age_to_force_merge_seconds and min_age_to_force_merge_on_partition_only are set to 0 and false, respectively, disabling these features. In this configuration, ClickHouse will apply standard merging behavior without forcing merges based on partition age. - -If a value for min_age_to_force_merge_seconds is specified, ClickHouse will ignore normal merging heuristics for parts older than the specified period. While this is generally only effective if the goal is to minimize the total number of parts, it can improve query performance in ReplacingMergeTree by reducing the number of parts needing merging at query time. - -This behavior can be further tuned by setting min_age_to_force_merge_on_partition_only=true, requiring all parts in the partition to be older than min_age_to_force_merge_seconds for aggressive merging. This configuration allows older partitions to merge down to a single part over time, which consolidates data and maintains query performance. - -### Recommended Settings {#recommended-settings} - -:::warning -Tuning merge behavior is an advanced operation. We recommend consulting with ClickHouse support before enabling these settings in production workloads. 
-::: - -In most cases, setting min_age_to_force_merge_seconds to a low value—significantly less than the partition period—is preferred. This minimizes the number of parts and prevents unnecessary merging at query time with the FINAL operator. - -For example, consider a monthly partition that has already been merged into a single part. If a small, stray insert creates a new part within this partition, query performance can suffer because ClickHouse must read multiple parts until the merge completes. Setting min_age_to_force_merge_seconds can ensure these parts are merged aggressively, preventing a degradation in query performance. diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/guides/developer/replacing-merge-tree.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/guides/developer/replacing-merge-tree.md.hash deleted file mode 100644 index 50dcbf3accc..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/guides/developer/replacing-merge-tree.md.hash +++ /dev/null @@ -1 +0,0 @@ -c47deda9d052b8f5 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/guides/developer/time-series-filling-gaps.md b/i18n/jp/docusaurus-plugin-content-docs/current/guides/developer/time-series-filling-gaps.md deleted file mode 100644 index 2a3ef374a56..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/guides/developer/time-series-filling-gaps.md +++ /dev/null @@ -1,350 +0,0 @@ ---- -slug: '/guides/developer/time-series-filling-gaps' -sidebar_label: '時系列 - ギャップ埋め' -sidebar_position: 10 -description: '時系列データのギャップを埋める' -keywords: -- 'time series' -- 'gap fill' -title: '時系列データのギャップ埋め' ---- - - - - -# 時系列データのギャップを埋める - -時系列データを扱うとき、データの欠落や非活動によりギャップが発生することがあります。 -通常、データをクエリするときにこれらのギャップが存在しないことを望みます。このような場合に、`WITH FILL` 句が役立ちます。 -このガイドでは、時系列データのギャップを埋めるための `WITH FILL` の使い方について説明します。 - -## セットアップ {#setup} - -次のようなテーブルがあり、GenAI画像サービスによって生成された画像のメタデータを格納しているとしましょう。 - -```sql -CREATE TABLE images -( - `id` String, - `timestamp` DateTime64(3), - `height` Int64, - `width` Int64, - `size` Int64 -) -ENGINE = MergeTree -ORDER BY (size, height, width); -``` - -次に、いくつかのレコードをインポートします。 - -```sql -INSERT INTO images VALUES (1088619203512250448, '2023-03-24 00:24:03.684', 1536, 1536, 2207289); -INSERT INTO images VALUES (1088619204040736859, '2023-03-24 00:24:03.810', 1024, 1024, 1928974); -INSERT INTO images VALUES (1088619204749561989, '2023-03-24 00:24:03.979', 1024, 1024, 1275619); -INSERT INTO images VALUES (1088619206431477862, '2023-03-24 00:24:04.380', 2048, 2048, 5985703); -INSERT INTO images VALUES (1088619206905434213, '2023-03-24 00:24:04.493', 1024, 1024, 1558455); -INSERT INTO images VALUES (1088619208524431510, '2023-03-24 00:24:04.879', 1024, 1024, 1494869); -INSERT INTO images VALUES (1088619208425437515, '2023-03-24 00:24:05.160', 1024, 1024, 1538451); -``` - -## バケット別にクエリする {#querying-by-bucket} - -2023年3月24日の `00:24:03` と `00:24:04` の間に作成された画像を探索するので、その時間点のパラメータを作成しましょう。 - -```sql -SET param_start = '2023-03-24 00:24:03', - param_end = '2023-03-24 00:24:04'; -``` - -次に、データを100msのバケットにグループ分けし、バケット内に作成された画像の数を返すクエリを書きます。 - -```sql -SELECT - toStartOfInterval(timestamp, toIntervalMillisecond(100)) AS bucket, - count() AS count -FROM MidJourney.images -WHERE (timestamp >= {start:String}) AND (timestamp <= {end:String}) -GROUP BY ALL -ORDER BY bucket ASC -``` - -```response -┌──────────────────bucket─┬─count─┐ -│ 2023-03-24 00:24:03.600 │ 1 │ -│ 2023-03-24 00:24:03.800 │ 1 │ -│ 2023-03-24 00:24:03.900 │ 1 │ -│ 2023-03-24 00:24:04.300 │ 1 │ -│ 2023-03-24 00:24:04.400 │ 1 
│ -│ 2023-03-24 00:24:04.800 │ 1 │ -└─────────────────────────┴───────┘ -``` - -結果セットには画像が作成されたバケットのみが含まれていますが、時系列分析のためには、エントリーがない場合でも各100msバケットを返すことを望むかもしれません。 - -## WITH FILL {#with-fill} - -`WITH FILL` 句を使用してこれらのギャップを埋めることができます。 -ギャップを埋めるための `STEP` も指定します。これは `DateTime` 型の場合、デフォルトで1秒ですが、100msの間隔を埋めたいので、ステップ値として100msの間隔を設定します。 - -```sql -SELECT - toStartOfInterval(timestamp, toIntervalMillisecond(100)) AS bucket, - count() AS count -FROM MidJourney.images -WHERE (timestamp >= {start:String}) AND (timestamp <= {end:String}) -GROUP BY ALL -ORDER BY bucket ASC -WITH FILL -STEP toIntervalMillisecond(100); -``` - -```response -┌──────────────────bucket─┬─count─┐ -│ 2023-03-24 00:24:03.600 │ 1 │ -│ 2023-03-24 00:24:03.700 │ 0 │ -│ 2023-03-24 00:24:03.800 │ 1 │ -│ 2023-03-24 00:24:03.900 │ 1 │ -│ 2023-03-24 00:24:04.000 │ 0 │ -│ 2023-03-24 00:24:04.100 │ 0 │ -│ 2023-03-24 00:24:04.200 │ 0 │ -│ 2023-03-24 00:24:04.300 │ 1 │ -│ 2023-03-24 00:24:04.400 │ 1 │ -│ 2023-03-24 00:24:04.500 │ 0 │ -│ 2023-03-24 00:24:04.600 │ 0 │ -│ 2023-03-24 00:24:04.700 │ 0 │ -│ 2023-03-24 00:24:04.800 │ 1 │ -└─────────────────────────┴───────┘ -``` - -ギャップが `count` 列の0の値で埋められたことが確認できます。 - -## WITH FILL...FROM {#with-fillfrom} - -しかし、時間範囲の最初にもギャップが残っています。これを `FROM` を指定することで修正できます。 - -```sql -SELECT - toStartOfInterval(timestamp, toIntervalMillisecond(100)) AS bucket, - count() AS count -FROM MidJourney.images -WHERE (timestamp >= {start:String}) AND (timestamp <= {end:String}) -GROUP BY ALL -ORDER BY bucket ASC -WITH FILL -FROM toDateTime64({start:String}, 3) -STEP toIntervalMillisecond(100); -``` - -```response -┌──────────────────bucket─┬─count─┐ -│ 2023-03-24 00:24:03.000 │ 0 │ -│ 2023-03-24 00:24:03.100 │ 0 │ -│ 2023-03-24 00:24:03.200 │ 0 │ -│ 2023-03-24 00:24:03.300 │ 0 │ -│ 2023-03-24 00:24:03.400 │ 0 │ -│ 2023-03-24 00:24:03.500 │ 0 │ -│ 2023-03-24 00:24:03.600 │ 1 │ -│ 2023-03-24 00:24:03.700 │ 0 │ -│ 2023-03-24 00:24:03.800 │ 1 │ -│ 2023-03-24 00:24:03.900 │ 1 │ -│ 2023-03-24 00:24:04.000 │ 0 │ -│ 2023-03-24 00:24:04.100 │ 0 │ -│ 2023-03-24 00:24:04.200 │ 0 │ -│ 2023-03-24 00:24:04.300 │ 1 │ -│ 2023-03-24 00:24:04.400 │ 1 │ -│ 2023-03-24 00:24:04.500 │ 0 │ -│ 2023-03-24 00:24:04.600 │ 0 │ -│ 2023-03-24 00:24:04.700 │ 0 │ -│ 2023-03-24 00:24:04.800 │ 1 │ -└─────────────────────────┴───────┘ -``` - -結果から、`00:24:03.000`から`00:24:03.500`までのバケットが全て表示されることが確認できます。 - -## WITH FILL...TO {#with-fillto} - -しかし、時間範囲の終わりにもいくつかのバケットが欠けています。これを `TO` 値を提供することで埋めることができます。 -`TO` は含まれないので、終了時間に少し追加してそれが含まれるようにします。 - -```sql -SELECT - toStartOfInterval(timestamp, toIntervalMillisecond(100)) AS bucket, - count() AS count -FROM MidJourney.images -WHERE (timestamp >= {start:String}) AND (timestamp <= {end:String}) -GROUP BY ALL -ORDER BY bucket ASC -WITH FILL -FROM toDateTime64({start:String}, 3) -TO toDateTime64({end:String}, 3) + INTERVAL 1 millisecond -STEP toIntervalMillisecond(100); -``` - -```response -┌──────────────────bucket─┬─count─┐ -│ 2023-03-24 00:24:03.000 │ 0 │ -│ 2023-03-24 00:24:03.100 │ 0 │ -│ 2023-03-24 00:24:03.200 │ 0 │ -│ 2023-03-24 00:24:03.300 │ 0 │ -│ 2023-03-24 00:24:03.400 │ 0 │ -│ 2023-03-24 00:24:03.500 │ 0 │ -│ 2023-03-24 00:24:03.600 │ 1 │ -│ 2023-03-24 00:24:03.700 │ 0 │ -│ 2023-03-24 00:24:03.800 │ 1 │ -│ 2023-03-24 00:24:03.900 │ 1 │ -│ 2023-03-24 00:24:04.000 │ 0 │ -│ 2023-03-24 00:24:04.100 │ 0 │ -│ 2023-03-24 00:24:04.200 │ 0 │ -│ 2023-03-24 00:24:04.300 │ 1 │ -│ 2023-03-24 00:24:04.400 │ 1 │ -│ 2023-03-24 00:24:04.500 │ 0 │ -│ 2023-03-24 00:24:04.600 │ 0 │ -│ 2023-03-24 00:24:04.700 │ 0 │ -│ 
2023-03-24 00:24:04.800 │ 1 │ -│ 2023-03-24 00:24:04.900 │ 0 │ -│ 2023-03-24 00:24:05.000 │ 0 │ -└─────────────────────────┴───────┘ -``` - -ギャップがすべて埋まり、`00:24:03.000`から`00:24:05.000`までの各100msにエントリーがあることが確認できます。 - -## 累積カウント {#cumulative-count} - -次に、バケット内で作成された画像の数を累積カウントで保持したいとします。 -以下のように `cumulative` 列を追加することでこれを実現できます。 - -```sql -SELECT - toStartOfInterval(timestamp, toIntervalMillisecond(100)) AS bucket, - count() AS count, - sum(count) OVER (ORDER BY bucket) AS cumulative -FROM MidJourney.images -WHERE (timestamp >= {start:String}) AND (timestamp <= {end:String}) -GROUP BY ALL -ORDER BY bucket ASC -WITH FILL -FROM toDateTime64({start:String}, 3) -TO toDateTime64({end:String}, 3) + INTERVAL 1 millisecond -STEP toIntervalMillisecond(100); -``` - -```response -┌──────────────────bucket─┬─count─┬─cumulative─┐ -│ 2023-03-24 00:24:03.000 │ 0 │ 0 │ -│ 2023-03-24 00:24:03.100 │ 0 │ 0 │ -│ 2023-03-24 00:24:03.200 │ 0 │ 0 │ -│ 2023-03-24 00:24:03.300 │ 0 │ 0 │ -│ 2023-03-24 00:24:03.400 │ 0 │ 0 │ -│ 2023-03-24 00:24:03.500 │ 0 │ 0 │ -│ 2023-03-24 00:24:03.600 │ 1 │ 1 │ -│ 2023-03-24 00:24:03.700 │ 0 │ 0 │ -│ 2023-03-24 00:24:03.800 │ 1 │ 2 │ -│ 2023-03-24 00:24:03.900 │ 1 │ 3 │ -│ 2023-03-24 00:24:04.000 │ 0 │ 0 │ -│ 2023-03-24 00:24:04.100 │ 0 │ 0 │ -│ 2023-03-24 00:24:04.200 │ 0 │ 0 │ -│ 2023-03-24 00:24:04.300 │ 1 │ 4 │ -│ 2023-03-24 00:24:04.400 │ 1 │ 5 │ -│ 2023-03-24 00:24:04.500 │ 0 │ 0 │ -│ 2023-03-24 00:24:04.600 │ 0 │ 0 │ -│ 2023-03-24 00:24:04.700 │ 0 │ 0 │ -│ 2023-03-24 00:24:04.800 │ 1 │ 6 │ -│ 2023-03-24 00:24:04.900 │ 0 │ 0 │ -│ 2023-03-24 00:24:05.000 │ 0 │ 0 │ -└─────────────────────────┴───────┴────────────┘ -``` - -累積列の値は、私たちが望むようには動作していません。 - -## WITH FILL...INTERPOLATE {#with-fillinterpolate} - -`count` 列に `0` がある行は、累積列にも `0` があり、むしろ累積列の前の値を使用してほしいです。 -これを `INTERPOLATE` 句を使用することで実現できます。 - -```sql -SELECT - toStartOfInterval(timestamp, toIntervalMillisecond(100)) AS bucket, - count() AS count, - sum(count) OVER (ORDER BY bucket) AS cumulative -FROM MidJourney.images -WHERE (timestamp >= {start:String}) AND (timestamp <= {end:String}) -GROUP BY ALL -ORDER BY bucket ASC -WITH FILL -FROM toDateTime64({start:String}, 3) -TO toDateTime64({end:String}, 3) + INTERVAL 100 millisecond -STEP toIntervalMillisecond(100) -INTERPOLATE (cumulative); -``` - -```response -┌──────────────────bucket─┬─count─┬─cumulative─┐ -│ 2023-03-24 00:24:03.000 │ 0 │ 0 │ -│ 2023-03-24 00:24:03.100 │ 0 │ 0 │ -│ 2023-03-24 00:24:03.200 │ 0 │ 0 │ -│ 2023-03-24 00:24:03.300 │ 0 │ 0 │ -│ 2023-03-24 00:24:03.400 │ 0 │ 0 │ -│ 2023-03-24 00:24:03.500 │ 0 │ 0 │ -│ 2023-03-24 00:24:03.600 │ 1 │ 1 │ -│ 2023-03-24 00:24:03.700 │ 0 │ 1 │ -│ 2023-03-24 00:24:03.800 │ 1 │ 2 │ -│ 2023-03-24 00:24:03.900 │ 1 │ 3 │ -│ 2023-03-24 00:24:04.000 │ 0 │ 3 │ -│ 2023-03-24 00:24:04.100 │ 0 │ 3 │ -│ 2023-03-24 00:24:04.200 │ 0 │ 3 │ -│ 2023-03-24 00:24:04.300 │ 1 │ 4 │ -│ 2023-03-24 00:24:04.400 │ 1 │ 5 │ -│ 2023-03-24 00:24:04.500 │ 0 │ 5 │ -│ 2023-03-24 00:24:04.600 │ 0 │ 5 │ -│ 2023-03-24 00:24:04.700 │ 0 │ 5 │ -│ 2023-03-24 00:24:04.800 │ 1 │ 6 │ -│ 2023-03-24 00:24:04.900 │ 0 │ 6 │ -│ 2023-03-24 00:24:05.000 │ 0 │ 6 │ -└─────────────────────────┴───────┴────────────┘ -``` - -これでずっと良くなりました。 -最後に、`bar` 関数を使ってバーチャートを追加し、`INTERPOLATE` 句に新しい列を追加することを忘れないようにしましょう。 - -```sql -SELECT - toStartOfInterval(timestamp, toIntervalMillisecond(100)) AS bucket, - count() AS count, - sum(count) OVER (ORDER BY bucket) AS cumulative, - bar(cumulative, 0, 10, 10) AS barChart -FROM MidJourney.images -WHERE (timestamp >= {start:String}) AND 
(timestamp <= {end:String}) -GROUP BY ALL -ORDER BY bucket ASC -WITH FILL -FROM toDateTime64({start:String}, 3) -TO toDateTime64({end:String}, 3) + INTERVAL 100 millisecond -STEP toIntervalMillisecond(100) -INTERPOLATE (cumulative, barChart); -``` - -```response -┌──────────────────bucket─┬─count─┬─cumulative─┬─barChart─┐ -│ 2023-03-24 00:24:03.000 │ 0 │ 0 │ │ -│ 2023-03-24 00:24:03.100 │ 0 │ 0 │ │ -│ 2023-03-24 00:24:03.200 │ 0 │ 0 │ │ -│ 2023-03-24 00:24:03.300 │ 0 │ 0 │ │ -│ 2023-03-24 00:24:03.400 │ 0 │ 0 │ │ -│ 2023-03-24 00:24:03.500 │ 0 │ 0 │ │ -│ 2023-03-24 00:24:03.600 │ 1 │ 1 │ █ │ -│ 2023-03-24 00:24:03.700 │ 0 │ 1 │ █ │ -│ 2023-03-24 00:24:03.800 │ 1 │ 2 │ ██ │ -│ 2023-03-24 00:24:03.900 │ 1 │ 3 │ ███ │ -│ 2023-03-24 00:24:04.000 │ 0 │ 3 │ ███ │ -│ 2023-03-24 00:24:04.100 │ 0 │ 3 │ ███ │ -│ 2023-03-24 00:24:04.200 │ 0 │ 3 │ ███ │ -│ 2023-03-24 00:24:04.300 │ 1 │ 4 │ ████ │ -│ 2023-03-24 00:24:04.400 │ 1 │ 5 │ █████ │ -│ 2023-03-24 00:24:04.500 │ 0 │ 5 │ █████ │ -│ 2023-03-24 00:24:04.600 │ 0 │ 5 │ █████ │ -│ 2023-03-24 00:24:04.700 │ 0 │ 5 │ █████ │ -│ 2023-03-24 00:24:04.800 │ 1 │ 6 │ ██████ │ -│ 2023-03-24 00:24:04.900 │ 0 │ 6 │ ██████ │ -│ 2023-03-24 00:24:05.000 │ 0 │ 6 │ ██████ │ -└─────────────────────────┴───────┴────────────┴──────────┘ -``` diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/guides/developer/time-series-filling-gaps.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/guides/developer/time-series-filling-gaps.md.hash deleted file mode 100644 index 0857adebfe3..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/guides/developer/time-series-filling-gaps.md.hash +++ /dev/null @@ -1 +0,0 @@ -d4b1d30d7fa81654 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/guides/developer/ttl.md b/i18n/jp/docusaurus-plugin-content-docs/current/guides/developer/ttl.md deleted file mode 100644 index 03b0ccea6b7..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/guides/developer/ttl.md +++ /dev/null @@ -1,268 +0,0 @@ ---- -slug: '/guides/developer/ttl' -sidebar_label: 'TTL (Time To Live)' -sidebar_position: 2 -keywords: -- 'ttl' -- 'time to live' -- 'clickhouse' -- 'old' -- 'data' -description: 'TTL (time-to-live) は、一定の時間が経過した後、行または列を移動、削除、またはロールアップする機能を指します。' -title: 'Manage Data with TTL (Time-to-live)' ---- - -import CloudNotSupportedBadge from '@theme/badges/CloudNotSupportedBadge'; - - -# TTL(有効期限)によるデータ管理 - -## TTLの概要 {#overview-of-ttl} - -TTL(time-to-live)は、特定の時間が経過した後に行やカラムを移動、削除、または集約する機能を指します。「time-to-live」という表現は、古いデータの削除にのみ適用されるように聞こえますが、TTLにはいくつかのユースケースがあります: - -- 古いデータの削除:驚くことではありませんが、指定された時間が経過した後に行やカラムを削除できます。 -- ディスク間のデータ移動:一定時間が経過した後に、ストレージボリューム間でデータを移動できます - ホット/ウォーム/コールドアーキテクチャを展開するのに便利です。 -- データの集約:古いデータを削除する前に、さまざまな役立つ集約や計算に集約できます。 - -:::note -TTLは、テーブル全体または特定のカラムに適用できます。 -::: - -## TTL構文 {#ttl-syntax} - -`TTL`句は、カラム定義の後やテーブル定義の最後に出現することができます。時間の長さを定義するために`INTERVAL`句を使用します(データ型は`Date`または`DateTime`である必要があります)。例えば、以下のテーブルは、`TTL`句を持つ2つのカラムを持っています: - -```sql -CREATE TABLE example1 ( - timestamp DateTime, - x UInt32 TTL timestamp + INTERVAL 1 MONTH, - y String TTL timestamp + INTERVAL 1 DAY, - z String -) -ENGINE = MergeTree -ORDER BY tuple() -``` - -- xカラムはtimestampカラムから1か月の有効期限があります。 -- yカラムはtimestampカラムから1日の有効期限があります。 -- インターバルが経過すると、カラムは期限切れになります。ClickHouseは、そのデータ型のデフォルト値でカラムの値を置き換えます。データ部分内の全てのカラム値が期限切れになると、ClickHouseはファイルシステムからこのカラムを削除します。 - -:::note -TTLルールは変更または削除できます。詳細は[テーブルTTLの操作](/sql-reference/statements/alter/ttl.md)ページを参照してください。 -::: - -## TTLイベントのトリガー {#triggering-ttl-events} - 
-期限切れの行の削除または集約は即時には行われず、テーブルのマージ中のみ発生します。テーブルがアクティブにマージされていない場合(何らかの理由で)、TTLイベントをトリガーする2つの設定があります: - -- `merge_with_ttl_timeout`:削除TTLでマージを再実行する前の最小遅延(秒)。デフォルトは14400秒(4時間)です。 -- `merge_with_recompression_ttl_timeout`:再圧縮TTL(削除前にデータを集約するルール)でマージを再実行する前の最小遅延(秒)。デフォルト値:14400秒(4時間)。 - -したがって、デフォルトでは、あなたのTTLルールは少なくとも4時間ごとにテーブルに適用されます。TTLルールをより頻繁に適用したい場合は、上記の設定を変更してください。 - -:::note -あまり良い解決策ではありませんが(また、頻繁には使用することを推奨しません)、`OPTIMIZE`を使用してマージを強制することもできます: - -```sql -OPTIMIZE TABLE example1 FINAL -``` - -`OPTIMIZE`は、テーブルのパーツの予定外のマージを初期化し、`FINAL`はテーブルがすでに単一のパーツである場合に再最適化を強制します。 -::: - -## 行の削除 {#removing-rows} - -特定の時間が経過した後にテーブルから全行を削除するには、テーブルレベルでTTLルールを定義します: - -```sql -CREATE TABLE customers ( -timestamp DateTime, -name String, -balance Int32, -address String -) -ENGINE = MergeTree -ORDER BY timestamp -TTL timestamp + INTERVAL 12 HOUR -``` - -さらに、レコードの値に基づいてTTLルールを定義することも可能です。これは、WHERE条件を指定することで簡単に実装できます。複数の条件が許可されています: - -```sql -CREATE TABLE events -( - `event` String, - `time` DateTime, - `value` UInt64 -) -ENGINE = MergeTree -ORDER BY (event, time) -TTL time + INTERVAL 1 MONTH DELETE WHERE event != 'error', - time + INTERVAL 6 MONTH DELETE WHERE event = 'error' -``` - -## カラムの削除 {#removing-columns} - -全行を削除するのではなく、バランスとアドレスのカラムだけを期限切れにしたいとします。`customers`テーブルを修正して、両方のカラムのTTLを2時間に設定しましょう: - -```sql -ALTER TABLE customers -MODIFY COLUMN balance Int32 TTL timestamp + INTERVAL 2 HOUR, -MODIFY COLUMN address String TTL timestamp + INTERVAL 2 HOUR -``` - -## ロールアップの実装 {#implementing-a-rollup} - -特定の時間が経過した後に行を削除したいが、報告目的のために一部のデータを保持したいとします。すべての詳細を必要とせず、過去のデータの集約結果をいくつか保持したい場合、`TTL`表現に`GROUP BY`句を追加し、集約結果を保存するためのカラムをテーブルに追加することで実装できます。 - -以下の`hits`テーブルでは、古い行を削除したいが、行を削除する前に`hits`カラムの合計と最大値を保持したいとします。それらの値を保存するフィールドが必要で、合計と最大値をロールアップする`TTL`句に`GROUP BY`句を追加する必要があります。 - -```sql -CREATE TABLE hits ( - timestamp DateTime, - id String, - hits Int32, - max_hits Int32 DEFAULT hits, - sum_hits Int64 DEFAULT hits -) -ENGINE = MergeTree -PRIMARY KEY (id, toStartOfDay(timestamp), timestamp) -TTL timestamp + INTERVAL 1 DAY - GROUP BY id, toStartOfDay(timestamp) - SET - max_hits = max(max_hits), - sum_hits = sum(sum_hits); -``` - -`hits`テーブルに関するいくつかの注意事項: - -- `TTL`句の`GROUP BY`カラムは`PRIMARY KEY`の接頭辞でなければならず、日付の開始時刻で結果をグループ化したいと考えています。したがって、`toStartOfDay(timestamp)`が主キーに追加されました。 -- 集約結果を保存するために、`max_hits`と`sum_hits`という2つのフィールドを追加しました。 -- `max_hits`と`sum_hits`のデフォルト値を`hits`に設定することは、`SET`句の定義に基づいて、私たちのロジックが機能するために必要です。 - -## ホット/ウォーム/コールドアーキテクチャの実装 {#implementing-a-hotwarmcold-architecture} - - - -:::note -ClickHouse Cloudを使用している場合、レッスン内の手順は適用されません。ClickHouse Cloudで古いデータを移動することを心配する必要はありません。 -::: - -大量のデータを扱う際の一般的な慣行は、データが古くなるにつれてそのデータを移動することです。ここでは、ClickHouseの`TTL`コマンドの`TO DISK`および`TO VOLUME`句を使用してホット/ウォーム/コールドアーキテクチャを実装する手順を示します。(ちなみに、ホットとコールドのことをしなくても構いません - あなたのユースケースに合わせてデータを移動するためにTTLを使用できます。) - -1. `TO DISK`および`TO VOLUME`オプションは、ClickHouseの設定ファイルで定義されたディスクまたはボリュームの名前を指します。ディスクを定義し、それを使用するボリュームを定義する新しいファイルを`my_system.xml`という名前で作成します(ファイル名は何でも構いません)。XMLファイルを`/etc/clickhouse-server/config.d/`に置いて、設定をシステムに適用します: - -```xml - - - - - - - ./hot/ - - - ./warm/ - - - ./cold/ - - - - - - - default - - - hot_disk - - - warm_disk - - - cold_disk - - - - - - -``` - -2. 
上記の設定は、ClickHouseが読み取りおよび書き込みを行うことができるフォルダを指す3つのディスクを指します。ボリュームは1つまたはそれ以上のディスクを含むことができ、私たちはそれぞれの3つのディスク用のボリュームを定義しました。ディスクを表示してみましょう: - -```sql -SELECT name, path, free_space, total_space -FROM system.disks -``` - -```response -┌─name────────┬─path───────────┬───free_space─┬──total_space─┐ -│ cold_disk │ ./data/cold/ │ 179143311360 │ 494384795648 │ -│ default │ ./ │ 179143311360 │ 494384795648 │ -│ hot_disk │ ./data/hot/ │ 179143311360 │ 494384795648 │ -│ warm_disk │ ./data/warm/ │ 179143311360 │ 494384795648 │ -└─────────────┴────────────────┴──────────────┴──────────────┘ -``` - -3. そして...ボリュームを確認します: - -```sql -SELECT - volume_name, - disks -FROM system.storage_policies -``` - -```response -┌─volume_name─┬─disks─────────┐ -│ default │ ['default'] │ -│ hot_volume │ ['hot_disk'] │ -│ warm_volume │ ['warm_disk'] │ -│ cold_volume │ ['cold_disk'] │ -└─────────────┴───────────────┘ -``` - -4. 次に、ホット、ウォーム、コールドボリューム間でデータを移動する`TTL`ルールを追加します: - -```sql -ALTER TABLE my_table - MODIFY TTL - trade_date TO VOLUME 'hot_volume', - trade_date + INTERVAL 2 YEAR TO VOLUME 'warm_volume', - trade_date + INTERVAL 4 YEAR TO VOLUME 'cold_volume'; -``` - -5. 新しい`TTL`ルールを具現化させる必要がありますが、強制することで確認できます: - -```sql -ALTER TABLE my_table - MATERIALIZE TTL -``` - -6. `system.parts`テーブルを使用して、データが期待されるディスクに移動したかどうかを確認します: - -```sql -crypto_pricesテーブルのパーツがどのディスクにあるかをsystem.partsテーブルで確認する: - -SELECT - name, - disk_name -FROM system.parts -WHERE (table = 'my_table') AND (active = 1) -``` - -レスポンスは以下のようになります: - -```response -┌─name────────┬─disk_name─┐ -│ all_1_3_1_5 │ warm_disk │ -│ all_2_2_0 │ hot_disk │ -└─────────────┴───────────┘ -``` - -## 関連コンテンツ {#related-content} - -- ブログ & ウェビナー: [ClickHouseでデータライフサイクルを管理するためのTTLの使用](https://clickhouse.com/blog/using-ttl-to-manage-data-lifecycles-in-clickhouse) diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/guides/developer/ttl.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/guides/developer/ttl.md.hash deleted file mode 100644 index 38203726f0f..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/guides/developer/ttl.md.hash +++ /dev/null @@ -1 +0,0 @@ -b1cc38d63a21e481 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/guides/developer/understanding-query-execution-with-the-analyzer.md b/i18n/jp/docusaurus-plugin-content-docs/current/guides/developer/understanding-query-execution-with-the-analyzer.md deleted file mode 100644 index 5c38a7e78fa..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/guides/developer/understanding-query-execution-with-the-analyzer.md +++ /dev/null @@ -1,442 +0,0 @@ ---- -slug: '/guides/developer/understanding-query-execution-with-the-analyzer' -sidebar_label: 'Understanding Query Execution with the Analyzer' -title: 'Understanding Query Execution with the Analyzer' -description: 'Describes how you can use the analyzer to understand how ClickHouse - executes your queries' ---- - -import analyzer1 from '@site/static/images/guides/developer/analyzer1.png'; -import analyzer2 from '@site/static/images/guides/developer/analyzer2.png'; -import analyzer3 from '@site/static/images/guides/developer/analyzer3.png'; -import analyzer4 from '@site/static/images/guides/developer/analyzer4.png'; -import analyzer5 from '@site/static/images/guides/developer/analyzer5.png'; -import Image from '@theme/IdealImage'; - - -# クエリ実行の理解とアナライザー - -ClickHouseはクエリを非常に迅速に処理しますが、クエリの実行は単純なプロセスではありません。`SELECT` クエリがどのように実行されるかを理解してみましょう。その説明にあたり、ClickHouseのテーブルにいくつかのデータを追加してみます。 - -```sql -CREATE TABLE session_events( - clientId 
UUID, - sessionId UUID, - pageId UUID, - timestamp DateTime, - type String -) ORDER BY (timestamp); - -INSERT INTO session_events SELECT * FROM generateRandom('clientId UUID, - sessionId UUID, - pageId UUID, - timestamp DateTime, - type Enum(\'type1\', \'type2\')', 1, 10, 2) LIMIT 1000; -``` - -ClickHouseにデータが追加されたので、いくつかのクエリを実行し、実行の理解を深めたいと思います。クエリの実行は多くのステップに分解されます。各クエリの実行ステップは、対応する `EXPLAIN` クエリを使用して分析およびトラブルシューティングできます。これらのステップは以下のチャートに要約されています: - - - -クエリ実行時に各エンティティがどのように動作するかを見ていきましょう。いくつかのクエリを取り上げ、それらを `EXPLAIN` ステートメントを使って確認します。 - -## パーサー {#parser} - -パーサーの目標は、クエリテキストをAST(抽象構文木)に変換することです。このステップは、`EXPLAIN AST` を使用して視覚化できます: - -```sql -EXPLAIN AST SELECT min(timestamp), max(timestamp) FROM session_events; - -┌─explain────────────────────────────────────────────┐ -│ SelectWithUnionQuery (children 1) │ -│ ExpressionList (children 1) │ -│ SelectQuery (children 2) │ -│ ExpressionList (children 2) │ -│ Function min (alias minimum_date) (children 1) │ -│ ExpressionList (children 1) │ -│ Identifier timestamp │ -│ Function max (alias maximum_date) (children 1) │ -│ ExpressionList (children 1) │ -│ Identifier timestamp │ -│ TablesInSelectQuery (children 1) │ -│ TablesInSelectQueryElement (children 1) │ -│ TableExpression (children 1) │ -│ TableIdentifier session_events │ -└────────────────────────────────────────────────────┘ -``` - -出力は、以下のように視覚化できる抽象構文木です: - - - -各ノードには対応する子ノードがあり、全体の木構造はクエリの全体的な構造を表しています。これはクエリを処理するための論理構造です。エンドユーザーの視点から見ると(クエリ実行に興味がない限り)あまり役立ちません。このツールは主に開発者が使用します。 - -## アナライザー {#analyzer} - -ClickHouseには現在アナライザーの2つのアーキテクチャがあります。`enable_analyzer=0` を設定することで旧アーキテクチャを使用できます。新しいアーキテクチャはデフォルトで有効になっています。ここでは、旧アーキテクチャが新しいアナライザーが一般に利用可能になると廃止されることを考慮して、新しいアーキテクチャのみを説明します。 - -:::note -新しいアーキテクチャはClickHouseのパフォーマンスを改善するためのより良いフレームワークを提供します。しかし、クエリ処理ステップの基本的な要素であるため、一部のクエリに負の影響を与える可能性もあり、[既知の非互換性](/operations/analyzer#known-incompatibilities)があります。クエリまたはユーザーレベルで `enable_analyzer` 設定を変更することで、旧アナライザーに戻ることができます。 -::: - -アナライザーはクエリ実行の重要なステップです。ASTを受け取り、それをクエリツリーに変換します。ASTに対するクエリツリーの主な利点は、多くのコンポーネントが解決されていることです。たとえば、読み取るテーブルの情報やエイリアスも解決され、使用される異なるデータ型がツリーに知られています。これらの利点により、アナライザーは最適化を適用できます。これらの最適化は「パス」によって機能します。各パスは異なる最適化を探します。すべてのパスは[こちら](https://github.com/ClickHouse/ClickHouse/blob/76578ebf92af3be917cd2e0e17fea2965716d958/src/Analyzer/QueryTreePassManager.cpp#L249)で確認できます。前述のクエリを実際に見てみましょう: - -```sql -EXPLAIN QUERY TREE passes=0 SELECT min(timestamp) AS minimum_date, max(timestamp) AS maximum_date FROM session_events SETTINGS allow_experimental_analyzer=1; - -┌─explain────────────────────────────────────────────────────────────────────────────────┐ -│ QUERY id: 0 │ -│ PROJECTION │ -│ LIST id: 1, nodes: 2 │ -│ FUNCTION id: 2, alias: minimum_date, function_name: min, function_type: ordinary │ -│ ARGUMENTS │ -│ LIST id: 3, nodes: 1 │ -│ IDENTIFIER id: 4, identifier: timestamp │ -│ FUNCTION id: 5, alias: maximum_date, function_name: max, function_type: ordinary │ -│ ARGUMENTS │ -│ LIST id: 6, nodes: 1 │ -│ IDENTIFIER id: 7, identifier: timestamp │ -│ JOIN TREE │ -│ IDENTIFIER id: 8, identifier: session_events │ -│ SETTINGS allow_experimental_analyzer=1 │ -└────────────────────────────────────────────────────────────────────────────────────────┘ -``` - -```sql -EXPLAIN QUERY TREE passes=20 SELECT min(timestamp) AS minimum_date, max(timestamp) AS maximum_date FROM session_events SETTINGS allow_experimental_analyzer=1; - -┌─explain───────────────────────────────────────────────────────────────────────────────────┐ -│ QUERY id: 0 │ -│ PROJECTION COLUMNS │ -│ minimum_date DateTime │ -│ 
maximum_date DateTime │ -│ PROJECTION │ -│ LIST id: 1, nodes: 2 │ -│ FUNCTION id: 2, function_name: min, function_type: aggregate, result_type: DateTime │ -│ ARGUMENTS │ -│ LIST id: 3, nodes: 1 │ -│ COLUMN id: 4, column_name: timestamp, result_type: DateTime, source_id: 5 │ -│ FUNCTION id: 6, function_name: max, function_type: aggregate, result_type: DateTime │ -│ ARGUMENTS │ -│ LIST id: 7, nodes: 1 │ -│ COLUMN id: 4, column_name: timestamp, result_type: DateTime, source_id: 5 │ -│ JOIN TREE │ -│ TABLE id: 5, alias: __table1, table_name: default.session_events │ -│ SETTINGS allow_experimental_analyzer=1 │ -└───────────────────────────────────────────────────────────────────────────────────────────┘ -``` - -2つの実行間で、エイリアスとプロジェクションの解決を見ることができます。 - -## プランナー {#planner} - -プランナーはクエリツリーを受け取り、そこからクエリプランを構築します。クエリツリーは特定のクエリを何をしたいかを教えてくれ、クエリプランはそれをどのように行うかを示します。クエリプランの一環として追加の最適化が行われます。クエリプランを見るには `EXPLAIN PLAN` または `EXPLAIN` を使用できます(`EXPLAIN` は `EXPLAIN PLAN` を実行します)。 - -```sql -EXPLAIN PLAN WITH - ( - SELECT count(*) - FROM session_events - ) AS total_rows -SELECT type, min(timestamp) AS minimum_date, max(timestamp) AS maximum_date, count(*) /total_rows * 100 AS percentage FROM session_events GROUP BY type; - -┌─explain──────────────────────────────────────────┐ -│ Expression ((Projection + Before ORDER BY)) │ -│ Aggregating │ -│ Expression (Before GROUP BY) │ -│ ReadFromMergeTree (default.session_events) │ -└──────────────────────────────────────────────────┘ -``` - -この情報は提供されますが、さらに得たい情報があるかもしれません。例えば、プロジェクションが必要な列名を知りたい場合、クエリにヘッダーを追加できます: - -```SQL -EXPLAIN header = 1 -WITH ( - SELECT count(*) - FROM session_events - ) AS total_rows -SELECT - type, - min(timestamp) AS minimum_date, - max(timestamp) AS maximum_date, - (count(*) / total_rows) * 100 AS percentage -FROM session_events -GROUP BY type; - -┌─explain──────────────────────────────────────────┐ -│ Expression ((Projection + Before ORDER BY)) │ -│ Header: type String │ -│ minimum_date DateTime │ -│ maximum_date DateTime │ -│ percentage Nullable(Float64) │ -│ Aggregating │ -│ Header: type String │ -│ min(timestamp) DateTime │ -│ max(timestamp) DateTime │ -│ count() UInt64 │ -│ Expression (Before GROUP BY) │ -│ Header: timestamp DateTime │ -│ type String │ -│ ReadFromMergeTree (default.session_events) │ -│ Header: timestamp DateTime │ -│ type String │ -└──────────────────────────────────────────────────┘ -``` - -これで、最後のプロジェクション(`minimum_date`、`maximum_date`、および `percentage`)のために作成する必要がある列名がわかります。しかし、実行する必要があるすべてのアクションの詳細も知りたいかもしれません。`actions=1` を設定することで実現できます。 - -```sql -EXPLAIN actions = 1 -WITH ( - SELECT count(*) - FROM session_events - ) AS total_rows -SELECT - type, - min(timestamp) AS minimum_date, - max(timestamp) AS maximum_date, - (count(*) / total_rows) * 100 AS percentage -FROM session_events -GROUP BY type; - -┌─explain────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┐ -│ Expression ((Projection + Before ORDER BY)) │ -│ Actions: INPUT :: 0 -> type String : 0 │ -│ INPUT : 1 -> min(timestamp) DateTime : 1 │ -│ INPUT : 2 -> max(timestamp) DateTime : 2 │ -│ INPUT : 3 -> count() UInt64 : 3 │ -│ COLUMN Const(Nullable(UInt64)) -> total_rows Nullable(UInt64) : 4 │ -│ COLUMN Const(UInt8) -> 100 UInt8 : 5 │ -│ ALIAS min(timestamp) :: 1 -> minimum_date DateTime : 6 │ -│ ALIAS max(timestamp) :: 2 -> maximum_date DateTime : 1 │ -│ FUNCTION divide(count() :: 3, total_rows :: 4) -> divide(count(), total_rows) Nullable(Float64) : 2 │ -│ FUNCTION 
multiply(divide(count() :: 3, total_rows :: 4) :: 2, 100 :: 5) -> multiply(divide(count(), total_rows), 100) Nullable(Float64) : 4 │ -│ ALIAS multiply(divide(count(), total_rows), 100) :: 4 -> percentage Nullable(Float64) : 5 │ -│ Positions: 0 6 1 5 │ -│ Aggregating │ -│ Keys: type │ -│ Aggregates: │ -│ min(timestamp) │ -│ Function: min(DateTime) → DateTime │ -│ Arguments: timestamp │ -│ max(timestamp) │ -│ Function: max(DateTime) → DateTime │ -│ Arguments: timestamp │ -│ count() │ -│ Function: count() → UInt64 │ -│ Arguments: none │ -│ Skip merging: 0 │ -│ Expression (Before GROUP BY) │ -│ Actions: INPUT :: 0 -> timestamp DateTime : 0 │ -│ INPUT :: 1 -> type String : 1 │ -│ Positions: 0 1 │ -│ ReadFromMergeTree (default.session_events) │ -│ ReadType: Default │ -│ Parts: 1 │ -│ Granules: 1 │ -└────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┘ -``` - -これで、使用されているすべての入力、関数、エイリアス、およびデータ型を確認できます。プランナーが適用する最適化の一部は[こちら](https://github.com/ClickHouse/ClickHouse/blob/master/src/Processors/QueryPlan/Optimizations/Optimizations.h)で見ることができます。 - -## クエリパイプライン {#query-pipeline} - -クエリパイプラインはクエリプランから生成されます。クエリパイプラインはクエリプランと非常に似ていますが、木構造ではなくグラフです。ClickHouseがクエリをどのように実行し、どのリソースが使用されるかを明示します。クエリパイプラインを分析することは、入力/出力の観点でボトルネックを確認するために非常に役立ちます。前述のクエリを取り上げ、クエリパイプラインの実行を見てみましょう: - -```sql -EXPLAIN PIPELINE -WITH ( - SELECT count(*) - FROM session_events - ) AS total_rows -SELECT - type, - min(timestamp) AS minimum_date, - max(timestamp) AS maximum_date, - (count(*) / total_rows) * 100 AS percentage -FROM session_events -GROUP BY type; - -┌─explain────────────────────────────────────────────────────────────────────┐ -│ (Expression) │ -│ ExpressionTransform × 2 │ -│ (Aggregating) │ -│ Resize 1 → 2 │ -│ AggregatingTransform │ -│ (Expression) │ -│ ExpressionTransform │ -│ (ReadFromMergeTree) │ -│ MergeTreeSelect(pool: PrefetchedReadPool, algorithm: Thread) 0 → 1 │ -└────────────────────────────────────────────────────────────────────────────┘ -``` - -括弧内はクエリプランステップであり、その隣にプロセッサがあります。これは優れた情報ですが、これはグラフであるため、グラフとして視覚化すると良いでしょう。`graph`設定を1にして、出力フォーマットをTSVに指定することができます: - -```sql -EXPLAIN PIPELINE graph=1 WITH - ( - SELECT count(*) - FROM session_events - ) AS total_rows -SELECT type, min(timestamp) AS minimum_date, max(timestamp) AS maximum_date, count(*) /total_rows * 100 AS percentage FROM session_events GROUP BY type FORMAT TSV; -``` - -```response -digraph -{ - rankdir="LR"; - { node [shape = rect] - subgraph cluster_0 { - label ="Expression"; - style=filled; - color=lightgrey; - node [style=filled,color=white]; - { rank = same; - n5 [label="ExpressionTransform × 2"]; - } - } - subgraph cluster_1 { - label ="Aggregating"; - style=filled; - color=lightgrey; - node [style=filled,color=white]; - { rank = same; - n3 [label="AggregatingTransform"]; - n4 [label="Resize"]; - } - } - subgraph cluster_2 { - label ="Expression"; - style=filled; - color=lightgrey; - node [style=filled,color=white]; - { rank = same; - n2 [label="ExpressionTransform"]; - } - } - subgraph cluster_3 { - label ="ReadFromMergeTree"; - style=filled; - color=lightgrey; - node [style=filled,color=white]; - { rank = same; - n1 [label="MergeTreeSelect(pool: PrefetchedReadPool, algorithm: Thread)"]; - } - } - } - n3 -> n4 [label=""]; - n4 -> n5 [label="× 2"]; - n2 -> n3 [label=""]; - n1 -> n2 [label=""]; -} -``` - -この出力をコピーして、[こちら](https://dreampuf.github.io/GraphvizOnline)に貼り付けると、以下のグラフが生成されます: - - - 
-白い長方形はパイプラインノードに対応し、灰色の長方形はクエリプランステップに対応し、`x`の後に続く数字は使用される入力/出力の数に対応します。コンパクトな形式で表示したくない場合は、`compact=0`を追加できます。 - -```sql -EXPLAIN PIPELINE graph = 1, compact = 0 -WITH ( - SELECT count(*) - FROM session_events - ) AS total_rows -SELECT - type, - min(timestamp) AS minimum_date, - max(timestamp) AS maximum_date, - (count(*) / total_rows) * 100 AS percentage -FROM session_events -GROUP BY type -FORMAT TSV; -``` - -```response -digraph -{ - rankdir="LR"; - { node [shape = rect] - n0[label="MergeTreeSelect(pool: PrefetchedReadPool, algorithm: Thread)"]; - n1[label="ExpressionTransform"]; - n2[label="AggregatingTransform"]; - n3[label="Resize"]; - n4[label="ExpressionTransform"]; - n5[label="ExpressionTransform"]; - } - n0 -> n1; - n1 -> n2; - n2 -> n3; - n3 -> n4; - n3 -> n5; -} -``` - - - -ClickHouseはなぜ複数のスレッドを使用してテーブルから読み取らないのでしょうか?テーブルにより多くのデータを追加してみましょう: - -```sql -INSERT INTO session_events SELECT * FROM generateRandom('clientId UUID, - sessionId UUID, - pageId UUID, - timestamp DateTime, - type Enum(\'type1\', \'type2\')', 1, 10, 2) LIMIT 1000000; -``` - -それでは、再度 `EXPLAIN` クエリを実行してみましょう: - -```sql -EXPLAIN PIPELINE graph = 1, compact = 0 -WITH ( - SELECT count(*) - FROM session_events - ) AS total_rows -SELECT - type, - min(timestamp) AS minimum_date, - max(timestamp) AS maximum_date, - (count(*) / total_rows) * 100 AS percentage -FROM session_events -GROUP BY type -FORMAT TSV; -``` - -```response -digraph -{ - rankdir="LR"; - { node [shape = rect] - n0[label="MergeTreeSelect(pool: PrefetchedReadPool, algorithm: Thread)"]; - n1[label="MergeTreeSelect(pool: PrefetchedReadPool, algorithm: Thread)"]; - n2[label="ExpressionTransform"]; - n3[label="ExpressionTransform"]; - n4[label="StrictResize"]; - n5[label="AggregatingTransform"]; - n6[label="AggregatingTransform"]; - n7[label="Resize"]; - n8[label="ExpressionTransform"]; - n9[label="ExpressionTransform"]; - } - n0 -> n2; - n1 -> n3; - n2 -> n4; - n3 -> n4; - n4 -> n5; - n4 -> n6; - n5 -> n7; - n6 -> n7; - n7 -> n8; - n7 -> n9; -} -``` - - - -このように、エグゼキュータはデータボリュームが十分に高くないため、操作を並列化しないことを決定しました。行を追加することで、エグゼキュータは複数のスレッドを使用することを決定しました、グラフに示されるように。 - -## エグゼキュータ {#executor} - -クエリ実行の最終ステップはエグゼキュータによって行われます。エグゼキュータはクエリパイプラインを受け取り、それを実行します。`SELECT`、`INSERT`、または `INSERT SELECT` を行うかどうかに応じて異なる種類のエグゼキュータがあります。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/guides/developer/understanding-query-execution-with-the-analyzer.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/guides/developer/understanding-query-execution-with-the-analyzer.md.hash deleted file mode 100644 index 31e3efec9a8..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/guides/developer/understanding-query-execution-with-the-analyzer.md.hash +++ /dev/null @@ -1 +0,0 @@ -e50dc2662ccd0d35 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/anyIf.md b/i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/anyIf.md deleted file mode 100644 index 2ef73e53859..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/anyIf.md +++ /dev/null @@ -1,62 +0,0 @@ ---- -slug: '/examples/aggregate-function-combinators/anyIf' -title: 'anyIf' -description: '使用例:anyIf コンビネーター' -keywords: -- 'any' -- 'if' -- 'combinator' -- 'examples' -- 'anyIf' -sidebar_label: 'anyIf' ---- - - - - -# anyIf {#avgif} - -## 説明 {#description} - -[`If`](/sql-reference/aggregate-functions/combinators#-if) 
コンビネーターは、指定された条件に一致する特定のカラムから最初に遭遇した要素を選択するために、[`any`](/sql-reference/aggregate-functions/reference/any) 集約関数に適用できます。 - -## 使用例 {#example-usage} - -この例では、成功フラグを持つ売上データを保存するテーブルを作成し、`anyIf` を使用して、金額 200 より上、および下の最初の `transaction_id` を選択します。 - -まず、テーブルを作成し、データを挿入します: - -```sql title="クエリ" -CREATE TABLE sales( - transaction_id UInt32, - amount Decimal(10,2), - is_successful UInt8 -) -ENGINE = MergeTree() -ORDER BY tuple(); - -INSERT INTO sales VALUES - (1, 100.00, 1), - (2, 150.00, 1), - (3, 155.00, 0), - (4, 300.00, 1), - (5, 250.50, 0), - (6, 175.25, 1); -``` - -```sql -SELECT - anyIf(transaction_id, amount < 200) as tid_lt_200, - anyIf(transaction_id, amount > 200) as tid_gt_200 -FROM sales; -``` - -```response title="応答" -┌─tid_lt_200─┬─tid_gt_200─┐ -│ 1 │ 4 │ -└────────────┴────────────┘ -``` - -## 関連情報 {#see-also} -- [`any`](/sql-reference/aggregate-functions/reference/any) -- [`If combinator`](/sql-reference/aggregate-functions/combinators#-if) diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/anyIf.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/anyIf.md.hash deleted file mode 100644 index d42da56fb3a..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/anyIf.md.hash +++ /dev/null @@ -1 +0,0 @@ -eb027895ea1eec31 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/argMaxIf.md b/i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/argMaxIf.md deleted file mode 100644 index 14e34d74310..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/argMaxIf.md +++ /dev/null @@ -1,60 +0,0 @@ ---- -slug: '/examples/aggregate-function-combinators/argMaxIf' -title: 'argMaxIf' -description: 'argMaxIf combinatorの使用例' -keywords: -- 'argMax' -- 'if' -- 'combinator' -- 'examples' -- 'argMaxIf' -sidebar_label: 'argMaxIf' ---- - - - - -# argMaxIf {#argmaxif} - -## 説明 {#description} - -[`If`](/sql-reference/aggregate-functions/combinators#-if) コンビネーターは、[`argMax`](/sql-reference/aggregate-functions/reference/argmax) 関数に適用して、条件が真である行の `val` の最大値に対応する `arg` の値を見つけるために、`argMaxIf` 集約コンビネータ関数を使用できます。 - -`argMaxIf` 関数は、特定の条件を満たす行のみのデータセット内の最大値に関連付けられた値を見つける必要があるときに便利です。 - -## 使用例 {#example-usage} - -この例では、製品販売のサンプルデータセットを使用して、`argMaxIf` の動作を説明します。販売数が10回以上の製品の中で、最も高い価格の製品名を見つけます。 - -```sql title="クエリ" -CREATE TABLE product_sales -( - product_name String, - price Decimal32(2), - sales_count UInt32 -) ENGINE = Memory; - -INSERT INTO product_sales VALUES - ('Laptop', 999.99, 10), - ('Phone', 499.99, 15), - ('Tablet', 299.99, 0), - ('Watch', 199.99, 5), - ('Headphones', 79.99, 20); - -SELECT argMaxIf(product_name, price, sales_count >= 10) as most_expensive_popular_product -FROM product_sales; -``` - -`argMaxIf` 関数は、販売数が10回以上のすべての製品の中で最も高い価格の製品名を返します(sales_count >= 10)。この場合、人気のある製品の中で最高価格(999.99)のため 'Laptop' を返します。 - -```response title="レスポンス" - ┌─most_expensi⋯lar_product─┐ -1. 
│ Laptop │ - └──────────────────────────┘ -``` - -## 参照 {#see-also} -- [`argMax`](/sql-reference/aggregate-functions/reference/argmax) -- [`argMin`](/sql-reference/aggregate-functions/reference/argmin) -- [`argMinIf`](/examples/aggregate-function-combinators/argMinIf) -- [`If combinator`](/sql-reference/aggregate-functions/combinators#-if) diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/argMaxIf.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/argMaxIf.md.hash deleted file mode 100644 index f0db3303370..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/argMaxIf.md.hash +++ /dev/null @@ -1 +0,0 @@ -0fe6adcc7b594d77 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/argMinIf.md b/i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/argMinIf.md deleted file mode 100644 index a0ffafce57f..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/argMinIf.md +++ /dev/null @@ -1,67 +0,0 @@ ---- -slug: '/examples/aggregate-function-combinators/argMinIf' -title: 'argMinIf' -description: 'Example of using the argMinIf combinator' -keywords: -- 'argMin' -- 'if' -- 'combinator' -- 'examples' -- 'argMinIf' -sidebar_label: 'argMinIf' ---- - - - - -# argMinIf {#argminif} - -## 説明 {#description} - -[`If`](/sql-reference/aggregate-functions/combinators#-if) コンビネータは、[`argMin`](/sql-reference/aggregate-functions/reference/argmin) 関数に適用して、条件が真である行について `val` の最小値に対応する `arg` の値を見つけるために使用されます。これは `argMinIf` 集約コンビネータ関数を使用して行います。 - -`argMinIf` 関数は、データセット内の最小値に関連付けられた値を見つける必要があるが、特定の条件を満たす行のみを考慮する場合に便利です。 - -## 使用例 {#example-usage} - -この例では、製品の価格とそのタイムスタンプを保存するテーブルを作成し、`argMinIf` を使用して、在庫があるときの各製品の最低価格を見つけます。 - -```sql title="クエリ" -CREATE TABLE product_prices( - product_id UInt32, - price Decimal(10,2), - timestamp DateTime, - in_stock UInt8 -) ENGINE = Log; - -INSERT INTO product_prices VALUES - (1, 10.99, '2024-01-01 10:00:00', 1), - (1, 9.99, '2024-01-01 10:05:00', 1), - (1, 11.99, '2024-01-01 10:10:00', 0), - (2, 20.99, '2024-01-01 11:00:00', 1), - (2, 19.99, '2024-01-01 11:05:00', 1), - (2, 21.99, '2024-01-01 11:10:00', 1); - -SELECT - product_id, - argMinIf(price, timestamp, in_stock = 1) as lowest_price_when_in_stock -FROM product_prices -GROUP BY product_id; -``` - -`argMinIf` 関数は、各製品の最も早いタイムスタンプに対応する価格を見つけますが、`in_stock = 1` の行のみを考慮します。例えば: -- 製品 1: 在庫がある行の中で、10.99 が最も早いタイムスタンプ(10:00:00)を持っています。 -- 製品 2: 在庫がある行の中で、20.99 が最も早いタイムスタンプ(11:00:00)を持っています。 - -```response title="レスポンス" - ┌─product_id─┬─lowest_price_when_in_stock─┐ -1. │ 1 │ 10.99 │ -2. 
│ 2 │ 20.99 │ - └────────────┴────────────────────────────┘ -``` - -## 関連項目 {#see-also} -- [`argMin`](/sql-reference/aggregate-functions/reference/argmin) -- [`argMax`](/sql-reference/aggregate-functions/reference/argmax) -- [`argMaxIf`](/examples/aggregate-function-combinators/argMaxIf) -- [`If combinator`](/sql-reference/aggregate-functions/combinators#-if) diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/argMinIf.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/argMinIf.md.hash deleted file mode 100644 index 99e23a9ab91..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/argMinIf.md.hash +++ /dev/null @@ -1 +0,0 @@ -a6036cce7aa32370 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/avgIf.md b/i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/avgIf.md deleted file mode 100644 index ebb01e05fb9..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/avgIf.md +++ /dev/null @@ -1,58 +0,0 @@ ---- -slug: '/examples/aggregate-function-combinators/avgIf' -title: 'avgIf' -description: 'avgIfコンビネータの使用例' -keywords: -- 'avg' -- 'if' -- 'combinator' -- 'examples' -- 'avgIf' -sidebar_label: 'avgIf' ---- - - - - -# avgIf {#avgif} - -## 説明 {#description} - -[`If`](/sql-reference/aggregate-functions/combinators#-if) コンビネータは、[`avg`](/sql-reference/aggregate-functions/reference/avg) 関数に適用することで、条件が真である行の値の算術平均を計算するために `avgIf` 集約コンビネータ関数を使用できます。 - -## 例の使用法 {#example-usage} - -この例では、成功フラグを持つ販売データを格納するテーブルを作成し、`avgIf` を使用して成功したトランザクションの平均販売額を計算します。 - -```sql title="クエリ" -CREATE TABLE sales( - transaction_id UInt32, - amount Decimal(10,2), - is_successful UInt8 -) ENGINE = Log; - -INSERT INTO sales VALUES - (1, 100.50, 1), - (2, 200.75, 1), - (3, 150.25, 0), - (4, 300.00, 1), - (5, 250.50, 0), - (6, 175.25, 1); - -SELECT - avgIf(amount, is_successful = 1) as avg_successful_sale -FROM sales; -``` - -`avgIf` 関数は、`is_successful = 1` の行についてのみ平均額を計算します。 -この場合、金額は 100.50, 200.75, 300.00, 175.25 の平均を取ります。 - -```response title="応答" - ┌─avg_successful_sale─┐ -1. 
│ 193.88 │ - └─────────────────────┘ -``` - -## 参照 {#see-also} -- [`avg`](/sql-reference/aggregate-functions/reference/avg) -- [`If combinator`](/sql-reference/aggregate-functions/combinators#-if) diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/avgIf.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/avgIf.md.hash deleted file mode 100644 index 241c041a592..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/avgIf.md.hash +++ /dev/null @@ -1 +0,0 @@ -2c9f8d6a17142d4c diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/avgMap.md b/i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/avgMap.md deleted file mode 100644 index 66a9882adda..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/avgMap.md +++ /dev/null @@ -1,69 +0,0 @@ ---- -slug: '/examples/aggregate-function-combinators/avgMap' -title: 'avgMap' -description: 'avgMap combinatorの使用例' -keywords: -- 'avg' -- 'map' -- 'combinator' -- 'examples' -- 'avgMap' -sidebar_label: 'avgMap' ---- - - - - -# avgMap {#avgmap} - -## 説明 {#description} - -[`Map`](/sql-reference/aggregate-functions/combinators#-map) コマンビネータは、`avg` 関数に適用して、各キーに対する Map 内の値の算術平均を計算するために、`avgMap` 集約コマンビネータ関数を使用できます。 - -## 使用例 {#example-usage} - -この例では、異なるタイムスロットのステータスコードとそのカウントを保存するテーブルを作成します。各行には、ステータスコードとそれに対応するカウントの Map が含まれます。`avgMap` を使用して、各タイムスロット内の各ステータスコードの平均カウントを計算します。 - -```sql title="クエリ" -CREATE TABLE metrics( - date Date, - timeslot DateTime, - status Map(String, UInt64) -) ENGINE = Log; - -INSERT INTO metrics VALUES - ('2000-01-01', '2000-01-01 00:00:00', (['a', 'b', 'c'], [15, 25, 35])), - ('2000-01-01', '2000-01-01 00:00:00', (['c', 'd', 'e'], [45, 55, 65])), - ('2000-01-01', '2000-01-01 00:01:00', (['d', 'e', 'f'], [75, 85, 95])), - ('2000-01-01', '2000-01-01 00:01:00', (['f', 'g', 'g'], [105, 115, 125])); - -SELECT - timeslot, - avgMap(status), -FROM metrics -GROUP BY timeslot; -``` - -`avgMap` 関数は、各タイムスロット内の各ステータスコードの平均カウントを計算します。例えば: -- タイムスロット '2000-01-01 00:00:00': - - ステータス 'a': 15 - - ステータス 'b': 25 - - ステータス 'c': (35 + 45) / 2 = 40 - - ステータス 'd': 55 - - ステータス 'e': 65 -- タイムスロット '2000-01-01 00:01:00': - - ステータス 'd': 75 - - ステータス 'e': 85 - - ステータス 'f': (95 + 105) / 2 = 100 - - ステータス 'g': (115 + 125) / 2 = 120 - -```response title="レスポンス" - ┌────────────timeslot─┬─avgMap(status)───────────────────────┐ -1. │ 2000-01-01 00:01:00 │ {'d':75,'e':85,'f':100,'g':120} │ -2. 
│ 2000-01-01 00:00:00 │ {'a':15,'b':25,'c':40,'d':55,'e':65} │ - └─────────────────────┴──────────────────────────────────────┘ -``` - -## 参考 {#see-also} -- [`avg`](/sql-reference/aggregate-functions/reference/avg) -- [`Map combinator`](/sql-reference/aggregate-functions/combinators#-map) diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/avgMap.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/avgMap.md.hash deleted file mode 100644 index b50d0de6a2d..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/avgMap.md.hash +++ /dev/null @@ -1 +0,0 @@ -f2d5c7a0e9eb6cdd diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/avgMerge.md b/i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/avgMerge.md deleted file mode 100644 index c51a14b385c..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/avgMerge.md +++ /dev/null @@ -1,30 +0,0 @@ ---- -slug: '/examples/aggregate-function-combinators/avgMerge' -title: 'avgMerge' -description: 'avgMerge combinatorの使用例' -keywords: -- 'avg' -- 'merge' -- 'combinator' -- 'examples' -- 'avgMerge' -sidebar_label: 'avgMerge' ---- - - - - -# avgMerge {#avgMerge} - -## 説明 {#description} - -[`Merge`](/sql-reference/aggregate-functions/combinators#-state) コンビネータは、部分的な集約状態を結合して最終結果を生成するために、[`avg`](/sql-reference/aggregate-functions/reference/avg) 関数に適用することができます。 - -## 使用例 {#example-usage} - -`Merge` コンビネータは `State` コンビネータに密接に関連しています。両方の `avgMerge` と `avgState` の使用例については、["avgState 使用例"](/examples/aggregate-function-combinators/avgState/#example-usage) を参照してください。 - -## 参照 {#see-also} -- [`avg`](/sql-reference/aggregate-functions/reference/avg) -- [`Merge`](/sql-reference/aggregate-functions/combinators#-merge) -- [`MergeState`](/sql-reference/aggregate-functions/combinators#-mergestate) diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/avgMerge.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/avgMerge.md.hash deleted file mode 100644 index 219e190d654..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/avgMerge.md.hash +++ /dev/null @@ -1 +0,0 @@ -07597e74cdade01e diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/avgMergeState.md b/i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/avgMergeState.md deleted file mode 100644 index 37bb0523c3e..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/avgMergeState.md +++ /dev/null @@ -1,213 +0,0 @@ ---- -slug: '/examples/aggregate-function-combinators/avgMergeState' -title: 'avgMergeState' -description: 'avgMergeState combinator の使用例' -keywords: -- 'avg' -- 'MergeState' -- 'combinator' -- 'examples' -- 'avgMergeState' -sidebar_label: 'avgMergeState' ---- - -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; - - - -# avgMergeState {#avgMergeState} - -## 説明 {#description} - -[`MergeState`](/sql-reference/aggregate-functions/combinators#-state) コンビネータは -[`avg`](/sql-reference/aggregate-functions/reference/avg) 関数に適用され、`AverageFunction(avg, T)` 
型の部分集約状態を結合し、新しい中間集約状態を返します。 - -## 使用例 {#example-usage} - -`MergeState` コンビネータは、事前に集約された状態を結合し、それをさらなる処理のために状態として保持したいマルチレベルの集約シナリオに特に役立ちます。ここでは、個々のサーバーのパフォーマンス指標を複数のレベルにわたる階層的集約に変換する例を見てみましょう:サーバーレベル → リージョンレベル → データセンターレベル。 - -まず、原データを格納するテーブルを作成します。 - -```sql -CREATE TABLE raw_server_metrics -( - timestamp DateTime DEFAULT now(), - server_id UInt32, - region String, - datacenter String, - response_time_ms UInt32 -) -ENGINE = MergeTree() -ORDER BY (region, server_id, timestamp); -``` - -サーバーレベルの集約ターゲットテーブルを作成し、そこに挿入トリガーとして機能するインクリメンタル Materialized View を定義します。 - -```sql -CREATE TABLE server_performance -( - server_id UInt32, - region String, - datacenter String, - avg_response_time AggregateFunction(avg, UInt32) -) -ENGINE = AggregatingMergeTree() -ORDER BY (region, server_id); - -CREATE MATERIALIZED VIEW server_performance_mv -TO server_performance -AS SELECT - server_id, - region, - datacenter, - avgState(response_time_ms) AS avg_response_time -FROM raw_server_metrics -GROUP BY server_id, region, datacenter; -``` - -地域およびデータセンターレベルについても同様に行います。 - -```sql -CREATE TABLE region_performance -( - region String, - datacenter String, - avg_response_time AggregateFunction(avg, UInt32) -) -ENGINE = AggregatingMergeTree() -ORDER BY (datacenter, region); - -CREATE MATERIALIZED VIEW region_performance_mv -TO region_performance -AS SELECT - region, - datacenter, - avgMergeState(avg_response_time) AS avg_response_time -FROM server_performance -GROUP BY region, datacenter; - --- データセンターレベルのテーブルと Materialized View - -CREATE TABLE datacenter_performance -( - datacenter String, - avg_response_time AggregateFunction(avg, UInt32) -) -ENGINE = AggregatingMergeTree() -ORDER BY datacenter; - -CREATE MATERIALIZED VIEW datacenter_performance_mv -TO datacenter_performance -AS SELECT - datacenter, - avgMergeState(avg_response_time) AS avg_response_time -FROM region_performance -GROUP BY datacenter; -``` - -次に、ソーステーブルにサンプルの生データを挿入します。 - -```sql -INSERT INTO raw_server_metrics (timestamp, server_id, region, datacenter, response_time_ms) VALUES - (now(), 101, 'us-east', 'dc1', 120), - (now(), 101, 'us-east', 'dc1', 130), - (now(), 102, 'us-east', 'dc1', 115), - (now(), 201, 'us-west', 'dc1', 95), - (now(), 202, 'us-west', 'dc1', 105), - (now(), 301, 'eu-central', 'dc2', 145), - (now(), 302, 'eu-central', 'dc2', 155); -``` - -各レベルに対して3つのクエリを書きます。 - - - -```sql -SELECT - server_id, - region, - avgMerge(avg_response_time) AS avg_response_ms -FROM server_performance -GROUP BY server_id, region -ORDER BY region, server_id; -``` -```response -┌─server_id─┬─region─────┬─avg_response_ms─┐ -│ 301 │ eu-central │ 145 │ -│ 302 │ eu-central │ 155 │ -│ 101 │ us-east │ 125 │ -│ 102 │ us-east │ 115 │ -│ 201 │ us-west │ 95 │ -│ 202 │ us-west │ 105 │ -└───────────┴────────────┴─────────────────┘ -``` - - -```sql -SELECT - region, - datacenter, - avgMerge(avg_response_time) AS avg_response_ms -FROM region_performance -GROUP BY region, datacenter -ORDER BY datacenter, region; -``` -```response -┌─region─────┬─datacenter─┬────avg_response_ms─┐ -│ us-east │ dc1 │ 121.66666666666667 │ -│ us-west │ dc1 │ 100 │ -│ eu-central │ dc2 │ 150 │ -└────────────┴────────────┴────────────────────┘ -``` - - -```sql -SELECT - datacenter, - avgMerge(avg_response_time) AS avg_response_ms -FROM datacenter_performance -GROUP BY datacenter -ORDER BY datacenter; -``` -```response -┌─datacenter─┬─avg_response_ms─┐ -│ dc1 │ 113 │ -│ dc2 │ 150 │ -└────────────┴─────────────────┘ -``` - - - -さらにデータを挿入します。 - -```sql -INSERT INTO raw_server_metrics (timestamp, 
 server_id, region, datacenter, response_time_ms) VALUES
- (now(), 101, 'us-east', 'dc1', 140),
- (now(), 201, 'us-west', 'dc1', 85),
- (now(), 301, 'eu-central', 'dc2', 135);
-```
-
-データセンターレベルのパフォーマンスを再度確認します。集約チェーン全体が自動的に更新される様子に注意してください。
-
-```sql
-SELECT
-    datacenter,
-    avgMerge(avg_response_time) AS avg_response_ms
-FROM datacenter_performance
-GROUP BY datacenter
-ORDER BY datacenter;
-```
-
-```response
-┌─datacenter─┬────avg_response_ms─┐
-│ dc1        │ 112.85714285714286 │
-│ dc2        │                145 │
-└────────────┴────────────────────┘
-```
-
-## 参考 {#see-also}
-- [`avg`](/sql-reference/aggregate-functions/reference/avg)
-- [`AggregateFunction`](/sql-reference/data-types/aggregatefunction)
-- [`Merge`](/sql-reference/aggregate-functions/combinators#-merge)
-- [`MergeState`](/sql-reference/aggregate-functions/combinators#-mergestate)
diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/avgMergeState.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/avgMergeState.md.hash
deleted file mode 100644
index 572b03035d9..00000000000
--- a/i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/avgMergeState.md.hash
+++ /dev/null
@@ -1 +0,0 @@
-d96bc65bf75836e1
diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/avgResample.md b/i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/avgResample.md
deleted file mode 100644
index 9ace4d3e0dc..00000000000
--- a/i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/avgResample.md
+++ /dev/null
@@ -1,77 +0,0 @@
----
-slug: '/examples/aggregate-function-combinators/avgResample'
-title: 'avgResample'
-description: '平均を利用した Resample combinator の例'
-keywords:
-- 'avg'
-- 'Resample'
-- 'combinator'
-- 'examples'
-- 'avgResample'
-sidebar_label: 'avgResample'
----
-
-
-
-
-# avgResample {#avgResample}
-
-## Description {#description}
-
-[`Resample`](/sql-reference/aggregate-functions/combinators#-resample)
-コンビネータは、指定されたキー列の値を固定数の
-インターバル (`N`) に分割し、各インターバルの値の算術平均を計算するために、[`avg`](/sql-reference/aggregate-functions/reference/avg)
-集約関数に適用できます。
-
-## Example Usage {#example-usage}
-
-### Basic example {#basic-example}
-
-例を見てみましょう。従業員の `name`、`age`、および `wage` を含むテーブルを作成し、
-データを挿入します。
-
-```sql
-CREATE TABLE employee_data
-(
-    name String,
-    age UInt8,
-    wage Float32
-)
-ENGINE = MergeTree()
-ORDER BY tuple()
-
-INSERT INTO employee_data (name, age, wage) VALUES
-    ('John', 16, 10.0),
-    ('Alice', 30, 15.0),
-    ('Mary', 35, 8.0),
-    ('Evelyn', 48, 11.5),
-    ('David', 62, 9.9),
-    ('Brian', 60, 16.0);
-```
-
-年齢が `[30,60)` および `[60,75)` の間にある人々の平均賃金を取得してみましょう
-(`[` は包含的、`)` は排他的です)。整数表現を使用するため、年齢は
-インターバル `[30, 59]` および `[60,74]` になります。これを行うために、`avg`
-集約関数に `Resample` コンビネータを適用します。
-
-```sql
-WITH avg_wage AS
-(
-    SELECT avgResample(30, 75, 30)(wage, age) AS original_avg_wage
-    FROM employee_data
-)
-SELECT
-    arrayMap(x -> round(x, 3), original_avg_wage) AS avg_wage_rounded
-FROM avg_wage;
-```
-
-```response
-┌─avg_wage_rounded─┐
-│ [11.5,12.95]     │
-└──────────────────┘
-```
-
-## See also {#see-also}
-- [`avg`](/sql-reference/aggregate-functions/reference/avg)
-- [`Resample combinator`](/sql-reference/aggregate-functions/combinators#-resample)
diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/avgResample.md.hash
b/i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/avgResample.md.hash deleted file mode 100644 index b7598d110ef..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/avgResample.md.hash +++ /dev/null @@ -1 +0,0 @@ -fe12c7978ae18ae8 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/avgState.md b/i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/avgState.md deleted file mode 100644 index ffbb9597bc3..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/avgState.md +++ /dev/null @@ -1,140 +0,0 @@ ---- -slug: '/examples/aggregate-function-combinators/avgState' -title: 'avgState' -description: 'avgState combinatorの使用例' -keywords: -- 'avg' -- 'state' -- 'combinator' -- 'examples' -- 'avgState' -sidebar_label: 'avgState' ---- - - - - -# avgState {#avgState} - -## 説明 {#description} - -[`State`](/sql-reference/aggregate-functions/combinators#-state) コンビネータは、[`avg`](/sql-reference/aggregate-functions/reference/avg) 関数に適用でき、`AggregateFunction(avg, T)` 型の中間状態を生成します。ここで `T` は、平均のために指定された型です。 - -## 使用例 {#example-usage} - -この例では、`AggregateFunction` 型をどのように使用し、`avgState` 関数と組み合わせてウェブサイトのトラフィックデータを集計するかを見ていきます。 - -まず、ウェブサイトのトラフィックデータのためのソーステーブルを作成します。 - -```sql -CREATE TABLE raw_page_views -( - page_id UInt32, - page_name String, - response_time_ms UInt32, -- ページ応答時間(ミリ秒) - viewed_at DateTime DEFAULT now() -) -ENGINE = MergeTree() -ORDER BY (page_id, viewed_at); -``` - -次に、平均応答時間を保存する集約テーブルを作成します。`avg` は複雑な状態(合計とカウント)を必要とするため、`SimpleAggregateFunction` 型を使用できません。そのため、`AggregateFunction` 型を使用します。 - -```sql -CREATE TABLE page_performance -( - page_id UInt32, - page_name String, - avg_response_time AggregateFunction(avg, UInt32) -- avg 計算に必要な状態を保存 -) -ENGINE = AggregatingMergeTree() -ORDER BY page_id; -``` - -新しいデータの挿入トリガーとして機能し、上記で定義されたターゲットテーブルに中間状態データを保存するインクリメンタルマテリアライズドビューを作成します。 - -```sql -CREATE MATERIALIZED VIEW page_performance_mv -TO page_performance -AS SELECT - page_id, - page_name, - avgState(response_time_ms) AS avg_response_time -- -State コンビネータを使用 -FROM raw_page_views -GROUP BY page_id, page_name; -``` - -ソーステーブルに初期データを挿入し、ディスク上にパーツを作成します。 - -```sql -INSERT INTO raw_page_views (page_id, page_name, response_time_ms) VALUES - (1, 'Homepage', 120), - (1, 'Homepage', 135), - (2, 'Products', 95), - (2, 'Products', 105), - (3, 'About', 80), - (3, 'About', 90); -``` - -ディスク上に2番目のパーツを作成するためにさらにデータを挿入します。 - -```sql -INSERT INTO raw_page_views (page_id, page_name, response_time_ms) VALUES -(1, 'Homepage', 150), -(2, 'Products', 110), -(3, 'About', 70), -(4, 'Contact', 60), -(4, 'Contact', 65); -``` - -ターゲットテーブル `page_performance` を確認します。 - -```sql -SELECT - page_id, - page_name, - avg_response_time, - toTypeName(avg_response_time) -FROM page_performance -``` - -```response -┌─page_id─┬─page_name─┬─avg_response_time─┬─toTypeName(avg_response_time)──┐ -│ 1 │ Homepage │ � │ AggregateFunction(avg, UInt32) │ -│ 2 │ Products │ � │ AggregateFunction(avg, UInt32) │ -│ 3 │ About │ � │ AggregateFunction(avg, UInt32) │ -│ 1 │ Homepage │ � │ AggregateFunction(avg, UInt32) │ -│ 2 │ Products │ n │ AggregateFunction(avg, UInt32) │ -│ 3 │ About │ F │ AggregateFunction(avg, UInt32) │ -│ 4 │ Contact │ } │ AggregateFunction(avg, UInt32) │ -└─────────┴───────────┴───────────────────┴────────────────────────────────┘ -``` - -`avg_response_time` カラムは 
`AggregateFunction(avg, UInt32)` タイプであり、中間状態情報を保存していることに注意してください。また、`avg_response_time` の行データは私たちにとって有用ではなく、`�, n, F, }` などの奇妙な文字が表示されています。これは端末がバイナリデータをテキストとして表示しようとしたためです。この理由は、`AggregateFunction` 型が状態を効率的な保存と計算のために最適化されたバイナリ形式で保存し、人間が読めない形式であるためです。このバイナリ状態は平均を計算するために必要なすべての情報を含んでいます。 - -これを利用するには、`Merge` コンビネータを使用します。 - -```sql -SELECT - page_id, - page_name, - avgMerge(avg_response_time) AS average_response_time_ms -FROM page_performance -GROUP BY page_id, page_name -ORDER BY page_id; -``` - -これで正しい平均が表示されます。 - -```response -┌─page_id─┬─page_name─┬─average_response_time_ms─┐ -│ 1 │ Homepage │ 135 │ -│ 2 │ Products │ 103.33333333333333 │ -│ 3 │ About │ 80 │ -│ 4 │ Contact │ 62.5 │ -└─────────┴───────────┴──────────────────────────┘ -``` - -## 関連項目 {#see-also} -- [`avg`](/sql-reference/aggregate-functions/reference/avg) -- [`State`](/sql-reference/aggregate-functions/combinators#-state) diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/avgState.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/avgState.md.hash deleted file mode 100644 index f0f8a59ca3b..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/avgState.md.hash +++ /dev/null @@ -1 +0,0 @@ -a2baf220b555befe diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/countIf.md b/i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/countIf.md deleted file mode 100644 index de8a7ad0870..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/countIf.md +++ /dev/null @@ -1,60 +0,0 @@ ---- -slug: '/examples/aggregate-function-combinators/countIf' -title: 'countIf' -description: 'countIfコンビネータの使用例' -keywords: -- 'count' -- 'if' -- 'combinator' -- 'examples' -- 'countIf' -sidebar_label: 'countIf' ---- - - - - -# countIf {#countif} - -## 説明 {#description} - -[`If`](/sql-reference/aggregate-functions/combinators#-if) コンビネータは、`count`(行数を数える)関数に適用でき、条件が真である行の数をカウントするために `countIf` 集計コンビネータ関数を使用します。 - -## 使用例 {#example-usage} - -この例では、ユーザーのログイン試行を保存するテーブルを作成し、`countIf` を使用して成功したログインの数をカウントします。 - -```sql title="クエリ" -CREATE TABLE login_attempts( - user_id UInt32, - timestamp DateTime, - is_successful UInt8 -) ENGINE = Log; - -INSERT INTO login_attempts VALUES - (1, '2024-01-01 10:00:00', 1), - (1, '2024-01-01 10:05:00', 0), - (1, '2024-01-01 10:10:00', 1), - (2, '2024-01-01 11:00:00', 1), - (2, '2024-01-01 11:05:00', 1), - (2, '2024-01-01 11:10:00', 0); - -SELECT - user_id, - countIf(is_successful = 1) as successful_logins -FROM login_attempts -GROUP BY user_id; -``` - -`countIf` 関数は、各ユーザーに対して `is_successful = 1` である行のみをカウントします。 - -```response title="レスポンス" - ┌─user_id─┬─successful_logins─┐ -1. │ 1 │ 2 │ -2. 
│ 2 │ 2 │ - └─────────┴───────────────────┘ -``` - -## 関連項目 {#see-also} -- [`count`](/sql-reference/aggregate-functions/reference/count) -- [`If combinator`](/sql-reference/aggregate-functions/combinators#-if) diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/countIf.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/countIf.md.hash deleted file mode 100644 index a1a21b5a24e..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/countIf.md.hash +++ /dev/null @@ -1 +0,0 @@ -784c701de731b506 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/countResample.md b/i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/countResample.md deleted file mode 100644 index 3d75b60deed..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/countResample.md +++ /dev/null @@ -1,64 +0,0 @@ ---- -slug: '/examples/aggregate-function-combinators/countResample' -title: 'countResample' -description: 'countとResampleコンビネータの使用例' -keywords: -- 'count' -- 'Resample' -- 'combinator' -- 'examples' -- 'countResample' -sidebar_label: 'countResample' ---- - - - - -# countResample {#countResample} - -## 説明 {#description} - -[`Resample`](/sql-reference/aggregate-functions/combinators#-resample) -コンビネータは、指定されたキー列の値を固定数の間隔(`N`)でカウントするために、[`count`](/sql-reference/aggregate-functions/reference/count) -集約関数に適用できます。 - -## 使用例 {#example-usage} - -### 基本的な例 {#basic-example} - -例を見てみましょう。`name`、`age`、および`wage`を含むテーブルを作成し、いくつかのデータを挿入します: - -```sql -CREATE TABLE employee_data -( - name String, - age UInt8, - wage Float32 -) -ENGINE = MergeTree() -ORDER BY tuple() - -INSERT INTO employee_data (name, age, wage) VALUES - ('John', 16, 10.0), - ('Alice', 30, 15.0), - ('Mary', 35, 8.0), - ('Evelyn', 48, 11.5), - ('David', 62, 9.9), - ('Brian', 60, 16.0); -``` - -年齢が`[30,60)`および`[60,75)`の間にある人々をカウントしましょう。年齢を整数で表現するため、`[30, 59]`および`[60,74]`の間隔の年齢が得られます。これを行うために、`count`に`Resample`コンビネータを適用します。 - -```sql -SELECT countResample(30, 75, 30)(name, age) AS amount FROM employee_data -``` - -```response -┌─amount─┐ -│ [3,2] │ -└────────┘ -``` - -## 関連項目 {#see-also} -- [`count`](/sql-reference/aggregate-functions/reference/count) -- [`Resample combinator`](/sql-reference/aggregate-functions/combinators#-resample) diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/countResample.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/countResample.md.hash deleted file mode 100644 index 671e6b0b33c..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/countResample.md.hash +++ /dev/null @@ -1 +0,0 @@ -e708c66cb0678e4d diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/groupArrayDistinct.md b/i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/groupArrayDistinct.md deleted file mode 100644 index a5e5535e996..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/groupArrayDistinct.md +++ /dev/null @@ -1,42 +0,0 @@ ---- -slug: '/examples/aggregate-function-combinators/groupArrayDistinct' -title: 'groupArrayDistinct' -description: 
'groupArrayDistinct combinatorの使用例' -keywords: -- 'groupArray' -- 'Distinct' -- 'combinator' -- 'examples' -- 'groupArrayDistinct' -sidebar_label: 'groupArrayDistinct' ---- - - - - -# groupArrayDistinct {#sumdistinct} - -## 説明 {#description} - -[`groupArrayDistinct`](/sql-reference/aggregate-functions/combinators#-foreach) コンビネータは、[`groupArray`](/sql-reference/aggregate-functions/reference/sum) 集約関数に適用して、異なる引数値の配列を作成することができます。 - -## 使用例 {#example-usage} - -この例では、私たちの [SQL playground](https://sql.clickhouse.com/) で利用可能な `hits` データセットを使用します。 - -あなたのウェブサイトで、各異なるランディングページドメイン(`URLDomain`)について、そのドメインに訪れた訪問者のために記録されたすべてのユニークなユーザーエージェントOSコード(`OS`)を知りたいとしましょう。これにより、サイトの異なる部分と相互作用しているオペレーティングシステムの多様性を理解するのに役立ちます。 - -```sql runnable -SELECT - URLDomain, - groupArrayDistinct(OS) AS distinct_os_codes -FROM metrica.hits_v1 -WHERE URLDomain != '' -- 記録されたドメインを持つヒットのみを考慮 -GROUP BY URLDomain -ORDER BY URLDomain ASC -LIMIT 20; -``` - -## 関連情報 {#see-also} -- [`groupArray`](/sql-reference/aggregate-functions/reference/grouparray) -- [`Distinct combinator`](/sql-reference/aggregate-functions/combinators#-distinct) diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/groupArrayDistinct.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/groupArrayDistinct.md.hash deleted file mode 100644 index 7b0032309f0..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/groupArrayDistinct.md.hash +++ /dev/null @@ -1 +0,0 @@ -124812464974da51 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/groupArrayResample.md b/i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/groupArrayResample.md deleted file mode 100644 index 398714f7b6e..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/groupArrayResample.md +++ /dev/null @@ -1,67 +0,0 @@ ---- -slug: '/examples/aggregate-function-combinators/groupArrayResample' -title: 'groupArrayResample' -description: 'groupArrayをResampleコンビネータと共に使用する例' -keywords: -- 'groupArray' -- 'Resample' -- 'combinator' -- 'examples' -- 'groupArrayResample' -sidebar_label: 'groupArrayResample' ---- - - - - -# groupArrayResample {#grouparrayresample} - -## 説明 {#description} - -[`Resample`](/sql-reference/aggregate-functions/combinators#-resample) -コンビネータは、指定されたキー列の範囲を固定数の間隔 (`N`) に分割し、各間隔に該当するデータポイントから最小のキーに対応する代表値を選択して結果の配列を構築するために、[`groupArray`](/sql-reference/aggregate-functions/reference/sum) 集約関数に適用できます。 -これにより、すべての値を収集するのではなく、データのダウンサンプルされたビューが作成されます。 - -## 使用例 {#example-usage} - -例を見てみましょう。従業員の `name`、`age`、`wage` を含むテーブルを作成し、いくつかのデータを挿入します: - -```sql -CREATE TABLE employee_data -( - name String, - age UInt8, - wage Float32 -) ENGINE = MergeTree() -ORDER BY tuple() - -INSERT INTO employee_data (name, age, wage) VALUES - ('John', 16, 10.0), - ('Alice', 30, 15.0), - ('Mary', 35, 8.0), - ('Evelyn', 48, 11.5), - ('David', 62, 9.9), - ('Brian', 60, 16.0); -``` - -年齢が `[30,60)` と `[60,75)` の間にある人々の名前を取得しましょう。 -年齢を整数値で表現するため、`[30, 59]` と `[60,74]` の間隔になります。 - -名前を配列で集約するために、`groupArray` 集約関数を使用します。 -これは1つの引数を取ります。私たちの場合、それは名前の列です。`groupArrayResample` -関数は年齢列を使用して年齢ごとに名前を集約する必要があります。必要な間隔を定義するために、`30`、`75`、`30` を `groupArrayResample` -関数に引数として渡します: - -```sql -SELECT groupArrayResample(30, 75, 30)(name, age) FROM employee_data -``` - -```response -┌─groupArrayResample(30, 75, 30)(name, 
age)─────┐ -│ [['Alice','Mary','Evelyn'],['David','Brian']] │ -└───────────────────────────────────────────────┘ -``` - -## さらに見る {#see-also} -- [`groupArray`](/sql-reference/aggregate-functions/reference/grouparray) -- [`Resample combinator`](/sql-reference/aggregate-functions/combinators#-resample) diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/groupArrayResample.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/groupArrayResample.md.hash deleted file mode 100644 index e080a5b4199..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/groupArrayResample.md.hash +++ /dev/null @@ -1 +0,0 @@ -be43f2c7f85bb84d diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/maxMap.md b/i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/maxMap.md deleted file mode 100644 index 5d0a395da76..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/maxMap.md +++ /dev/null @@ -1,69 +0,0 @@ ---- -slug: '/examples/aggregate-function-combinators/maxMap' -title: 'maxMap' -description: 'maxMapコンビネータの使用例' -keywords: -- 'max' -- 'map' -- 'combinator' -- 'examples' -- 'maxMap' -sidebar_label: 'maxMap' ---- - - - - -# maxMap {#maxmap} - -## 説明 {#description} - -[`Map`](/sql-reference/aggregate-functions/combinators#-map) 組み合わせ関数は、[`max`](/sql-reference/aggregate-functions/reference/max) 関数に適用して、各キーに基づいて Map 内の最大値を計算するために `maxMap` 集約組み合わせ関数を使用できます。 - -## 使用例 {#example-usage} - -この例では、ステータスコードとそれぞれの時間帯におけるカウントを格納するテーブルを作成します。各行には、ステータスコードとその対応するカウントの Map が含まれます。`maxMap` を使用して、各時間帯内の各ステータスコードの最大カウントを見つけます。 - -```sql title="クエリ" -CREATE TABLE metrics( - date Date, - timeslot DateTime, - status Map(String, UInt64) -) ENGINE = Log; - -INSERT INTO metrics VALUES - ('2000-01-01', '2000-01-01 00:00:00', (['a', 'b', 'c'], [15, 25, 35])), - ('2000-01-01', '2000-01-01 00:00:00', (['c', 'd', 'e'], [45, 55, 65])), - ('2000-01-01', '2000-01-01 00:01:00', (['d', 'e', 'f'], [75, 85, 95])), - ('2000-01-01', '2000-01-01 00:01:00', (['f', 'g', 'g'], [105, 115, 125])); - -SELECT - timeslot, - maxMap(status), -FROM metrics -GROUP BY timeslot; -``` - -`maxMap` 関数は、各時間帯内の各ステータスコードの最大カウントを見つけます。例えば: -- 時間帯 '2000-01-01 00:00:00': - - ステータス 'a': 15 - - ステータス 'b': 25 - - ステータス 'c': max(35, 45) = 45 - - ステータス 'd': 55 - - ステータス 'e': 65 -- 時間帯 '2000-01-01 00:01:00': - - ステータス 'd': 75 - - ステータス 'e': 85 - - ステータス 'f': max(95, 105) = 105 - - ステータス 'g': max(115, 125) = 125 - -```response title="レスポンス" - ┌────────────timeslot─┬─maxMap(status)───────────────────────┐ -1. │ 2000-01-01 00:01:00 │ {'d':75,'e':85,'f':105,'g':125} │ -2. 
│ 2000-01-01 00:00:00 │ {'a':15,'b':25,'c':45,'d':55,'e':65} │
-   └─────────────────────┴──────────────────────────────────────┘
-```
-
-## 参照 {#see-also}
-- [`max`](/sql-reference/aggregate-functions/reference/max)
-- [`Map 組み合わせ関数`](/sql-reference/aggregate-functions/combinators#-map)
diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/maxMap.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/maxMap.md.hash
deleted file mode 100644
index bc2e21ceafc..00000000000
--- a/i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/maxMap.md.hash
+++ /dev/null
@@ -1 +0,0 @@
-033162d8743415a1
diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/maxSimpleState.md b/i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/maxSimpleState.md
deleted file mode 100644
index ab61bf9dda0..00000000000
--- a/i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/maxSimpleState.md
+++ /dev/null
@@ -1,31 +0,0 @@
----
-slug: '/examples/aggregate-function-combinators/maxSimpleState'
-title: 'maxSimpleState'
-description: 'maxSimpleState combinator の使用例'
-keywords:
-- 'max'
-- 'state'
-- 'simple'
-- 'combinator'
-- 'examples'
-- 'maxSimpleState'
-sidebar_label: 'maxSimpleState'
----
-
-
-
-# maxSimpleState {#maxsimplestate}
-
-## Description {#description}
-
-[`SimpleState`](/sql-reference/aggregate-functions/combinators#-simplestate) コンビネータは、[`max`](/sql-reference/aggregate-functions/reference/max) 関数に適用され、すべての入力値の中で最大値を返します。結果は [`SimpleAggregateFunction`](/sql-reference/data-types/simpleaggregatefunction) 型で返されます。
-
-## Example Usage {#example-usage}
-
-[`minSimpleState`](/examples/aggregate-function-combinators/minSimpleState/#example-usage) に示されている例は、`maxSimpleState` と `minSimpleState` の両方の使用法を示しています。
-
-## See also {#see-also}
-- [`max`](/sql-reference/aggregate-functions/reference/max)
-- [`SimpleState combinator`](/sql-reference/aggregate-functions/combinators#-simplestate)
-- [`SimpleAggregateFunction type`](/sql-reference/data-types/simpleaggregatefunction)
diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/maxSimpleState.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/maxSimpleState.md.hash
deleted file mode 100644
index ce27a70a6e9..00000000000
--- a/i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/maxSimpleState.md.hash
+++ /dev/null
@@ -1 +0,0 @@
-54578a0772b2d5bb
diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/minMap.md b/i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/minMap.md
deleted file mode 100644
index eb86865cf0d..00000000000
--- a/i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/minMap.md
+++ /dev/null
@@ -1,69 +0,0 @@
----
-slug: '/examples/aggregate-function-combinators/minMap'
-title: 'minMap'
-description: 'minMap combinatorの使用例'
-keywords:
-- 'min'
-- 'map'
-- 'combinator'
-- 'examples'
-- 'minMap'
-sidebar_label: 'minMap'
----
-
-
-
-# minMap {#minmap}
-
-## 説明 {#description}
-
-[`Map`](/sql-reference/aggregate-functions/combinators#-map) コンビネータは、`minMap` 集約コンビネータ関数を使用して、各キーに基づいて Map の最小値を計算するために、[`min`](/sql-reference/aggregate-functions/reference/min) 関数に適用できます。
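Before the full Map-column example that follows, here is a minimal standalone sketch of the same idea using the two-array call form `minMap(keys, values)`; the literal keys/values and the use of the `values()` table function are illustrative assumptions and are not part of the original guide.

```sql
-- Minimal sketch (assumed standalone query): per-key minimums taken across rows
-- of parallel key/value arrays, using the two-array form minMap(keys, values).
SELECT minMap(k, v) AS per_key_min
FROM values('k Array(String), v Array(UInt32)',
            (['a', 'b'], [10, 20]),
            (['a', 'c'], [5, 30]));

-- Expected shape of the result: (['a','b','c'],[5,20,30])
-- i.e. keys in sorted order, each paired with its minimum value.
```

The Map-column form used in the example below behaves the same way, but reads its keys and values from a `Map` column instead of two separate arrays.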
- -## 例の使用法 {#example-usage} - -この例では、ステータスコードとそれぞれの時間帯におけるカウントを格納するテーブルを作成します。各行には、ステータスコードとその対応するカウントの Map が含まれます。`minMap` を使用して、各時間帯内の各ステータスコードの最小カウントを見つけます。 - -```sql title="クエリ" -CREATE TABLE metrics( - date Date, - timeslot DateTime, - status Map(String, UInt64) -) ENGINE = Log; - -INSERT INTO metrics VALUES - ('2000-01-01', '2000-01-01 00:00:00', (['a', 'b', 'c'], [15, 25, 35])), - ('2000-01-01', '2000-01-01 00:00:00', (['c', 'd', 'e'], [45, 55, 65])), - ('2000-01-01', '2000-01-01 00:01:00', (['d', 'e', 'f'], [75, 85, 95])), - ('2000-01-01', '2000-01-01 00:01:00', (['f', 'g', 'g'], [105, 115, 125])); - -SELECT - timeslot, - minMap(status), -FROM metrics -GROUP BY timeslot; -``` - -`minMap` 関数は、各時間帯内の各ステータスコードの最小カウントを見つけます。例えば: -- 時間帯 '2000-01-01 00:00:00': - - ステータス 'a': 15 - - ステータス 'b': 25 - - ステータス 'c': min(35, 45) = 35 - - ステータス 'd': 55 - - ステータス 'e': 65 -- 時間帯 '2000-01-01 00:01:00': - - ステータス 'd': 75 - - ステータス 'e': 85 - - ステータス 'f': min(95, 105) = 95 - - ステータス 'g': min(115, 125) = 115 - -```response title="応答" - ┌────────────timeslot─┬─minMap(status)───────────────────────┐ -1. │ 2000-01-01 00:01:00 │ {'d':75,'e':85,'f':95,'g':115} │ -2. │ 2000-01-01 00:00:00 │ {'a':15,'b':25,'c':35,'d':55,'e':65} │ - └─────────────────────┴──────────────────────────────────────┘ -``` - -## 関連項目 {#see-also} -- [`min`](/sql-reference/aggregate-functions/reference/min) -- [`Map combinator`](/sql-reference/aggregate-functions/combinators#-map) diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/minMap.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/minMap.md.hash deleted file mode 100644 index 8cd713a4603..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/minMap.md.hash +++ /dev/null @@ -1 +0,0 @@ -0bb2296305f2b360 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/minSimpleState.md b/i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/minSimpleState.md deleted file mode 100644 index 760f5f73197..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/minSimpleState.md +++ /dev/null @@ -1,169 +0,0 @@ ---- -slug: '/examples/aggregate-function-combinators/minSimpleState' -title: 'minSimpleState' -description: 'minSimpleState combinator の使用例' -keywords: -- 'min' -- 'state' -- 'simple' -- 'combinator' -- 'examples' -- 'minSimpleState' -sidebar_label: 'minSimpleState' ---- - - - - - -# minSimpleState {#minsimplestate} - -## 説明 {#description} - -[`SimpleState`](/sql-reference/aggregate-functions/combinators#-simplestate) コンビネーターは、[`min`](/sql-reference/aggregate-functions/reference/min) 関数に適用され、すべての入力値の中で最小値を返します。結果は [`SimpleAggregateFunction`](/sql-reference/data-types/simpleaggregatefunction) 型で返されます。 - -## 使用例 {#example-usage} - -日々の温度測定を追跡するテーブルを使用した実用的な例を見てみましょう。各場所の記録された最低温度を維持したいと思います。`SimpleAggregateFunction` 型を `min` と共に使用すると、より低い温度が記録されると自動的に保存された値が更新されます。 - -生の温度測定のソーステーブルを作成します: - -```sql -CREATE TABLE raw_temperature_readings -( - location_id UInt32, - location_name String, - temperature Int32, - recorded_at DateTime DEFAULT now() -) - ENGINE = MergeTree() -ORDER BY (location_id, recorded_at); -``` - -最小温度を格納する集計テーブルを作成します: - -```sql -CREATE TABLE temperature_extremes -( - location_id UInt32, - location_name String, - min_temp SimpleAggregateFunction(min, Int32), -- 
最小温度を格納 - max_temp SimpleAggregateFunction(max, Int32) -- 最大温度を格納 -) -ENGINE = AggregatingMergeTree() -ORDER BY location_id; -``` - -挿入されたデータのトリガーとして機能し、各場所の最小および最大温度を維持するインクリメンタルマテリアライズドビューを作成します。 - -```sql -CREATE MATERIALIZED VIEW temperature_extremes_mv -TO temperature_extremes -AS SELECT - location_id, - location_name, - minSimpleState(temperature) AS min_temp, -- SimpleState コンビネーターを使用 - maxSimpleState(temperature) AS max_temp -- SimpleState コンビネーターを使用 -FROM raw_temperature_readings -GROUP BY location_id, location_name; -``` - -初期の温度測定を挿入します: - -```sql -INSERT INTO raw_temperature_readings (location_id, location_name, temperature) VALUES -(1, 'North', 5), -(2, 'South', 15), -(3, 'West', 10), -(4, 'East', 8); -``` - -これらの測定はマテリアライズドビューによって自動的に処理されます。現在の状態を確認しましょう: - -```sql -SELECT - location_id, - location_name, - min_temp, -- SimpleAggregateFunction の値に直接アクセス - max_temp -- SimpleAggregateFunction にはファイナライズ関数は不要 -FROM temperature_extremes -ORDER BY location_id; -``` - -```response -┌─location_id─┬─location_name─┬─min_temp─┬─max_temp─┐ -│ 1 │ North │ 5 │ 5 │ -│ 2 │ South │ 15 │ 15 │ -│ 3 │ West │ 10 │ 10 │ -│ 4 │ East │ 8 │ 8 │ -└─────────────┴───────────────┴──────────┴──────────┘ -``` - -データをさらに挿入します: - -```sql -INSERT INTO raw_temperature_readings (location_id, location_name, temperature) VALUES - (1, 'North', 3), - (2, 'South', 18), - (3, 'West', 10), - (1, 'North', 8), - (4, 'East', 2); -``` - -新しいデータの後の更新された極値を表示します: - -```sql -SELECT - location_id, - location_name, - min_temp, - max_temp -FROM temperature_extremes -ORDER BY location_id; -``` - -```response -┌─location_id─┬─location_name─┬─min_temp─┬─max_temp─┐ -│ 1 │ North │ 3 │ 8 │ -│ 1 │ North │ 5 │ 5 │ -│ 2 │ South │ 18 │ 18 │ -│ 2 │ South │ 15 │ 15 │ -│ 3 │ West │ 10 │ 10 │ -│ 3 │ West │ 10 │ 10 │ -│ 4 │ East │ 2 │ 2 │ -│ 4 │ East │ 8 │ 8 │ -└─────────────┴───────────────┴──────────┴──────────┘ -``` - -上記のように、各場所に対して2つの挿入値があることに注意してください。これは、パーツがまだマージされていない(`AggregatingMergeTree` によって集約されていない)ためです。部分状態から最終結果を得るには、`GROUP BY` を追加する必要があります: - -```sql -SELECT - location_id, - location_name, - min(min_temp) AS min_temp, -- すべてのパーツを横断して集約 - max(max_temp) AS max_temp -- すべてのパーツを横断して集約 -FROM temperature_extremes -GROUP BY location_id, location_name -ORDER BY location_id; -``` - -期待される結果が得られます: - -```sql -┌─location_id─┬─location_name─┬─min_temp─┬─max_temp─┐ -│ 1 │ North │ 3 │ 8 │ -│ 2 │ South │ 15 │ 18 │ -│ 3 │ West │ 10 │ 10 │ -│ 4 │ East │ 2 │ 8 │ -└─────────────┴───────────────┴──────────┴──────────┘ -``` - -:::note -`SimpleState` を使用すると、部分集計状態を結合するために `Merge` コンビネーターを使用する必要はありません。 -::: - -## 参照 {#see-also} -- [`min`](/sql-reference/aggregate-functions/reference/min) -- [`SimpleState コンビネーター`](/sql-reference/aggregate-functions/combinators#-simplestate) -- [`SimpleAggregateFunction 型`](/sql-reference/data-types/simpleaggregatefunction) diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/minSimpleState.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/minSimpleState.md.hash deleted file mode 100644 index abf03ca274e..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/minSimpleState.md.hash +++ /dev/null @@ -1 +0,0 @@ -70ad805946f7ac26 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/quantilesTimingArrayIf.md 
b/i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/quantilesTimingArrayIf.md deleted file mode 100644 index dbded669811..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/quantilesTimingArrayIf.md +++ /dev/null @@ -1,70 +0,0 @@ ---- -slug: '/examples/aggregate-function-combinators/quantilesTimingArrayIf' -title: 'quantilesTimingArrayIf' -description: 'Example of using the quantilesTimingArrayIf combinator' -keywords: -- 'quantilesTiming' -- 'array' -- 'if' -- 'combinator' -- 'examples' -- 'quantilesTimingArrayIf' -sidebar_label: 'quantilesTimingArrayIf' ---- - - - - -# quantilesTimingArrayIf {#quantilestimingarrayif} - -## 説明 {#description} - -[`Array`](/sql-reference/aggregate-functions/combinators#-array) および [`If`](/sql-reference/aggregate-functions/combinators#-if) -コンビネータは、条件が真である行の配列内のタイミング値の分位数を計算するために、[`quantilesTiming`](/sql-reference/aggregate-functions/reference/quantiletiming) -関数に適用することができ、`quantilesTimingArrayIf` アグリゲートコンビネータ関数を使用します。 - -## 使用例 {#example-usage} - -この例では、異なるエンドポイントのAPIレスポンスタイムを保存するテーブルを作成し、 -成功したリクエストのレスポンスタイムの分位数を計算するために `quantilesTimingArrayIf` を使用します。 - -```sql title="クエリ" -CREATE TABLE api_responses( - endpoint String, - response_times_ms Array(UInt32), - success_rate Float32 -) ENGINE = Log; - -INSERT INTO api_responses VALUES - ('orders', [82, 94, 98, 87, 103, 92, 89, 105], 0.98), - ('products', [45, 52, 48, 51, 49, 53, 47, 50], 0.95), - ('users', [120, 125, 118, 122, 121, 119, 123, 124], 0.92); - -SELECT - endpoint, - quantilesTimingArrayIf(0, 0.25, 0.5, 0.75, 0.95, 0.99, 1.0)(response_times_ms, success_rate >= 0.95) as response_time_quantiles -FROM api_responses -GROUP BY endpoint; -``` - -`quantilesTimingArrayIf` 関数は、成功率が95%を超えるエンドポイントのみの分位数を計算します。 -戻り値の配列には、次の分位数が順番に含まれています: -- 0 (最小値) -- 0.25 (第1四分位数) -- 0.5 (中央値) -- 0.75 (第3四分位数) -- 0.95 (95パーセンタイル) -- 0.99 (99パーセンタイル) -- 1.0 (最大値) - -```response title="レスポンス" - ┌─endpoint─┬─response_time_quantiles─────────────────────────────────────────────┐ -1. │ orders │ [82, 87, 92, 98, 103, 104, 105] │ -2. │ products │ [45, 47, 49, 51, 52, 52, 53] │ -3. 
│ users │ [nan, nan, nan, nan, nan, nan, nan] │ - └──────────┴─────────────────────────────────────────────────────────────────────┘ -``` - -## 関連リンク {#see-also} -- [`quantilesTiming`](/sql-reference/aggregate-functions/reference/quantiletiming) -- [`If combinator`](/sql-reference/aggregate-functions/combinators#-if) diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/quantilesTimingArrayIf.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/quantilesTimingArrayIf.md.hash deleted file mode 100644 index 855017b0318..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/quantilesTimingArrayIf.md.hash +++ /dev/null @@ -1 +0,0 @@ -d98ffbf12b53fdd0 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/quantilesTimingIf.md b/i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/quantilesTimingIf.md deleted file mode 100644 index ddd8c3a7e51..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/quantilesTimingIf.md +++ /dev/null @@ -1,86 +0,0 @@ ---- -slug: '/examples/aggregate-function-combinators/quantilesTimingIf' -title: 'quantilesTimingIf' -description: '使用quantilesTimingIf結合子的示例' -keywords: -- 'quantilesTiming' -- 'if' -- 'combinator' -- 'examples' -- 'quantilesTimingIf' -sidebar_label: 'quantilesTimingIf' ---- - - - - -# quantilesTimingIf {#quantilestimingif} - -## Description {#description} - -[`If`](/sql-reference/aggregate-functions/combinators#-if)コンビネータは、`quantilesTiming`関数に適用でき、条件が真である行のタイミング値の分位数を計算するために、`quantilesTimingIf`集計コンビネータ関数を使用します。 - -## Example Usage {#example-usage} - -この例では、異なるエンドポイントのAPI応答時間を格納するテーブルを作成し、成功したリクエストの応答時間の分位数を計算するために`quantilesTimingIf`を使用します。 - -```sql title="Query" -CREATE TABLE api_responses( - endpoint String, - response_time_ms UInt32, - is_successful UInt8 -) ENGINE = Log; - -INSERT INTO api_responses VALUES - ('orders', 82, 1), - ('orders', 94, 1), - ('orders', 98, 1), - ('orders', 87, 1), - ('orders', 103, 1), - ('orders', 92, 1), - ('orders', 89, 1), - ('orders', 105, 1), - ('products', 45, 1), - ('products', 52, 1), - ('products', 48, 1), - ('products', 51, 1), - ('products', 49, 1), - ('products', 53, 1), - ('products', 47, 1), - ('products', 50, 1), - ('users', 120, 0), - ('users', 125, 0), - ('users', 118, 0), - ('users', 122, 0), - ('users', 121, 0), - ('users', 119, 0), - ('users', 123, 0), - ('users', 124, 0); - -SELECT - endpoint, - quantilesTimingIf(0, 0.25, 0.5, 0.75, 0.95, 0.99, 1.0)(response_time_ms, is_successful = 1) as response_time_quantiles -FROM api_responses -GROUP BY endpoint; -``` - -`quantilesTimingIf`関数は、成功したリクエスト(is_successful = 1)のみに対して分位数を計算します。返される配列には、次の順序で分位数が含まれます: -- 0 (最小値) -- 0.25 (第一四分位数) -- 0.5 (中央値) -- 0.75 (第三四分位数) -- 0.95 (95パーセンタイル) -- 0.99 (99パーセンタイル) -- 1.0 (最大値) - -```response title="Response" - ┌─endpoint─┬─response_time_quantiles─────────────────────────────────────────────┐ -1. │ orders │ [82, 87, 92, 98, 103, 104, 105] │ -2. │ products │ [45, 47, 49, 51, 52, 52, 53] │ -3. 
│ users │ [nan, nan, nan, nan, nan, nan, nan] │ - └──────────┴─────────────────────────────────────────────────────────────────────┘ -``` - -## See also {#see-also} -- [`quantilesTiming`](/sql-reference/aggregate-functions/reference/quantiletiming) -- [`If combinator`](/sql-reference/aggregate-functions/combinators#-if) diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/quantilesTimingIf.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/quantilesTimingIf.md.hash deleted file mode 100644 index ed523856931..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/quantilesTimingIf.md.hash +++ /dev/null @@ -1 +0,0 @@ -a892771754f3fd35 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/sumArray.md b/i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/sumArray.md deleted file mode 100644 index 6b46a53dc6a..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/sumArray.md +++ /dev/null @@ -1,56 +0,0 @@ ---- -slug: '/examples/aggregate-function-combinators/sumArray' -title: 'sumArray' -description: 'sumArray combinatorの使用例' -keywords: -- 'sum' -- 'array' -- 'combinator' -- 'examples' -- 'sumArray' -sidebar_label: 'sumArray' ---- - - - - -# sumArray {#sumarray} - -## 説明 {#description} - -[`Array`](/sql-reference/aggregate-functions/combinators#-array) コンビネータは、[`sum`](/sql-reference/aggregate-functions/reference/sum) 関数に適用して、配列のすべての要素の合計を計算するために `sumArray` 集計コンビネータ関数を使用できます。 - -`sumArray` 関数は、データセット内の複数の配列にわたるすべての要素の合計を計算する必要がある場合に便利です。 - -## 使用例 {#example-usage} - -この例では、異なる製品カテゴリーでのデイリー売上のサンプルデータセットを使用して、`sumArray` の動作を示します。各日のすべてのカテゴリーにおける総売上を計算します。 - -```sql title="クエリ" -CREATE TABLE daily_category_sales -( - date Date, - category_sales Array(UInt32) -) ENGINE = Memory; - -INSERT INTO daily_category_sales VALUES - ('2024-01-01', [100, 200, 150]), - ('2024-01-02', [120, 180, 160]), - ('2024-01-03', [90, 220, 140]); - -SELECT - date, - category_sales, - sumArray(category_sales) as total_sales_sumArray, - sum(arraySum(category_sales)) as total_sales_arraySum -FROM daily_category_sales -GROUP BY date, category_sales; -``` - -`sumArray` 関数は、各 `category_sales` 配列内のすべての要素を合計します。例えば、`2024-01-01` では、`100 + 200 + 150 = 450` と合計します。これは `arraySum` と同じ結果を与えます。 - -## 関連情報 {#see-also} -- [`sum`](/sql-reference/aggregate-functions/reference/sum) -- [`arraySum`](/sql-reference/functions/array-functions#arraysum) -- [`Array combinator`](/sql-reference/aggregate-functions/combinators#-array) -- [`sumMap`](/examples/aggregate-function-combinators/sumMap) diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/sumArray.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/sumArray.md.hash deleted file mode 100644 index 9062f3c1315..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/sumArray.md.hash +++ /dev/null @@ -1 +0,0 @@ -2a1e57f1d0141ab8 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/sumForEach.md b/i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/sumForEach.md deleted file mode 100644 index ca6a9ba6b3b..00000000000 --- 
a/i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/sumForEach.md +++ /dev/null @@ -1,50 +0,0 @@ ---- -slug: '/examples/aggregate-function-combinators/sumForEach' -title: 'sumForEach' -description: 'Example of using the sumArray combinator' -keywords: -- 'sum' -- 'array' -- 'combinator' -- 'examples' -- 'sumArray' -sidebar_label: 'sumArray' ---- - - - - -# sumArray {#sumforeach} - -## 説明 {#description} - -[`ForEach`](/sql-reference/aggregate-functions/combinators#-foreach) コンビネータは、[`sum`](/sql-reference/aggregate-functions/reference/sum) 集約関数に適用することができ、行の値に対して動作する集約関数を、行を跨いで配列カラム内の各要素に集約を適用する集約関数に変換します。 - -## 使用例 {#example-usage} - -この例では、私たちの [SQL playground](https://sql.clickhouse.com/) で利用可能な `hits` データセットを使用します。 - -`hits` テーブルには、UInt8 型の `isMobile` というカラムが含まれており、デスクトップの場合は `0`、モバイルの場合は `1` です: - -```sql runnable -SELECT EventTime, IsMobile FROM metrica.hits ORDER BY rand() LIMIT 10 -``` - -`sumForEach` 集約コンビネータ関数を使用して、デスクトップトラフィックとモバイルトラフィックが時間帯に応じてどのように変化するかを分析します。以下の再生ボタンをクリックして、クエリをインタラクティブに実行してください: - -```sql runnable -SELECT - toHour(EventTime) AS hour_of_day, - -- sumForEach を使用してデスクトップとモバイルの訪問を一度でカウント - sumForEach([ - IsMobile = 0, -- デスクトップ訪問 (IsMobile = 0) - IsMobile = 1 -- モバイル訪問 (IsMobile = 1) - ]) AS device_counts -FROM metrica.hits -GROUP BY hour_of_day -ORDER BY hour_of_day; -``` - -## 関連項目 {#see-also} -- [`sum`](/sql-reference/aggregate-functions/reference/sum) -- [`ForEach combinator`](/sql-reference/aggregate-functions/combinators#-foreach) diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/sumForEach.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/sumForEach.md.hash deleted file mode 100644 index 86ed867548c..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/sumForEach.md.hash +++ /dev/null @@ -1 +0,0 @@ -20475ed33a7d4fcf diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/sumIf.md b/i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/sumIf.md deleted file mode 100644 index 2a553e24126..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/sumIf.md +++ /dev/null @@ -1,126 +0,0 @@ ---- -slug: '/examples/aggregate-function-combinators/sumIf' -title: 'sumIf' -description: 'sumIfコンビネータの使用例' -keywords: -- 'sum' -- 'if' -- 'combinator' -- 'examples' -- 'sumIf' -sidebar_label: 'sumIf' ---- - - - - -# sumIf {#sumif} - -## 説明 {#description} - -[`If`](/sql-reference/aggregate-functions/combinators#-if) コンビネータは、条件が真である行の値の合計を計算するために、[`sum`](/sql-reference/aggregate-functions/reference/sum) 関数に適用できます。このために `sumIf` 集約コンビネータ関数を使用します。 - -## 使用例 {#example-usage} - -この例では、成功フラグを持つ売上データを保存するテーブルを作成し、`sumIf` を使用して成功したトランザクションの総売上額を計算します。 - -```sql title="クエリ" -CREATE TABLE sales( - transaction_id UInt32, - amount Decimal(10,2), - is_successful UInt8 -) ENGINE = Log; - -INSERT INTO sales VALUES - (1, 100.50, 1), - (2, 200.75, 1), - (3, 150.25, 0), - (4, 300.00, 1), - (5, 250.50, 0), - (6, 175.25, 1); - -SELECT - sumIf(amount, is_successful = 1) as total_successful_sales -FROM sales; -``` - -`sumIf` 関数は `is_successful = 1` の場合の金額のみを合計します。この場合、合計するのは: 100.50 + 200.75 + 300.00 + 175.25 になります。 - -```response title="レスポンス" - ┌─total_successful_sales─┐ -1. 
│ 776.50 │ - └───────────────────────┘ -``` - -### 価格方向による取引量の計算 {#calculate-trading-vol-price-direction} - -この例では、[ClickHouse playground](https://sql.clickhouse.com/) で入手可能な `stock` テーブルを使用して、2002年の上半期の価格方向による取引量を計算します。 - -```sql title="クエリ" -SELECT - toStartOfMonth(date) AS month, - formatReadableQuantity(sumIf(volume, price > open)) AS volume_on_up_days, - formatReadableQuantity(sumIf(volume, price < open)) AS volume_on_down_days, - formatReadableQuantity(sumIf(volume, price = open)) AS volume_on_neutral_days, - formatReadableQuantity(sum(volume)) AS total_volume -FROM stock.stock -WHERE date BETWEEN '2002-01-01' AND '2002-12-31' -GROUP BY month -ORDER BY month; -``` - -```markdown - ┌──────month─┬─volume_on_up_days─┬─volume_on_down_days─┬─volume_on_neutral_days─┬─total_volume──┐ - 1. │ 2002-01-01 │ 26.07 billion │ 30.74 billion │ 781.80 million │ 57.59 billion │ - 2. │ 2002-02-01 │ 20.84 billion │ 29.60 billion │ 642.36 million │ 51.09 billion │ - 3. │ 2002-03-01 │ 28.81 billion │ 23.57 billion │ 762.60 million │ 53.14 billion │ - 4. │ 2002-04-01 │ 24.72 billion │ 30.99 billion │ 763.92 million │ 56.47 billion │ - 5. │ 2002-05-01 │ 25.09 billion │ 30.57 billion │ 858.57 million │ 56.52 billion │ - 6. │ 2002-06-01 │ 29.10 billion │ 30.88 billion │ 875.71 million │ 60.86 billion │ - 7. │ 2002-07-01 │ 32.27 billion │ 41.73 billion │ 747.32 million │ 74.75 billion │ - 8. │ 2002-08-01 │ 28.57 billion │ 27.49 billion │ 1.17 billion │ 57.24 billion │ - 9. │ 2002-09-01 │ 23.37 billion │ 31.02 billion │ 775.66 million │ 55.17 billion │ -10. │ 2002-10-01 │ 38.57 billion │ 34.05 billion │ 956.48 million │ 73.57 billion │ -11. │ 2002-11-01 │ 34.90 billion │ 25.47 billion │ 998.34 million │ 61.37 billion │ -12. │ 2002-12-01 │ 22.99 billion │ 28.65 billion │ 1.14 billion │ 52.79 billion │ - └────────────┴───────────────────┴─────────────────────┴────────────────────────┴───────────────┘ -``` - -### 銘柄別の取引量の計算 {#calculate-trading-volume} - -この例では、[ClickHouse playground](https://sql.clickhouse.com/) で入手可能な `stock` テーブルを使用して、2006年の当時の3つの大手テクノロジー企業の銘柄別の取引量を計算します。 - -```sql title="クエリ" -SELECT - toStartOfMonth(date) AS month, - formatReadableQuantity(sumIf(volume, symbol = 'AAPL')) AS apple_volume, - formatReadableQuantity(sumIf(volume, symbol = 'MSFT')) AS microsoft_volume, - formatReadableQuantity(sumIf(volume, symbol = 'GOOG')) AS google_volume, - sum(volume) AS total_volume, - round(sumIf(volume, symbol IN ('AAPL', 'MSFT', 'GOOG')) / sum(volume) * 100, 2) AS major_tech_percentage -FROM stock.stock -WHERE date BETWEEN '2006-01-01' AND '2006-12-31' -GROUP BY month -ORDER BY month; -``` - -```markdown title="レスポンス" - ┌──────month─┬─apple_volume───┬─microsoft_volume─┬─google_volume──┬─total_volume─┬─major_tech_percentage─┐ - 1. │ 2006-01-01 │ 782.21 million │ 1.39 billion │ 299.69 million │ 84343937700 │ 2.93 │ - 2. │ 2006-02-01 │ 670.38 million │ 1.05 billion │ 297.65 million │ 73524748600 │ 2.74 │ - 3. │ 2006-03-01 │ 744.85 million │ 1.39 billion │ 288.36 million │ 87960830800 │ 2.75 │ - 4. │ 2006-04-01 │ 718.97 million │ 1.45 billion │ 185.65 million │ 78031719800 │ 3.02 │ - 5. │ 2006-05-01 │ 557.89 million │ 2.32 billion │ 174.94 million │ 97096584100 │ 3.14 │ - 6. │ 2006-06-01 │ 641.48 million │ 1.98 billion │ 142.55 million │ 96304086800 │ 2.87 │ - 7. │ 2006-07-01 │ 624.93 million │ 1.33 billion │ 127.74 million │ 79940921800 │ 2.61 │ - 8. │ 2006-08-01 │ 639.35 million │ 1.13 billion │ 107.16 million │ 84251753200 │ 2.23 │ - 9. 
│ 2006-09-01 │ 633.45 million │ 1.10 billion │ 121.72 million │ 82775234300 │ 2.24 │ -10. │ 2006-10-01 │ 514.82 million │ 1.29 billion │ 158.90 million │ 93406712600 │ 2.1 │ -11. │ 2006-11-01 │ 494.37 million │ 1.24 billion │ 118.49 million │ 90177365500 │ 2.06 │ -12. │ 2006-12-01 │ 603.95 million │ 1.14 billion │ 91.77 million │ 80499584100 │ 2.28 │ - └────────────┴────────────────┴──────────────────┴────────────────┴──────────────┴───────────────────────┘ -``` - -## 参照 {#see-also} -- [`sum`](/sql-reference/aggregate-functions/reference/sum) -- [`Ifコンビネータ`](/sql-reference/aggregate-functions/combinators#-if) diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/sumIf.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/sumIf.md.hash deleted file mode 100644 index be094b7d3ee..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/sumIf.md.hash +++ /dev/null @@ -1 +0,0 @@ -0463fb6572766d08 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/sumMap.md b/i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/sumMap.md deleted file mode 100644 index a78391de627..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/sumMap.md +++ /dev/null @@ -1,69 +0,0 @@ ---- -slug: '/examples/aggregate-function-combinators/sumMap' -title: 'sumMap' -description: 'sumMap combinator の使用例' -keywords: -- 'sum' -- 'map' -- 'combinator' -- 'examples' -- 'sumMap' -sidebar_label: 'sumMap' ---- - - - - -# sumMap {#summap} - -## Description {#description} - -[`Map`](/sql-reference/aggregate-functions/combinators#-map) コンビネータは、`sum`(/sql-reference/aggregate-functions/reference/sum) 関数に適用して、各キーに従った Map の値の合計を計算するために、`sumMap` 集約コンビネータ関数を使用できます。 - -## Example Usage {#example-usage} - -この例では、異なるタイムスロット用のステータスコードとそのカウントを格納するテーブルを作成します。各行にはステータスコードと対応するカウントの Map が含まれています。`sumMap` を使用して、各タイムスロット内の各ステータスコードの合計カウントを計算します。 - -```sql title="Query" -CREATE TABLE metrics( - date Date, - timeslot DateTime, - status Map(String, UInt64) -) ENGINE = Log; - -INSERT INTO metrics VALUES - ('2000-01-01', '2000-01-01 00:00:00', (['a', 'b', 'c'], [15, 25, 35])), - ('2000-01-01', '2000-01-01 00:00:00', (['c', 'd', 'e'], [45, 55, 65])), - ('2000-01-01', '2000-01-01 00:01:00', (['d', 'e', 'f'], [75, 85, 95])), - ('2000-01-01', '2000-01-01 00:01:00', (['f', 'g', 'g'], [105, 115, 125])); - -SELECT - timeslot, - sumMap(status), -FROM metrics -GROUP BY timeslot; -``` - -`sumMap` 関数は、各タイムスロット内の各ステータスコードの合計カウントを計算します。例えば: -- タイムスロット '2000-01-01 00:00:00': - - ステータス 'a': 15 - - ステータス 'b': 25 - - ステータス 'c': 35 + 45 = 80 - - ステータス 'd': 55 - - ステータス 'e': 65 -- タイムスロット '2000-01-01 00:01:00': - - ステータス 'd': 75 - - ステータス 'e': 85 - - ステータス 'f': 95 + 105 = 200 - - ステータス 'g': 115 + 125 = 240 - -```response title="Response" - ┌────────────timeslot─┬─sumMap(status)───────────────────────┐ -1. │ 2000-01-01 00:01:00 │ {'d':75,'e':85,'f':200,'g':240} │ -2. 
│ 2000-01-01 00:00:00 │ {'a':15,'b':25,'c':80,'d':55,'e':65} │ - └─────────────────────┴──────────────────────────────────────┘ -``` - -## See also {#see-also} -- [`sum`](/sql-reference/aggregate-functions/reference/sum) -- [`Map combinator`](/sql-reference/aggregate-functions/combinators#-map) diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/sumMap.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/sumMap.md.hash deleted file mode 100644 index 047baa87fab..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/sumMap.md.hash +++ /dev/null @@ -1 +0,0 @@ -002bae85cbe2d7c5 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/sumSimpleState.md b/i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/sumSimpleState.md deleted file mode 100644 index d14c12f77e8..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/sumSimpleState.md +++ /dev/null @@ -1,109 +0,0 @@ ---- -slug: '/examples/aggregate-function-combinators/sumSimpleState' -title: 'sumSimpleState' -description: 'sumSimpleStateコンビネータの使用例' -keywords: -- 'sum' -- 'state' -- 'simple' -- 'combinator' -- 'examples' -- 'sumSimpleState' -sidebar_label: 'sumSimpleState' ---- - - - - - -# sumSimpleState {#sumsimplestate} - -## 説明 {#description} - -[`SimpleState`](/sql-reference/aggregate-functions/combinators#-simplestate) 組み合わせ子は、[`sum`](/sql-reference/aggregate-functions/reference/sum) 関数に適用され、すべての入力値の合計を返します。結果は[`SimpleAggregateFunction`](/sql-reference/data-types/simpleaggregatefunction)型で返されます。 - -## 使用例 {#example-usage} - -### 投票の追跡 {#tracking-post-votes} - -投稿に対する投票を追跡するテーブルを使用した実用的な例を見てみましょう。各投稿について、アップボート(賛成票)、ダウンボート(反対票)、および全体のスコアの累計を維持したいと考えています。合計を計算するために`SimpleAggregateFunction`型を使用することは、集計の全体の状態を保持する必要がないため、このユースケースに適しています。その結果、より迅速に処理でき、部分的な集約状態のマージを必要としません。 - -まず、原データ用のテーブルを作成します: - -```sql title="Query" -CREATE TABLE raw_votes -( - post_id UInt32, - vote_type Enum8('upvote' = 1, 'downvote' = -1) -) -ENGINE = MergeTree() -ORDER BY post_id; -``` - -次に、集計データを保存するターゲットテーブルを作成します: - -```sql -CREATE TABLE vote_aggregates -( - post_id UInt32, - upvotes SimpleAggregateFunction(sum, UInt64), - downvotes SimpleAggregateFunction(sum, UInt64), - score SimpleAggregateFunction(sum, Int64) -) -ENGINE = AggregatingMergeTree() -ORDER BY post_id; -``` - -次に、`SimpleAggregateFunction`型のカラムを持つMaterialized Viewを作成します: - -```sql -CREATE MATERIALIZED VIEW mv_vote_processor TO vote_aggregates -AS -SELECT - post_id, - -- 合計状態の初期値(アップボートの場合は1、そうでなければ0) - toUInt64(vote_type = 'upvote') AS upvotes, - -- 合計状態の初期値(ダウンボートの場合は1、そうでなければ0) - toUInt64(vote_type = 'downvote') AS downvotes, - -- 合計状態の初期値(アップボートの場合は1、ダウンボートの場合は-1) - toInt64(vote_type) AS score -FROM raw_votes; -``` - -サンプルデータを挿入します: - -```sql -INSERT INTO raw_votes VALUES - (1, 'upvote'), - (1, 'upvote'), - (1, 'downvote'), - (2, 'upvote'), - (2, 'downvote'), - (3, 'downvote'); -``` - -`SimpleState` 組み合わせ子を使用して Materialized View にクエリを実行します: - -```sql -SELECT - post_id, - sum(upvotes) AS total_upvotes, - sum(downvotes) AS total_downvotes, - sum(score) AS total_score -FROM vote_aggregates -- ターゲットテーブルにクエリ -GROUP BY post_id -ORDER BY post_id ASC; -``` - -```response -┌─post_id─┬─total_upvotes─┬─total_downvotes─┬─total_score─┐ -│ 1 │ 2 │ 1 │ 1 │ -│ 2 │ 1 │ 1 │ 0 │ -│ 3 │ 0 │ 1 │ -1 │ 
-└─────────┴───────────────┴─────────────────┴─────────────┘ -``` - -## 関連情報 {#see-also} -- [`sum`](/sql-reference/aggregate-functions/reference/sum) -- [`SimpleState combinator`](/sql-reference/aggregate-functions/combinators#-simplestate) -- [`SimpleAggregateFunction type`](/sql-reference/data-types/simpleaggregatefunction) diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/sumSimpleState.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/sumSimpleState.md.hash deleted file mode 100644 index 378ed8180d5..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/sumSimpleState.md.hash +++ /dev/null @@ -1 +0,0 @@ -19e781070892ef2f diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/uniqArray.md b/i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/uniqArray.md deleted file mode 100644 index a838636d306..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/uniqArray.md +++ /dev/null @@ -1,63 +0,0 @@ ---- -slug: '/examples/aggregate-function-combinators/uniqArray' -title: 'uniqArray' -description: 'uniqArray combinator の使用例' -keywords: -- 'uniq' -- 'array' -- 'combinator' -- 'examples' -- 'uniqArray' -sidebar_label: 'uniqArray' ---- - - - - -# uniqArray {#uniqarray} - -## Description {#description} - -[`Array`](/sql-reference/aggregate-functions/combinators#-array) コンビネーターを -[`uniq`](/sql-reference/aggregate-functions/reference/uniq) 関数に適用することで、 -`uniqArray` 集約コンビネーター関数を使用して、すべての配列にわたるユニークな要素の近似数を計算できます。 - -`uniqArray` 関数は、データセット内の複数の配列にまたがるユニークな要素をカウントする必要があるときに役立ちます。これは `uniq(arrayJoin())` を使用することと同等であり、`arrayJoin` は最初に配列をフラット化し、その後 `uniq` がユニークな要素をカウントします。 - -## Example Usage {#example-usage} - -この例では、異なるカテゴリーにおけるユーザーの興味に関するサンプルデータセットを使用して、`uniqArray` の動作を示します。ユニークな要素のカウントの違いを示すために、`uniq(arrayJoin())` と比較します。 - -```sql title="Query" -CREATE TABLE user_interests -( - user_id UInt32, - interests Array(String) -) ENGINE = Memory; - -INSERT INTO user_interests VALUES - (1, ['reading', 'gaming', 'music']), - (2, ['gaming', 'sports', 'music']), - (3, ['reading', 'cooking']); - -SELECT - uniqArray(interests) as unique_interests_total, - uniq(arrayJoin(interests)) as unique_interests_arrayJoin -FROM user_interests; -``` - -`uniqArray` 関数は、すべての配列を合わせたユニークな要素をカウントします。これは `uniq(arrayJoin())` と似ています。この例では: -- `uniqArray` は 5 を返します。これはすべてのユーザーにわたるユニークな興味が 5 つあるためです: 'reading', 'gaming', 'music', 'sports', 'cooking' -- `uniq(arrayJoin())` も 5 を返し、両方の関数がすべての配列にわたるユニークな要素をカウントしていることを示しています。 - -```response title="Response" - ┌─unique_interests_total─┬─unique_interests_arrayJoin─┐ -1. 
│ 5 │ 5 │ - └────────────────────────┴────────────────────────────┘ -``` - -## See also {#see-also} -- [`uniq`](/sql-reference/aggregate-functions/reference/uniq) -- [`arrayJoin`](/sql-reference/functions/array-join) -- [`Array combinator`](/sql-reference/aggregate-functions/combinators#-array) -- [`uniqCombined`](/sql-reference/aggregate-functions/reference/uniqcombined) diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/uniqArray.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/uniqArray.md.hash deleted file mode 100644 index f494423842b..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/uniqArray.md.hash +++ /dev/null @@ -1 +0,0 @@ -9d8c80ca6b4f14c2 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/uniqArrayIf.md b/i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/uniqArrayIf.md deleted file mode 100644 index 42532bc4dbd..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/uniqArrayIf.md +++ /dev/null @@ -1,87 +0,0 @@ ---- -slug: '/examples/aggregate-function-combinators/uniqArrayIf' -title: 'uniqArrayIf' -description: 'uniqArrayIfコンビネータの使用例' -keywords: -- 'uniq' -- 'array' -- 'if' -- 'combinator' -- 'examples' -- 'uniqArrayIf' -sidebar_label: 'uniqArrayIf' ---- - - - - -# uniqArrayIf {#uniqarrayif} - -## 説明 {#description} - -[`Array`](/sql-reference/aggregate-functions/combinators#-array) および [`If`](/sql-reference/aggregate-functions/combinators#-if) コンビネータは、`uniq` 関数に適用して、条件が真である行の配列のユニークな値の数をカウントするために、`uniqArrayIf` 集約コンビネータ関数を使用できます。 - -:::note -- `If` と `Array` は組み合わせることができます。ただし、`Array` が先に来て、その後に `If` が続かなければなりません。 -::: - -これは、`arrayJoin` を使用せずに特定の条件に基づいて配列内のユニークな要素をカウントしたい場合に便利です。 - -## 使用例 {#example-usage} - -### セグメントタイプおよびエンゲージメントレベルによるユニーク商品のカウント {#count-unique-products} - -この例では、ユーザーのショッピングセッションデータを含むテーブルを使用して、特定のユーセグメントのユーザーによって表示されたユニーク商品の数を、セッション内でのエンゲージメント指標を用いてカウントします。 - -```sql title="クエリ" -CREATE TABLE user_shopping_sessions -( - session_date Date, - user_segment String, - viewed_products Array(String), - session_duration_minutes Int32 -) ENGINE = Memory; - -INSERT INTO user_shopping_sessions VALUES - ('2024-01-01', 'new_customer', ['smartphone_x', 'headphones_y', 'smartphone_x'], 12), - ('2024-01-01', 'returning', ['laptop_z', 'smartphone_x', 'tablet_a'], 25), - ('2024-01-01', 'new_customer', ['smartwatch_b', 'headphones_y', 'fitness_tracker'], 8), - ('2024-01-02', 'returning', ['laptop_z', 'external_drive', 'laptop_z'], 30), - ('2024-01-02', 'new_customer', ['tablet_a', 'keyboard_c', 'tablet_a'], 15), - ('2024-01-02', 'premium', ['smartphone_x', 'smartwatch_b', 'headphones_y'], 22); - --- セグメントタイプおよびエンゲージメントレベルによるユニーク商品のカウント -SELECT - session_date, - -- 新規顧客による長いセッションで表示されたユニーク商品のカウント - uniqArrayIf(viewed_products, user_segment = 'new_customer' AND session_duration_minutes > 10) AS new_customer_engaged_products, - -- リピーターによる表示されたユニーク商品のカウント - uniqArrayIf(viewed_products, user_segment = 'returning') AS returning_customer_products, - -- すべてのセッションで表示されたユニーク商品のカウント - uniqArray(viewed_products) AS total_unique_products -FROM user_shopping_sessions -GROUP BY session_date -ORDER BY session_date -FORMAT Vertical; -``` - -```response title="レスポンス" -Row 1: -────── -session_date: 2024-01-01 -new_customer⋯ed_products: 2 -returning_customer_products: 
3 -total_unique_products: 6 - -Row 2: -────── -session_date: 2024-01-02 -new_customer⋯ed_products: 2 -returning_customer_products: 2 -total_unique_products: 7 -``` - -## 参考 {#see-also} -- [`uniq`](/sql-reference/aggregate-functions/reference/uniq) -- [`Array combinator`](/sql-reference/aggregate-functions/combinators#-array) -- [`If combinator`](/sql-reference/aggregate-functions/combinators#-if) diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/uniqArrayIf.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/uniqArrayIf.md.hash deleted file mode 100644 index 16044049064..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/guides/examples/aggregate_function_combinators/uniqArrayIf.md.hash +++ /dev/null @@ -1 +0,0 @@ -49cbd2e146adc04f diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/guides/inserting-data.md b/i18n/jp/docusaurus-plugin-content-docs/current/guides/inserting-data.md deleted file mode 100644 index 3734b6928fa..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/guides/inserting-data.md +++ /dev/null @@ -1,159 +0,0 @@ ---- -title: 'ClickHouseデータの挿入' -description: 'ClickHouseにデータを挿入する方法' -keywords: -- 'insert' -- 'insert data' -- 'insert into table' -sidebar_label: 'ClickHouseデータの挿入' -slug: '/guides/inserting-data' ---- - -import postgres_inserts from '@site/static/images/guides/postgres-inserts.png'; -import Image from '@theme/IdealImage'; - -## 基本的な例 {#basic-example} - -ClickHouseでは、馴染みのある `INSERT INTO TABLE` コマンドを使用できます。最初のガイドで作成したテーブルにデータを挿入してみましょう ["ClickHouseでのテーブル作成"](./creating-tables)。 - -```sql -INSERT INTO helloworld.my_first_table (user_id, message, timestamp, metric) VALUES - (101, 'Hello, ClickHouse!', now(), -1.0 ), - (102, 'Insert a lot of rows per batch', yesterday(), 1.41421 ), - (102, 'Sort your data based on your commonly-used queries', today(), 2.718 ), - (101, 'Granules are the smallest chunks of data read', now() + 5, 3.14159 ) -``` - -これが成功したか確認するために、次の `SELECT` クエリを実行します。 - -```sql -SELECT * FROM helloworld.my_first_table -``` - -これにより、次の結果が返されます。 - -```response -user_id message timestamp metric -101 Hello, ClickHouse! 
2024-11-13 20:01:22 -1 -101 Granules are the smallest chunks of data read 2024-11-13 20:01:27 3.14159 -102 Insert a lot of rows per batch 2024-11-12 00:00:00 1.41421 -102 Sort your data based on your commonly-used queries 2024-11-13 00:00:00 2.718 -``` - -## ClickHouseにデータを挿入することとOLTPデータベース {#inserting-into-clickhouse-vs-oltp-databases} - -ClickHouseはOLAP(オンライン分析処理)データベースとして、高いパフォーマンスとスケーラビリティに最適化されており、秒間に数百万行を挿入できる可能性があります。 -これは、高度に並列化されたアーキテクチャと効率的な列指向圧縮の組み合わせによって実現されていますが、即時の整合性には妥協があります。 -具体的には、ClickHouseは追加のみの操作に最適化されており、最終的な整合性のみの保証を提供します。 - -対照的に、PostgresなどのOLTPデータベースは、完全なACID準拠でトランザクション挿入に特化して最適化されており、強い整合性と信頼性の保証を確保しています。 -PostgreSQLはMVCC(マルチバージョン同時実行制御)を使用して同時トランザクションを処理し、データの複数のバージョンを維持します。 -これらのトランザクションは、通常少数の行を対象とし、信頼性の保証により挿入パフォーマンスにかなりのオーバーヘッドが発生します。 - -高い挿入パフォーマンスを維持しつつ強い整合性の保証を確保するために、ユーザーはClickHouseにデータを挿入する際に以下の単純なルールを守るべきです。 -これらのルールに従うことで、ユーザーがClickHouseを初めて使用する際に一般的に直面する問題を避けることができ、OLTPデータベースに対して機能する挿入戦略を模倣することができます。 - -## 挿入のベストプラクティス {#best-practices-for-inserts} - -### 大規模なバッチサイズで挿入する {#insert-in-large-batch-sizes} - -デフォルトでは、ClickHouseに送信された各挿入により、ClickHouseはすぐに挿入からのデータを含むストレージの一部を作成し、保存する必要のある他のメタデータも含まれます。 -したがって、より多くのデータを含む少量の挿入を送信することに比べ、より少ないデータを含む大量の挿入を送信することは、必要な書き込みの数を減少させます。 -一般的には、一度に少なくとも1,000行のかなり大きなバッチでデータを挿入することをお勧めします。理想的には10,000行から100,000行の間です。 -(さらなる詳細は [ここ](https://clickhouse.com/blog/asynchronous-data-inserts-in-clickhouse#data-needs-to-be-batched-for-optimal-performance))。 - -大規模なバッチが不可能な場合は、以下の非同期挿入を使用してください。 - -### 再試行を無駄なくするために一貫したバッチを確保する {#ensure-consistent-batches-for-idempotent-retries} - -デフォルトでは、ClickHouseへの挿入は同期的であり、冪等性があります(同じ挿入操作を複数回実行しても、一度だけ実行した場合と同じ効果があります)。 -MergeTreeエンジンファミリーのテーブルの場合、ClickHouseはデフォルトで挿入の[重複排除](https://clickhouse.com/blog/common-getting-started-issues-with-clickhouse#5-deduplication-at-insert-time)を自動的に行います。 - -これは、次のような場合に挿入が頑強であることを意味します: - -- 1. データを受信するノードに問題がある場合、挿入クエリはタイムアウトまたはより具体的なエラーを返し、確認応答が得られません。 -- 2. 
データがノードに書き込まれたが、ネットワークの中断により確認応答を送信者に返せない場合、送信者はタイムアウトまたはネットワークエラーを受け取ります。 - -クライアントの視点では、(i) と (ii) は区別が難しいかもしれません。しかし、どちらの場合でも、確認応答を受け取っていない挿入はすぐに再試行できます。 -再試行した挿入クエリが、元の(確認応答を受け取っていない)挿入と同じデータを同じ順序で含んでいる限り、ClickHouseは自動的に再試行した挿入を無視します。 - -### MergeTreeテーブルまたは分散テーブルに挿入する {#insert-to-a-mergetree-table-or-a-distributed-table} - -MergeTree(またはレプリカテーブル)に直接挿入し、データがシャードされている場合はノードのセット間でリクエストのバランスを取り、`internal_replication=true`を設定することをお勧めします。 -これにより、ClickHouseはデータを利用可能なレプリカシャードにレプリケートし、データが最終的に整合性を持つことが保証されます。 - -クライアント側の負荷分散が不便な場合、ユーザーは[分散テーブル](/engines/table-engines/special/distributed)を介して挿入することができ、これによりノード間で書き込みが分散されます。再度、`internal_replication=true`を設定することをお勧めします。 -ただし、このアプローチは、分散テーブルを持っているノードにローカルに書き込みを行い、その後シャードに送信する必要があるため、パフォーマンスがやや低下することに注意が必要です。 - -### 小さなバッチの非同期挿入を使用する {#use-asynchronous-inserts-for-small-batches} - -クライアント側のバッチ処理が非現実的なシナリオがあります。例として、ログ、メトリックス、トレースなどを送信する100台または1000台の単一目的エージェントを持つ可観測性のユースケースがあります。 -このシナリオでは、データのリアルタイム輸送が重要であり、迅速に問題や異常を検出するために重要です。 -さらに、観測システムでのイベントスパイクのリスクがあり、これが原因でクライアント側で可観測データをバッファリングしようとした場合に大きなメモリスパイクや関連する問題を引き起こす可能性があります。 -大規模なバッチを挿入できない場合、ユーザーは[非同期挿入](/best-practices/selecting-an-insert-strategy#asynchronous-inserts)を使用してClickHouseにバッチ処理を委任できます。 - -非同期挿入では、データは最初にバッファに挿入され、その後、以下の3つのステップでデータベースストレージに書き込まれます。 - - - -非同期挿入が有効になっている場合、ClickHouseは: - -(1) 非同期的に挿入クエリを受け取ります。 -(2) クエリのデータを最初にメモリ内のバッファに書き込みます。 -(3) 次のバッファフラッシュが行われるときに、データをデータベースストレージの一部としてソートして書き込みます。 - -バッファがフラッシュされる前に、同じクライアントまたは他のクライアントからの他の非同期挿入クエリのデータがバッファに収集される可能性があります。 -バッファフラッシュから作成された部分には、複数の非同期挿入クエリのデータが含まれる可能性があります。 -一般的に、これらのメカニズムはデータのバッチ処理をクライアント側からサーバー側(ClickHouseインスタンス)に移行します。 - -:::note -データは、データベースストレージにフラッシュされる前はクエリによって検索できないことに注意してください。また、バッファフラッシュは構成可能です。 - -非同期挿入を構成するための詳細は[こちら](/optimize/asynchronous-inserts#enabling-asynchronous-inserts)で、深く掘り下げた情報は[こちら](https://clickhouse.com/blog/asynchronous-data-inserts-in-clickhouse)をご覧ください。 -::: - -### 公式のClickHouseクライアントを使用する {#use-official-clickhouse-clients} - -ClickHouseには、最も人気のあるプログラミング言語でのクライアントが用意されています。 -これらは、挿入が正しく行われることを保証し、例えば[Goクライアント](/integrations/go#async-insert)のように直接、またはクエリ、ユーザー、接続レベルの設定で有効にした場合には非同期挿入をネイティブにサポートするように最適化されています。 - -使用可能なClickHouseクライアントおよびドライバの完全なリストは、[クライアントとドライバ](/interfaces/cli)を参照してください。 - -### ネイティブ形式を優先する {#prefer-the-native-format} - -ClickHouseは、多くの[入力形式](/interfaces/formats)を挿入(およびクエリ)時にサポートしています。 -これはOLTPデータベースとの大きな違いであり、外部ソースからのデータの読み込みを大幅に容易にします。特に、[テーブル関数](/sql-reference/table-functions)やディスク上のファイルからのデータ読み込み機能と組み合わせると便利です。 -これらの形式は、アドホックデータロードやデータエンジニアリングタスクに理想的です。 - -最適な挿入パフォーマンスを実現したいアプリケーションでは、[ネイティブ](/interfaces/formats/Native)形式を使用して挿入することをお勧めします。 -これはほとんどのクライアント(GoやPythonなど)でサポートされており、列指向のフォーマットであるため、サーバーが最小限の作業を行えば済みます。 -これにより、データを列指向フォーマットに変換する責任がクライアント側に置かれます。これは、効率的に挿入をスケールさせるために重要です。 - -また、行形式が好ましい場合は、[RowBinary形式](/interfaces/formats/RowBinary)(Javaクライアントで使用)を使用することもできます。これは、ネイティブ形式よりも書き込みやすいことが一般的です。 -これは[JSON](/interfaces/formats/JSON)のような他の行形式に比べて、圧縮、ネットワークオーバーヘッド、サーバー上の処理の面でより効率的です。 -[JSONEachRow](/interfaces/formats/JSONEachRow)形式は、速やかに統合したいが書き込みスループットが低いユーザー向けに考慮されることがあります。この形式は、ClickHouse内での解析にCPUオーバーヘッドが発生することに注意してください。 - -### HTTPインターフェースを使用する {#use-the-http-interface} - -多くの従来のデータベースとは異なり、ClickHouseはHTTPインターフェースをサポートしています。 -ユーザーは、上記のいずれかの形式を使ってデータを挿入およびクエリするためにこれを使用できます。 -これは、トラフィックをロードバランサーで簡単に切り替えることができるため、ClickHouseのネイティブプロトコルよりも好まれることがよくあります。 -ネイティブプロトコルでは小さな違いが挿入パフォーマンスにあると予想され、オーバーヘッドが少し低くなります。 -既存のクライアントは、これらのプロトコルのいずれか(場合によっては両方、例えばGoクライアント)を使用しています。 -ネイティブプロトコルでは、クエリの進捗を簡単に追跡できます。 - -詳細については[HTTPインターフェース](/interfaces/http)を参照してください。 - -## 
Postgresからデータをロードする {#loading-data-from-postgres} - -Postgresからデータをロードするために、ユーザーは次のものを使用できます: - -- `PeerDB by ClickHouse`、PostgreSQLデータベースのレプリケーションのために特別に設計されたETLツール。これは次の両方で利用可能です: - - ClickHouse Cloud - 当社の[新しいコネクタ](/integrations/clickpipes/postgres)(プライベートプレビュー)を通じたClickPipes、マネージドインジェストサービスの中で利用可能。興味のあるユーザーは[こちらからサインアップ](https://clickpipes.peerdb.io/)できます。 - - セルフマネージド - [オープンソースプロジェクト](https://github.com/PeerDB-io/peerdb)を通じて利用可能。 -- 以前の例で示されたように、データを直接読み込むための[PostgreSQLテーブルエンジン](/integrations/postgresql#using-the-postgresql-table-engine)。既知のウォーターマーク(例:タイムスタンプ)に基づいたバッチレプリケーションが十分な場合や、単発の移行が適している場合があります。このアプローチは1,000万行のスケールに対応できます。より大きなデータセットの移行を考えているユーザーは、各データのチャンクを扱う複数のリクエストを考慮する必要があります。各チャンクのパーティションを移動する前にステージングテーブルを使用できます。これにより、失敗したリクエストを再試行できます。このバルクローディング戦略に関する詳細は、こちらをご覧ください。 -- データはCSV形式でPostgreSQLからエクスポートできます。これをClickHouseに挿入することができ、ローカルファイルまたはオブジェクトストレージを介して、テーブル関数を使用して行います。 - -:::note 大きなデータセットの挿入に関するヘルプが必要ですか? -大きなデータセットの挿入や、ClickHouse Cloudへのデータインポート中にエラーが発生した場合は、support@clickhouse.comまでご連絡いただければ、サポートいたします。 -::: diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/guides/inserting-data.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/guides/inserting-data.md.hash deleted file mode 100644 index 76570d40e82..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/guides/inserting-data.md.hash +++ /dev/null @@ -1 +0,0 @@ -cc599617925f3860 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/guides/joining-tables.md b/i18n/jp/docusaurus-plugin-content-docs/current/guides/joining-tables.md deleted file mode 100644 index dc9bade0d64..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/guides/joining-tables.md +++ /dev/null @@ -1,189 +0,0 @@ ---- -title: 'ClickHouse における JOIN の使用方法' -description: 'ClickHouse でのテーブルの結合方法' -keywords: -- 'joins' -- 'join tables' -slug: '/guides/joining-tables' ---- - -import Image from '@theme/IdealImage'; -import joins_1 from '@site/static/images/guides/joins-1.png'; -import joins_2 from '@site/static/images/guides/joins-2.png'; -import joins_3 from '@site/static/images/guides/joins-3.png'; -import joins_4 from '@site/static/images/guides/joins-4.png'; -import joins_5 from '@site/static/images/guides/joins-5.png'; - -ClickHouseは[完全な `JOIN` サポート](https://clickhouse.com/blog/clickhouse-fully-supports-joins-part1)を提供しており、さまざまな結合アルゴリズムを選択できます。パフォーマンスを最大化するためには、このガイドに記載されている結合最適化の提案に従うことをお勧めします。 - -- 最適なパフォーマンスを得るために、ユーザーはクエリ内の `JOIN` の数を減らすことを目指すべきです。特にミリ秒単位のパフォーマンスが要求されるリアルタイム分析ワークロードでは、クエリ内の `JOIN` の最大数は3から4に抑えることを目指してください。[データモデリングセクション](/data-modeling/schema-design)では、非正規化、辞書、およびマテリアライズドビューを含む、JOINを最小限に抑えるための多くの変更を詳述しています。 -- 現在、ClickHouseはJOINの順序を変更しません。常に最小のテーブルを結合の右側に配置してください。これにより、大部分の結合アルゴリズムでメモリに保持され、クエリのメモリオーバーヘッドが最小限に抑えられます。 -- あなたのクエリが直接結合を必要とする場合、すなわち `LEFT ANY JOIN` のように、できる限り[辞書](/dictionary)を使用することをお勧めします。 - - - -- 内部結合を行う場合、これを `IN` 句を使用したサブクエリとして記述する方がしばしば最適です。以下のクエリは機能的に等価であり、どちらも問題の中でClickHouseに言及しない `posts` の数を見つけますが、`comments` では言及があります。 - -```sql -SELECT count() -FROM stackoverflow.posts AS p -ANY INNER `JOIN` stackoverflow.comments AS c ON p.Id = c.PostId -WHERE (p.Title != '') AND (p.Title NOT ILIKE '%clickhouse%') AND (p.Body NOT ILIKE '%clickhouse%') AND (c.Text ILIKE '%clickhouse%') - -┌─count()─┐ -│ 86 │ -└─────────┘ - -1 row in set. Elapsed: 8.209 sec. Processed 150.20 million rows, 56.05 GB (18.30 million rows/s., 6.83 GB/s.) -Peak memory usage: 1.23 GiB. 
-``` - -ここでは、直積を避けるために `ANY INNER JOIN` を使用している点に注意してください。各投稿に対して1つのマッチのみを取得したいからです。 - -この結合はサブクエリを使用して書き換えることができ、パフォーマンスが大幅に向上します: - -```sql -SELECT count() -FROM stackoverflow.posts -WHERE (Title != '') AND (Title NOT ILIKE '%clickhouse%') AND (Body NOT ILIKE '%clickhouse%') AND (Id IN ( - SELECT PostId - FROM stackoverflow.comments - WHERE Text ILIKE '%clickhouse%' -)) -┌─count()─┐ -│ 86 │ -└─────────┘ - -1 row in set. Elapsed: 2.284 sec. Processed 150.20 million rows, 16.61 GB (65.76 million rows/s., 7.27 GB/s.) -Peak memory usage: 323.52 MiB. -``` - -ClickHouseはすべてのJOIN句とサブクエリに条件をプッシュダウンする試みを行いますが、ユーザーは可能な限りすべてのサブ句に手動で条件を適用することをお勧めします。これにより、`JOIN`するデータのサイズが最小限に抑えられます。以下の例を考慮してください。2020年以降のJava関連の投稿に対するアップボート数を計算したいとします。 - -Naiveなクエリは、左側に大きなテーブルを配置すると、56秒で完了します: - -```sql -SELECT countIf(VoteTypeId = 2) AS upvotes -FROM stackoverflow.posts AS p -INNER JOIN stackoverflow.votes AS v ON p.Id = v.PostId -WHERE has(arrayFilter(t -> (t != ''), splitByChar('|', p.Tags)), 'java') AND (p.CreationDate >= '2020-01-01') - -┌─upvotes─┐ -│ 261915 │ -└─────────┘ - -1 row in set. Elapsed: 56.642 sec. Processed 252.30 million rows, 1.62 GB (4.45 million rows/s., 28.60 MB/s.) -``` - -このJOINを再配置すると、パフォーマンスが劇的に1.5秒に改善されます: - -```sql -SELECT countIf(VoteTypeId = 2) AS upvotes -FROM stackoverflow.votes AS v -INNER JOIN stackoverflow.posts AS p ON v.PostId = p.Id -WHERE has(arrayFilter(t -> (t != ''), splitByChar('|', p.Tags)), 'java') AND (p.CreationDate >= '2020-01-01') - -┌─upvotes─┐ -│ 261915 │ -└─────────┘ - -1 row in set. Elapsed: 1.519 sec. Processed 252.30 million rows, 1.62 GB (166.06 million rows/s., 1.07 GB/s.) -``` - -右側のテーブルにフィルターを追加すると、さらにパフォーマンスが0.5秒に改善されます。 - -```sql -SELECT countIf(VoteTypeId = 2) AS upvotes -FROM stackoverflow.votes AS v -INNER JOIN stackoverflow.posts AS p ON v.PostId = p.Id -WHERE has(arrayFilter(t -> (t != ''), splitByChar('|', p.Tags)), 'java') AND (p.CreationDate >= '2020-01-01') AND (v.CreationDate >= '2020-01-01') - -┌─upvotes─┐ -│ 261915 │ -└─────────┘ - -1 row in set. Elapsed: 0.597 sec. Processed 81.14 million rows, 1.31 GB (135.82 million rows/s., 2.19 GB/s.) -Peak memory usage: 249.42 MiB. -``` - -このクエリは、前述のように `INNER JOIN` をサブクエリに移動することで、さらに改善できます。外側と内側のクエリの両方にフィルターを維持しています。 - -```sql -SELECT count() AS upvotes -FROM stackoverflow.votes -WHERE (VoteTypeId = 2) AND (PostId IN ( - SELECT Id - FROM stackoverflow.posts - WHERE (CreationDate >= '2020-01-01') AND has(arrayFilter(t -> (t != ''), splitByChar('|', Tags)), 'java') -)) - -┌─upvotes─┐ -│ 261915 │ -└─────────┘ - -1 row in set. Elapsed: 0.383 sec. Processed 99.64 million rows, 804.55 MB (259.85 million rows/s., 2.10 GB/s.) -Peak memory usage: 250.66 MiB. -``` - -## 結合アルゴリズムの選択 {#choosing-a-join-algorithm} - -ClickHouseは複数の[結合アルゴリズム](https://clickhouse.com/blog/clickhouse-fully-supports-joins-part1)をサポートしています。これらのアルゴリズムは、通常、パフォーマンスとメモリ使用量のトレードオフを行います。以下に、ClickHouseの結合アルゴリズムの概要を示します。これらは相対的なメモリ消費量と実行時間に基づいています。 - -
-<Image img={joins_2} alt="結合アルゴリズムの相対的なメモリ消費量と実行時間の概要"/>
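なお、この時点でどの結合アルゴリズムが設定されているかは、次のように `system.settings` から確認できます(後述の `join_algorithm` 設定に対応する、確認用の最小限の例です):

```sql
-- 現在の join_algorithm 設定値とその説明を確認する
SELECT name, value, description
FROM system.settings
WHERE name = 'join_algorithm';
```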
- -これらのアルゴリズムは、結合クエリがどのように計画され、実行されるかを決定します。デフォルトでは、ClickHouseは、使用される結合タイプと接続されているテーブルの厳密さ、およびエンジンに基づいて、直接結合またはハッシュ結合アルゴリズムを使用します。あるいは、ClickHouseを構成して、リソースの使用状況に応じて実行時に使用する結合アルゴリズムを適応的に選択し、動的に変更することもできます。`join_algorithm=auto` の場合、ClickHouseは最初にハッシュ結合アルゴリズムを試み、そのアルゴリズムのメモリ制限が違反された場合は、アルゴリズムはその場で部分的なマージ結合に切り替わります。どのアルゴリズムが選ばれたかをトレースログで確認できます。ClickHouseはまた、ユーザーが `join_algorithm` 設定を介して自分で希望する結合アルゴリズムを指定することも許可しています。 - -各結合アルゴリズムのサポートされている `JOIN` タイプは以下に示されており、最適化前に考慮する必要があります。 - -
-<Image img={joins_3} alt="各結合アルゴリズムがサポートする JOIN タイプ"/>
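前述の `join_algorithm` 設定は、クエリ単位でもセッション単位でも指定できます。以下は、本文で使用している `stackoverflow` データセットを想定した最小限のスケッチです:

```sql
-- クエリ単位で結合アルゴリズムを明示的に指定する(小さい方のテーブルを右側に置く)
SELECT count()
FROM stackoverflow.votes AS v
INNER JOIN stackoverflow.posts AS p ON v.PostId = p.Id
SETTINGS join_algorithm = 'full_sorting_merge';

-- セッション全体に適用する場合
SET join_algorithm = 'grace_hash';
```

どのアルゴリズムが実際に選択されたかは、前述のとおりトレースログで確認できます。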
- -各 `JOIN` アルゴリズムの詳細な説明は[こちら](https://clickhouse.com/blog/clickhouse-fully-supports-joins-hash-joins-part2)で見つけることができ、それぞれの長所、短所、スケーリング特性が含まれています。 - -適切な結合アルゴリズムの選択は、メモリを最適化するかパフォーマンスを最適化するかに依存します。 - -## JOINパフォーマンスの最適化 {#optimizing-join-performance} - -あなたの主要な最適化指標がパフォーマンスであり、できるだけ速く結合を実行したい場合、次の意思決定ツリーを使用して適切な結合アルゴリズムを選択できます。 - -
-<Image img={joins_4} alt="パフォーマンス最適化のための結合アルゴリズム選択の意思決定ツリー"/>
- -- **(1)** 右側のテーブルからのデータをメモリ内の低遅延キーバリューデータ構造(例えば、辞書)に事前にロードでき、かつ結合キーが基礎となるキーバリューストレージのキー属性と一致する場合、さらに `LEFT ANY JOIN` の意味論が適切であれば、**直接結合**が適用可能であり、最も迅速なアプローチを提供します。 - -- **(2)** テーブルの[物理行順序](/guides/best-practices/sparse-primary-indexes#data-is-stored-on-disk-ordered-by-primary-key-columns)が結合キーのソート順に一致する場合、状況によります。この場合、**フルソートマージ結合**はソートフェーズを[スキップ](https://clickhouse.com/blog/clickhouse-fully-supports-joins-full-sort-partial-merge-part3#utilizing-physical-row-order)し、メモリ使用量が大幅に削減され、データサイズと結合キーの値の分布に応じて、一部のハッシュ結合アルゴリズムよりも速い実行時間を実現できます。 - -- **(3)** 右側のテーブルがメモリに収まる場合、たとえ[追加のメモリ使用オーバーヘッド](https://clickhouse.com/blog/clickhouse-fully-supports-joins-hash-joins-part2#summary)が**並列ハッシュ結合**の場合であっても、このアルゴリズムまたはハッシュ結合が速くなります。これはデータのサイズ、データタイプ、結合キーのカラムの値の分布に依存します。 - -- **(4)** 右側のテーブルがメモリに収まらない場合、再び状況によります。ClickHouseはメモリバウンドでない3つの結合アルゴリズムを提供します。すべてのアルゴリズムは、一時的にデータをディスクにスピルします。**フルソートマージ結合**と**部分マージ結合**は、データの事前ソートを必要とします。**グレースハッシュ結合**は代わりにデータからハッシュテーブルを構築します。データの量、データタイプ、および結合キーのカラムの値の分布に応じて、データからハッシュテーブルを構築する方がデータのソートよりも速いシナリオもあります。その逆も然りです。 - -部分マージ結合は、大きなテーブルを結合する際のメモリ使用量を最小限に抑えるよう最適化されていますが、結合速度はかなり遅くなります。これは特に左側のテーブルの物理行順序が結合キーのソート順に一致しない場合に当てはまります。 - -グレースハッシュ結合は、メモリ使用量と結合速度の良好な制御を提供する、メモリバウンドでない3つのアルゴリズムの中で最も柔軟です。[grace_hash_join_initial_buckets](https://github.com/ClickHouse/ClickHouse/blob/23.5/src/Core/Settings.h#L759)設定を使用してチューニング可能です。データ量に応じて、グレースハッシュが部分的マージアルゴリズムよりも速くなる場合もあれば、遅くなる場合もあります。その際、両アルゴリズムのメモリ使用量がほぼ align するようにバケット数を選択する必要があります。特にグレースハッシュ結合のメモリ使用量がフルソートマージとの間でおおよそ align するように構成されている場合、我々のテストではフルソートマージが常に速くなりました。 - -メモリをバウンドしない3つのアルゴリズムの中でどれが最速かは、データ量、データタイプ、および結合キーのカラムの値の分布に依存します。実際のデータ量で実行するベンチマークを行って、どのアルゴリズムが最も速いかを判断するのが常に最良です。 - -## メモリの最適化 {#optimizing-for-memory} - -結合を最も迅速な実行時間ではなく、最低のメモリ使用量に最適化したい場合、この意思決定ツリーを使用できます。 - -
-<Image img={joins_5} alt="メモリ使用量最適化のための結合アルゴリズム選択の意思決定ツリー"/>
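前述の `grace_hash_join_initial_buckets` を調整してメモリ使用量を抑える場合の最小限のスケッチです(バケット数 `32` は仮の値で、実際のデータ量に合わせたベンチマークを前提とします):

```sql
-- グレースハッシュ結合をバケット数を指定して使用する(バケット数は仮の値)
SELECT count()
FROM stackoverflow.votes AS v
INNER JOIN stackoverflow.posts AS p ON v.PostId = p.Id
SETTINGS join_algorithm = 'grace_hash',
         grace_hash_join_initial_buckets = 32;
```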
- -- **(1)** あなたのテーブルの物理行順序が結合キーのソート順に一致する場合、**フルソートマージ結合**のメモリ使用量は最低限になります。ソートフェーズが[無効](https://clickhouse.com/blog/clickhouse-fully-supports-joins-full-sort-partial-merge-part3#utilizing-physical-row-order)にされているため、良好な結合速度の追加の利点もあります。 -- **(2)** **グレースハッシュ結合**は、[構成](https://github.com/ClickHouse/ClickHouse/blob/23.5/src/Core/Settings.h#L759)することで非常に低いメモリ使用量に調整できますが、その代わり結合速度が遅くなります。**部分マージ結合**は意図的にメインメモリの低い量を使用します。**外部ソートが有効なフルソートマージ結合**は、一般的に部分マージ結合よりも多くのメモリを使用します(行順がキーのソート順に一致しないと仮定した場合)、ただし、結合実行時間は大幅に改善されます。 - -上記の詳細が必要なユーザーには、次の[ブログシリーズ](https://clickhouse.com/blog/clickhouse-fully-supports-joins-part1)をお勧めします。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/guides/joining-tables.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/guides/joining-tables.md.hash deleted file mode 100644 index ec15cb8661d..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/guides/joining-tables.md.hash +++ /dev/null @@ -1 +0,0 @@ -528c61ff7a36b3a0 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/guides/manage-and-deploy-index.md b/i18n/jp/docusaurus-plugin-content-docs/current/guides/manage-and-deploy-index.md deleted file mode 100644 index 320f628334d..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/guides/manage-and-deploy-index.md +++ /dev/null @@ -1,38 +0,0 @@ ---- -title: 'Manage and Deploy Overview' -description: 'Overview page for Manage and Deploy' -slug: '/guides/manage-and-deploy-index' ---- - - - - -# 管理とデプロイ - -このセクションには以下のトピックが含まれています: - -| トピック | 説明 | -|-------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------| -| [デプロイメントとスケーリング](/deployment-guides/index) | ClickHouse Support and Services組織がClickHouseユーザーに提供するアドバイスに基づいたデプロイメントの作業例。 | -| [ストレージとコンピュートの分離](/guides/separation-storage-compute) | ClickHouseとS3を使用してストレージとコンピュートを分離したアーキテクチャを実装する方法を探るガイド。 | -| [サイズとハードウェアの推奨事項](/guides/sizing-and-hardware-recommendations) | オープンソースユーザー向けのハードウェア、コンピュート、メモリ、およびディスク構成に関する一般的な推奨事項についてのガイド。 | -| [ClickHouse Keeperの設定](/guides/sre/keeper/clickhouse-keeper) | ClickHouse Keeperを構成する方法に関する情報と例。 | -| [ネットワークポート](/guides/sre/network-ports) | ClickHouseで使用されるネットワークポートのリスト。 | -| [シャードの再バランス](/guides/sre/scaling-clusters) | シャードの再バランスに関する推奨事項。 | -| [ClickHouseはマルチリージョンレプリケーションをサポートしていますか?](/faq/operations/multi-region-replication) | マルチリージョンレプリケーションに関するFAQ。 | -| [本番環境で使用するClickHouseのバージョンは?](/faq/operations/production) | 本番環境での使用に関するClickHouseのバージョンについてのFAQ。 | -| [クラスター発見](/operations/cluster-discovery) | ClickHouseのクラスター発見機能に関する情報と例。 | -| [監視](/operations/monitoring) | ClickHouseのハードウェアリソース利用率とサーバーメトリクスを監視する方法に関する情報。 | -| [OpenTelemetryとClickHouseのトレーシング](/operations/opentelemetry) | ClickHouseでOpenTelemetryを使用する方法に関する情報。 | -| [クォータ](/operations/quotas) | ClickHouseのクォータに関する情報と例。 | -| [Zookeeperとの安全な通信](/operations/ssl-zookeeper) | ClickHouseとZookeeper間での安全な通信を設定するためのガイド。 | -| [スタートアップスクリプト](/operations/startup-scripts) | スタートアップ時にスタートアップスクリプトを実行する方法の例で、マイグレーションや自動スキーマ作成に役立ちます。 | -| [データ保存のための外部ディスク](/operations/storing-data) | ClickHouseでの外部ストレージの構成に関する情報と例。 | -| [割当プロファイリング](/operations/allocation-profiling) | jemallocを使った割当サンプリングとプロファイリングに関する情報と例。 | -| [バックアップと復元](/operations/backup) | ローカルディスクや外部ストレージへのバックアップに関するガイド。 | -| [キャッシュ](/operations/caches) | ClickHouseのさまざまなキャッシュタイプに関する解説。 | -| [ワークロードスケジューリング](/operations/workload-scheduling) | ClickHouseにおけるワークロードスケジューリングの解説。 | -| 
[セルフマネージドアップグレード](/operations/update) | セルフマネージドアップグレードを実施するためのガイドライン。 | -| [トラブルシューティング](/guides/troubleshooting) | 多様なトラブルシューティングのヒント。 | -| [使用推奨事項](/operations/tips) | ClickHouseのハードウェアおよびソフトウェアの使用に関するさまざまな推奨事項。 | -| [分散DDL](/sql-reference/distributed-ddl) | `ON CLUSTER`句の解説。 | diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/guides/manage-and-deploy-index.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/guides/manage-and-deploy-index.md.hash deleted file mode 100644 index 1dd168be811..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/guides/manage-and-deploy-index.md.hash +++ /dev/null @@ -1 +0,0 @@ -052e850e6a684abd diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/guides/separation-storage-compute.md b/i18n/jp/docusaurus-plugin-content-docs/current/guides/separation-storage-compute.md deleted file mode 100644 index 79ecb6234ab..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/guides/separation-storage-compute.md +++ /dev/null @@ -1,177 +0,0 @@ ---- -sidebar_position: 1 -sidebar_label: 'ストレージとコンピューティングの分離' -slug: '/guides/separation-storage-compute' -title: 'Separation of Storage and Compute' -description: 'このガイドでは、ClickHouseとS3を使用して、ストレージとコンピューティングを分離したアーキテクチャを実装する方法について探ります。' ---- - -import Image from '@theme/IdealImage'; -import BucketDetails from '@site/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_S3_authentication_and_bucket.md'; -import s3_bucket_example from '@site/static/images/guides/s3_bucket_example.png'; - - -# ストレージとコンピュートの分離 - -## 概要 {#overview} - -このガイドでは、ClickHouseとS3を使用してストレージとコンピュートを分離したアーキテクチャの実装方法を探ります。 - -ストレージとコンピュートの分離は、計算リソースとストレージリソースが独立して管理されることを意味します。ClickHouseでは、これによりスケーラビリティ、コスト効率、および柔軟性が向上します。必要に応じてストレージとコンピュートリソースを別々にスケールさせることができ、パフォーマンスとコストを最適化できます。 - -S3をバックエンドとして使用するClickHouseは、「コールド」データに対するクエリパフォーマンスがそれほど重要でないユースケースに特に有用です。ClickHouseは、`MergeTree`エンジンに対してS3をストレージとして使用するためのサポートを提供し、`S3BackedMergeTree`を使用します。このテーブルエンジンを使用することで、ユーザーはS3のスケーラビリティとコストメリットを享受しながら、`MergeTree`エンジンの挿入およびクエリパフォーマンスを維持できます。 - -ストレージとコンピュートアーキテクチャを実装および管理することは、標準的なClickHouseデプロイメントと比較してより複雑であることに注意してください。セルフマネージドのClickHouseでは、このガイドで説明したようにストレージとコンピュートの分離が可能ですが、設定なしでこのアーキテクチャでClickHouseを使用できる[ClickHouse Cloud](https://clickhouse.com/cloud)の利用をお勧めします。このサービスでは、[`SharedMergeTree`テーブルエンジン](/cloud/reference/shared-merge-tree)を使用します。 - -*このガイドは、ClickHouseのバージョン22.8以上を使用していることを前提としています。* - -:::warning -AWS/GCSのライフサイクルポリシーを構成しないでください。これはサポートされておらず、テーブルが壊れる可能性があります。 -::: - -## 1. ClickHouseディスクとしてS3を使用する {#1-use-s3-as-a-clickhouse-disk} - -### ディスクの作成 {#creating-a-disk} - -ClickHouseの`config.d`ディレクトリに新しいファイルを作成して、ストレージ構成を保存します: - -```bash -vim /etc/clickhouse-server/config.d/storage_config.xml -``` - -新しく作成したファイルに以下のXMLをコピーし、`BUCKET`、`ACCESS_KEY_ID`、`SECRET_ACCESS_KEY`をデータを保存したいAWSバケットの詳細に置き換えます: - -```xml - - - - - s3 - $BUCKET - $ACCESS_KEY_ID - $SECRET_ACCESS_KEY - /var/lib/clickhouse/disks/s3_disk/ - - - cache - s3_disk - /var/lib/clickhouse/disks/s3_cache/ - 10Gi - - - - - -
-                    <disk>s3_disk</disk>
-                </main>
-            </volumes>
-        </s3_main>
-    </policies>
-    </storage_configuration>
-</clickhouse>
-``` - -S3ディスクの設定をさらに指定する必要がある場合、たとえば`region`を指定したりカスタムHTTP`header`を送信したりする場合は、関連する設定のリストを[こちら](/engines/table-engines/mergetree-family/mergetree.md/#table_engine-mergetree-s3)で見つけることができます。 - -次のように`access_key_id`と`secret_access_key`を置き換えることもでき、環境変数やAmazon EC2メタデータから認証情報を取得しようとします: - -```bash -true -``` - -構成ファイルを作成した後、ファイルの所有者をclickhouseユーザーとグループに更新する必要があります: - -```bash -chown clickhouse:clickhouse /etc/clickhouse-server/config.d/storage_config.xml -``` - -これで、ClickHouseサーバーを再起動して変更を適用します: - -```bash -service clickhouse-server restart -``` - -## 2. S3によるバックアップテーブルの作成 {#2-create-a-table-backed-by-s3} - -S3ディスクが正しく構成されたかをテストするために、テーブルの作成とクエリを試みることができます。 - -新しいS3ストレージポリシーを指定してテーブルを作成します: - -```sql -CREATE TABLE my_s3_table - ( - `id` UInt64, - `column1` String - ) -ENGINE = MergeTree -ORDER BY id -SETTINGS storage_policy = 's3_main'; -``` - -`S3BackedMergeTree`としてエンジンを指定する必要がなかったことに注意してください。ClickHouseは、テーブルがS3をストレージとして使用していることを検出すると、エンジンタイプを内部的に自動的に変換します。 - -テーブルが正しいポリシーで作成されたことを示します: - -```sql -SHOW CREATE TABLE my_s3_table; -``` - -次の結果が表示されるはずです: - -```response -┌─statement──────────────────────────────────────────────────── -│ CREATE TABLE default.my_s3_table -( - `id` UInt64, - `column1` String -) -ENGINE = MergeTree -ORDER BY id -SETTINGS storage_policy = 's3_main', index_granularity = 8192 -└────────────────────────────────────────────────────────────── -``` - -次に、新しいテーブルにいくつかの行を挿入します: - -```sql -INSERT INTO my_s3_table (id, column1) - VALUES (1, 'abc'), (2, 'xyz'); -``` - -行が挿入されたことを確認しましょう: - -```sql -SELECT * FROM my_s3_table; -``` - -```response -┌─id─┬─column1─┐ -│ 1 │ abc │ -│ 2 │ xyz │ -└────┴─────────┘ - -2 rows in set. Elapsed: 0.284 sec. -``` - -AWSコンソールで、データがS3に正常に挿入された場合、ClickHouseが指定したバケットに新しいファイルを作成したことが確認できるはずです。 - -すべてが正常に機能した場合、ClickHouseを使用してストレージとコンピュートを分離した状態になっています! - - - -## 3. フォールトトレランスのためのレプリケーションの実装 (オプション) {#3-implementing-replication-for-fault-tolerance-optional} - -:::warning -AWS/GCSのライフサイクルポリシーを構成しないでください。これはサポートされておらず、テーブルが壊れる可能性があります。 -::: - -フォールトトレランスを実現するために、複数のAWSリージョンに分散された複数のClickHouseサーバーノードを使用し、各ノードにS3バケットを持つことができます。 - -S3ディスクを使用したレプリケーションは、`ReplicatedMergeTree`テーブルエンジンを使用することで実現できます。詳細については、次のガイドを参照してください: -- [S3オブジェクトストレージを使用して2つのAWSリージョンにまたがる単一シャードのレプリケーション](/integrations/s3#s3-multi-region). 
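本ガイドで作成した `s3_main` ストレージポリシーと `ReplicatedMergeTree` を組み合わせる場合の最小限のスケッチを示します(`{shard}` と `{replica}` のマクロが各ノードで設定済みであることを仮定しています):

```sql
-- S3バックエンドのディスク上でレプリケーションを行うテーブルのスケッチ
CREATE TABLE my_replicated_s3_table
(
    `id` UInt64,
    `column1` String
)
ENGINE = ReplicatedMergeTree('/clickhouse/tables/{shard}/default/my_replicated_s3_table', '{replica}')
ORDER BY id
SETTINGS storage_policy = 's3_main';
```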
- -## さらなる情報 {#further-reading} - -- [SharedMergeTreeテーブルエンジン](/cloud/reference/shared-merge-tree) -- [SharedMergeTree発表ブログ](https://clickhouse.com/blog/clickhouse-cloud-boosts-performance-with-sharedmergetree-and-lightweight-updates) diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/guides/separation-storage-compute.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/guides/separation-storage-compute.md.hash deleted file mode 100644 index 01a2a1351a9..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/guides/separation-storage-compute.md.hash +++ /dev/null @@ -1 +0,0 @@ -3988606d7e30b6b1 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/guides/sizing-and-hardware-recommendations.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/guides/sizing-and-hardware-recommendations.md.hash deleted file mode 100644 index e1e7bbacd9f..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/guides/sizing-and-hardware-recommendations.md.hash +++ /dev/null @@ -1 +0,0 @@ -bec31585492940ce diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/guides/sre/_category_.yml b/i18n/jp/docusaurus-plugin-content-docs/current/guides/sre/_category_.yml deleted file mode 100644 index 7386a92ca2c..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/guides/sre/_category_.yml +++ /dev/null @@ -1,8 +0,0 @@ -position: 5 -label: 'SRE Guides' -collapsible: true -collapsed: true -link: - type: generated-index - title: SRE Guides - slug: /guides/sre diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/guides/sre/configuring-ssl.md b/i18n/jp/docusaurus-plugin-content-docs/current/guides/sre/configuring-ssl.md deleted file mode 100644 index b07ebd56d49..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/guides/sre/configuring-ssl.md +++ /dev/null @@ -1,506 +0,0 @@ ---- -slug: '/guides/sre/configuring-ssl' -sidebar_label: 'SSL-TLSの設定' -sidebar_position: 20 -title: 'SSL-TLSの設定' -description: 'このガイドでは、ClickHouseをOpenSSL証明書を使用して接続を検証するように構成するためのシンプルで最小限の設定を提供しています。' ---- - -import SelfManaged from '@site/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_self_managed_only_automated.md'; -import configuringSsl01 from '@site/static/images/guides/sre/configuring-ssl_01.png'; -import Image from '@theme/IdealImage'; - - -# SSL-TLSの設定 - - - -このガイドでは、ClickHouseを設定してOpenSSL証明書を使用して接続を検証するためのシンプルで最小限の設定を提供します。このデモでは、自己署名の証明書を用いた認証局(CA)証明書とキーを作成し、適切な設定で接続を行います。 - -:::note -TLSの実装は複雑であり、完全に安全で堅牢な展開を確保するために考慮すべき多くのオプションがあります。これは、基本的なSSL/TLS設定の例を含む基本的なチュートリアルです。正しい証明書を生成するためにPKI/セキュリティチームに相談してください。 - -証明書の使用に関する[この基本的なチュートリアル](https://ubuntu.com/server/docs/security-certificates)を確認して、導入の概要を理解してください。 -::: - -## 1. ClickHouseのデプロイメントを作成する {#1-create-a-clickhouse-deployment} - -このガイドは、Ubuntu 20.04を使用し、次のホストにDEBパッケージ(aptを使用)でインストールされたClickHouseを使用して書かれました。ドメインは`marsnet.local`です。 - -|ホスト |IPアドレス| -|--------|-------------| -|`chnode1` |192.168.1.221| -|`chnode2` |192.168.1.222| -|`chnode3` |192.168.1.223| - - -:::note -ClickHouseのインストール方法についての詳細は、[クイックスタート](/getting-started/install/install.mdx)をご覧ください。 -::: - - -## 2. SSL証明書を作成する {#2-create-ssl-certificates} -:::note -自己署名の証明書はデモ目的のみであり、本番環境で使用すべきではありません。証明書リクエストは、組織によって署名され、設定に構成されるCAチェーンを使用して検証されるように作成する必要があります。ただし、これらの手順は設定を構成してテストするために使用でき、その後、本番環境で使用される実際の証明書に置き換えることができます。 -::: - -1. 新しいCA用のキーを生成します: - ```bash - openssl genrsa -out marsnet_ca.key 2048 - ``` - -2. 
新しい自己署名CA証明書を生成します。以下は、CAキーを使用して他の証明書に署名するために使用される新しい証明書を作成します: - ```bash - openssl req -x509 -subj "/CN=marsnet.local CA" -nodes -key marsnet_ca.key -days 1095 -out marsnet_ca.crt - ``` - - :::note - キーとCA証明書は、クラスター外の安全な場所にバックアップしてください。ノード証明書を生成した後、キーはクラスターのノードから削除する必要があります。 - ::: - -3. 新しいCA証明書の内容を確認します: - ```bash - openssl x509 -in marsnet_ca.crt -text - ``` - -4. 各ノード用に証明書リクエスト(CSR)を作成し、キーを生成します: - ```bash - openssl req -newkey rsa:2048 -nodes -subj "/CN=chnode1" -addext "subjectAltName = DNS:chnode1.marsnet.local,IP:192.168.1.221" -keyout chnode1.key -out chnode1.csr - openssl req -newkey rsa:2048 -nodes -subj "/CN=chnode2" -addext "subjectAltName = DNS:chnode2.marsnet.local,IP:192.168.1.222" -keyout chnode2.key -out chnode2.csr - openssl req -newkey rsa:2048 -nodes -subj "/CN=chnode3" -addext "subjectAltName = DNS:chnode3.marsnet.local,IP:192.168.1.223" -keyout chnode3.key -out chnode3.csr - ``` - -5. CSRとCAを使用して、新しい証明書とキーのペアを作成します: - ```bash - openssl x509 -req -in chnode1.csr -out chnode1.crt -CA marsnet_ca.crt -CAkey marsnet_ca.key -days 365 -copy_extensions copy - openssl x509 -req -in chnode2.csr -out chnode2.crt -CA marsnet_ca.crt -CAkey marsnet_ca.key -days 365 -copy_extensions copy - openssl x509 -req -in chnode3.csr -out chnode3.crt -CA marsnet_ca.crt -CAkey marsnet_ca.key -days 365 -copy_extensions copy - ``` - -6. 主題と発行者について証明書を確認します: - ```bash - openssl x509 -in chnode1.crt -text -noout - ``` - -7. 新しい証明書がCA証明書と一致することを確認します: - ```bash - openssl verify -CAfile marsnet_ca.crt chnode1.crt - chnode1.crt: OK - ``` - -## 3. 証明書とキーを保存するためのディレクトリを作成して構成する {#3-create-and-configure-a-directory-to-store-certificates-and-keys} - -:::note -これは各ノードで行う必要があります。各ホストに適切な証明書とキーを使用してください。 -::: - -1. 各ノードのClickHouseがアクセスできるディレクトリにフォルダーを作成します。デフォルトの構成ディレクトリ(例:`/etc/clickhouse-server`)を推奨します: - ```bash - mkdir /etc/clickhouse-server/certs - ``` - -2. 各ノードに対応するCA証明書、ノード証明書、キーを新しいcertsディレクトリにコピーします。 - -3. ClickHouseが証明書を読み取れるように所有者と権限を更新します: - ```bash - chown clickhouse:clickhouse -R /etc/clickhouse-server/certs - chmod 600 /etc/clickhouse-server/certs/* - chmod 755 /etc/clickhouse-server/certs - ll /etc/clickhouse-server/certs - ``` - - ```response - total 20 - drw-r--r-- 2 clickhouse clickhouse 4096 Apr 12 20:23 ./ - drwx------ 5 clickhouse clickhouse 4096 Apr 12 20:23 ../ - -rw------- 1 clickhouse clickhouse 997 Apr 12 20:22 chnode1.crt - -rw------- 1 clickhouse clickhouse 1708 Apr 12 20:22 chnode1.key - -rw------- 1 clickhouse clickhouse 1131 Apr 12 20:23 marsnet_ca.crt - ``` - -## 4. ClickHouse Keeperを使用して基本クラスターで環境を構成する {#4-configure-the-environment-with-basic-clusters-using-clickhouse-keeper} - -このデプロイメント環境では、以下のClickHouse Keeper設定が各ノードで使用されます。各サーバーにはそれぞれの``があります。(例えば、ノード`chnode1`のためには`1`など。) - -:::note -ClickHouse Keeperの推奨ポートは`9281`です。ただし、ポートは構成可能であり、このポートが環境内の他のアプリケーションによって既に使用されている場合は設定できます。 - -すべてのオプションについて完全な説明が必要な場合は、https://clickhouse.com/docs/operations/clickhouse-keeper/をご覧ください。 -::: - - -1. ClickHouseサーバー`config.xml`の``タグ内に以下を追加します。 - - :::note - 本番環境では、`config.d`ディレクトリに別の`.xml`構成ファイルを使用することが推奨されます。 - 詳細情報については、https://clickhouse.com/docs/operations/configuration-files/をご覧ください。 - ::: - - ```xml - - 9281 - 1 - /var/lib/clickhouse/coordination/log - /var/lib/clickhouse/coordination/snapshots - - - 10000 - 30000 - trace - - - - true - - 1 - chnode1.marsnet.local - 9444 - - - 2 - chnode2.marsnet.local - 9444 - - - 3 - chnode3.marsnet.local - 9444 - - - - ``` - -2. 
すべてのノードでkeeper設定のコメントを外し、``フラグを1に設定します: - ```xml - - - chnode1.marsnet.local - 9281 - 1 - - - chnode2.marsnet.local - 9281 - 1 - - - chnode3.marsnet.local - 9281 - 1 - - - ``` - -3. `chnode1`および`chnode2`の``セクションに次のクラスター設定を更新して追加します。`chnode3`はClickHouse Keeperの過半数として使用されます。 - - :::note - この構成では、1つの例としてクラスターが構成されています。テストサンプルクラスターは削除、コメントアウトするか、既存のクラスターがテスト中であればポートを更新し、``オプションを追加する必要があります。デフォルトユーザーがインストール時または`users.xml`ファイルにパスワードを設定されている場合は、``と``を設定する必要があります。 - ::: - - 以下は、2つのサーバー(各ノードに1つ)のシャードレプリカを持つクラスターを作成します。 - ```xml - - - - - chnode1.marsnet.local - 9440 - default - ClickHouse123! - 1 - - - chnode2.marsnet.local - 9440 - default - ClickHouse123! - 1 - - - - - ``` - -4. テスト用のReplicatedMergeTreeテーブルを作成できるようにマクロ値を定義します。`chnode1`では: - ```xml - - 1 - replica_1 - - ``` - - `chnode2`では: - ```xml - - 1 - replica_2 - - ``` - -## 5. ClickHouseノードでSSL-TLSインターフェースを構成する {#5-configure-ssl-tls-interfaces-on-clickhouse-nodes} -以下の設定は、ClickHouseサーバーの`config.xml`に構成されます。 - -1. デプロイメントの表示名を設定します(オプション): - ```xml - clickhouse - ``` - -2. ClickHouseが外部ポートでリッスンするように設定します: - ```xml - 0.0.0.0 - ``` - -3. 各ノードの`https`ポートを構成し、`http`ポートを無効にします: - ```xml - 8443 - - ``` - -4. 各ノードでClickHouse NativeセキュアTCPポートを構成し、デフォルトの非セキュアポートを無効にします: - ```xml - 9440 - - ``` - -5. 各ノードで`interserver https`ポートを構成し、デフォルトの非セキュアポートを無効にします: - ```xml - 9010 - - ``` - -6. OpenSSLを証明書とパスで構成します - - :::note - 各ファイル名とパスは、構成されるノードに合わせて更新する必要があります。 - 例えば、`chnode2`ホストで構成する際に``項目を`chnode2.crt`に更新します。 - ::: - - ```xml - - - /etc/clickhouse-server/certs/chnode1.crt - /etc/clickhouse-server/certs/chnode1.key - relaxed - /etc/clickhouse-server/certs/marsnet_ca.crt - true - sslv2,sslv3 - true - - - false - /etc/clickhouse-server/certs/marsnet_ca.crt - true - sslv2,sslv3 - true - relaxed - - RejectCertificateHandler - - - - ``` - - 詳細情報については、https://clickhouse.com/docs/operations/server-configuration-parameters/settings/#server_configuration_parameters-opensslをご覧ください。 - -7. 各ノードでSSL用にgRPCを構成します: - ```xml - - 1 - /etc/clickhouse-server/certs/chnode1.crt - /etc/clickhouse-server/certs/chnode1.key - true - /etc/clickhouse-server/certs/marsnet_ca.crt - none - 0 - -1 - -1 - false - - ``` - - 詳細情報については、https://clickhouse.com/docs/interfaces/grpc/をご覧ください。 - -8. 少なくとも1つのノードのClickHouseクライアントをSSL接続を使用するように設定します(デフォルトでは`/etc/clickhouse-client/`にあります): - ```xml - - - false - /etc/clickhouse-server/certs/marsnet_ca.crt - true - sslv2,sslv3 - true - - RejectCertificateHandler - - - - ``` - -6. MySQLおよびPostgreSQLのデフォルトエミュレーションポートを無効にします: - ```xml - - - ``` - -## 6. テスト {#6-testing} -1. ノードを一つずつ起動します: - ```bash - service clickhouse-server start - ``` - -2. 
セキュアポートが起動してリッスンしていることを確認します。各ノードでの出力は次のようになります: - ```bash - root@chnode1:/etc/clickhouse-server# netstat -ano | grep tcp - ``` - - ```response - tcp 0 0 0.0.0.0:9010 0.0.0.0:* LISTEN off (0.00/0/0) - tcp 0 0 127.0.0.53:53 0.0.0.0:* LISTEN off (0.00/0/0) - tcp 0 0 0.0.0.0:22 0.0.0.0:* LISTEN off (0.00/0/0) - tcp 0 0 0.0.0.0:8443 0.0.0.0:* LISTEN off (0.00/0/0) - tcp 0 0 0.0.0.0:9440 0.0.0.0:* LISTEN off (0.00/0/0) - tcp 0 0 0.0.0.0:9281 0.0.0.0:* LISTEN off (0.00/0/0) - tcp 0 0 192.168.1.221:33046 192.168.1.222:9444 ESTABLISHED off (0.00/0/0) - tcp 0 0 192.168.1.221:42730 192.168.1.223:9444 ESTABLISHED off (0.00/0/0) - tcp 0 0 192.168.1.221:51952 192.168.1.222:9281 ESTABLISHED off (0.00/0/0) - tcp 0 0 192.168.1.221:22 192.168.1.210:49801 ESTABLISHED keepalive (6618.05/0/0) - tcp 0 64 192.168.1.221:22 192.168.1.210:59195 ESTABLISHED on (0.24/0/0) - tcp6 0 0 :::22 :::* LISTEN off (0.00/0/0) - tcp6 0 0 :::9444 :::* LISTEN off (0.00/0/0) - tcp6 0 0 192.168.1.221:9444 192.168.1.222:59046 ESTABLISHED off (0.00/0/0) - tcp6 0 0 192.168.1.221:9444 192.168.1.223:41976 ESTABLISHED off (0.00/0/0) - ``` - - |ClickHouseポート |説明| - |--------|-------------| - |8443 | httpsインターフェース| - |9010 | interserver httpsポート| - |9281 | ClickHouse Keeperセキュアポート| - |9440 | セキュアNative TCPプロトコル| - |9444 | ClickHouse Keeper Raftポート | - -3. ClickHouse Keeperのヘルスを確認します。 -通常の[4文字単語(4lW)](/guides/sre/keeper/index.md#four-letter-word-commands)コマンドは、TLSなしで`echo`を使用しても機能しません。以下は`openssl`を使用してコマンドを実行する方法です。 - - `openssl`でインタラクティブセッションを開始します - - ```bash - openssl s_client -connect chnode1.marsnet.local:9281 - ``` - ```response - CONNECTED(00000003) - depth=0 CN = chnode1 - verify error:num=20:unable to get local issuer certificate - verify return:1 - depth=0 CN = chnode1 - verify error:num=21:unable to verify the first certificate - verify return:1 - --- - Certificate chain - 0 s:CN = chnode1 - i:CN = marsnet.local CA - --- - Server certificate - -----BEGIN CERTIFICATE----- - MIICtDCCAZwCFD321grxU3G5pf6hjitf2u7vkusYMA0GCSqGSIb3DQEBCwUAMBsx - ... - ``` - - - opensslセッションで4LWコマンドを送信します: - - ```bash - mntr - ``` - ```response - --- - Post-Handshake New Session Ticket arrived: - SSL-Session: - Protocol : TLSv1.3 - ... - read R BLOCK - zk_version v22.7.3.5-stable-e140b8b5f3a5b660b6b576747063fd040f583cf3 - zk_avg_latency 0 - # highlight-next-line - zk_max_latency 4087 - zk_min_latency 0 - zk_packets_received 4565774 - zk_packets_sent 4565773 - zk_num_alive_connections 2 - zk_outstanding_requests 0 - # highlight-next-line - zk_server_state leader - zk_znode_count 1087 - zk_watch_count 26 - zk_ephemerals_count 12 - zk_approximate_data_size 426062 - zk_key_arena_size 258048 - zk_latest_snapshot_size 0 - zk_open_file_descriptor_count 187 - zk_max_file_descriptor_count 18446744073709551615 - # highlight-next-line - zk_followers 2 - zk_synced_followers 1 - closed - ``` - -4. ClickHouseクライアントを`--secure`フラグとSSLポートを使用して起動します: - ```bash - root@chnode1:/etc/clickhouse-server# clickhouse-client --user default --password ClickHouse123! --port 9440 --secure --host chnode1.marsnet.local - ClickHouse client version 22.3.3.44 (official build). - Connecting to chnode1.marsnet.local:9440 as user default. - Connected to ClickHouse server version 22.3.3 revision 54455. - - clickhouse :) - ``` - -5. `https`インターフェースを使用してPlay UIにログインします:`https://chnode1.marsnet.local:8443/play`。 - - - - :::note - ブラウザは信頼されていない証明書を表示します。これは、ワークステーションからアクセスされており、証明書がクライアントマシンのルートCAストアに存在しないためです。 - 公的な権威または企業CAから発行された証明書を使用すると、信頼されることになります。 - ::: - -6. 
レプリケートテーブルを作成します: - ```sql - clickhouse :) CREATE TABLE repl_table ON CLUSTER cluster_1S_2R - ( - id UInt64, - column1 Date, - column2 String - ) - ENGINE = ReplicatedMergeTree('/clickhouse/tables/{shard}/default/repl_table', '{replica}' ) - ORDER BY (id); - ``` - - ```response - ┌─host──────────────────┬─port─┬─status─┬─error─┬─num_hosts_remaining─┬─num_hosts_active─┐ - │ chnode2.marsnet.local │ 9440 │ 0 │ │ 1 │ 0 │ - │ chnode1.marsnet.local │ 9440 │ 0 │ │ 0 │ 0 │ - └───────────────────────┴──────┴────────┴───────┴─────────────────────┴──────────────────┘ - ``` - -7. `chnode1`にいくつかの行を追加します: - ```sql - INSERT INTO repl_table - (id, column1, column2) - VALUES - (1,'2022-04-01','abc'), - (2,'2022-04-02','def'); - ``` - -8. `chnode2`で行を表示してレプリケーションを確認します: - ```sql - SELECT * FROM repl_table - ``` - - ```response - ┌─id─┬────column1─┬─column2─┐ - │ 1 │ 2022-04-01 │ abc │ - │ 2 │ 2022-04-02 │ def │ - └────┴────────────┴─────────┘ - ``` - -## まとめ {#summary} - -この記事では、SSL/TLSで構成されたClickHouse環境の設定に焦点を当てました。設定は本番環境での異なる要件に応じて異なります。たとえば、証明書の検証レベル、プロトコル、暗号などです。しかし、設定および安全な接続を実装するために関与するステップを理解できたと思います。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/guides/sre/configuring-ssl.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/guides/sre/configuring-ssl.md.hash deleted file mode 100644 index a105b6df38d..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/guides/sre/configuring-ssl.md.hash +++ /dev/null @@ -1 +0,0 @@ -1ae8066e8563a918 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/guides/sre/index.md b/i18n/jp/docusaurus-plugin-content-docs/current/guides/sre/index.md deleted file mode 100644 index d84571b8dd7..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/guides/sre/index.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -slug: '/security-and-authentication' -title: 'セキュリティと認証' -description: 'セキュリティと認証のためのランディングページ' ---- - - - -| Page | Description | -|------------------------------------------------------------------------|---------------------------------------------------------------------------------------------------------| -| [ユーザーとロール](/operations/access-rights) | ClickHouseがRBACアプローチに基づくアクセス制御管理をどのようにサポートしているかについて学びます。 | -| [外部認証機関](/operations/external-authenticators) | OSS ClickHouseが外部サービスを使用してユーザーの認証と管理をどのようにサポートしているかについて学びます。 | diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/guides/sre/index.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/guides/sre/index.md.hash deleted file mode 100644 index 99f9631cd5d..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/guides/sre/index.md.hash +++ /dev/null @@ -1 +0,0 @@ -613c1892f3fb58ef diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/guides/sre/keeper/_category_.yml b/i18n/jp/docusaurus-plugin-content-docs/current/guides/sre/keeper/_category_.yml deleted file mode 100644 index 98af0919b7a..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/guides/sre/keeper/_category_.yml +++ /dev/null @@ -1,8 +0,0 @@ -position: 5 -label: 'ClickHouse Keeper' -collapsible: true -collapsed: true -link: - type: generated-index - title: ClickHouse Keeper - slug: /guides/sre/clickhouse-keeper diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/guides/sre/keeper/index.md b/i18n/jp/docusaurus-plugin-content-docs/current/guides/sre/keeper/index.md deleted file mode 100644 index bcbffb1cfc0..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/guides/sre/keeper/index.md +++ /dev/null @@ -1,1301 +0,0 @@ ---- -slug: 
'/guides/sre/keeper/clickhouse-keeper' -sidebar_label: 'ClickHouse Keeper' -sidebar_position: 10 -keywords: -- 'Keeper' -- 'ZooKeeper' -- 'clickhouse-keeper' -description: 'ClickHouse Keeper, or clickhouse-keeper, replaces ZooKeeper and provides - replication and coordination.' -title: 'ClickHouse Keeper' ---- - -import SelfManaged from '@site/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_self_managed_only_automated.md'; - - -# ClickHouse Keeper (clickhouse-keeper) - - - -ClickHouse Keeperは、データの[レプリケーション](/engines/table-engines/mergetree-family/replication.md)および[分散DDL](/sql-reference/distributed-ddl.md)クエリの実行のための調整システムを提供します。ClickHouse Keeperは、ZooKeeperと互換性があります。 - -### 実装の詳細 {#implementation-details} - -ZooKeeperは、初期の著名なオープンソースの調整システムの1つです。Javaで実装されており、かなりシンプルで強力なデータモデルを持っています。ZooKeeperの調整アルゴリズムであるZooKeeper Atomic Broadcast (ZAB)は、各ZooKeeperノードがローカルでリードに応じるため、リードに対する線形性の保証を提供しません。ZooKeeperとは異なり、ClickHouse KeeperはC++で書かれており、[RAFTアルゴリズム](https://raft.github.io/)の[実装](https://github.com/eBay/NuRaft)を使用しています。このアルゴリズムは、リードとライティングの両方に対して線形性を許可し、さまざまな言語でのいくつかのオープンソース実装があります。 - -デフォルトでは、ClickHouse KeeperはZooKeeperと同じ保証を提供します:線形性のある書き込みと非線形性のあるリードです。互換性のあるクライアント-サーバプロトコルを持っているため、標準的なZooKeeperクライアントを使用してClickHouse Keeperと対話できます。スナップショットとログはZooKeeperとは互換性のない形式を持っていますが、`clickhouse-keeper-converter`ツールにより、ZooKeeperデータをClickHouse Keeperスナップショットに変換できます。ClickHouse KeeperのインターサーバプロトコルもZooKeeperとは互換性がないため、混合ZooKeeper / ClickHouse Keeperクラスターは不可能です。 - -ClickHouse Keeperは、[ZooKeeper](https://zookeeper.apache.org/doc/r3.1.2/zookeeperProgrammers.html#sc_ZooKeeperAccessControl)と同様にアクセス制御リスト(ACL)をサポートしています。ClickHouse Keeperは、同じ権限セットをサポートしており、`world`、`auth`、および`digest`という同一の組み込みスキームを持っています。ダイジェスト認証スキームは`username:password`のペアを使用し、パスワードはBase64でエンコードされています。 - -:::note -外部統合はサポートされていません。 -::: - -### 設定 {#configuration} - -ClickHouse Keeperは、ZooKeeperのスタンドアロンの代替として使用するか、ClickHouseサーバーの内部部分として使用できます。どちらの場合も、設定はほぼ同じ`.xml`ファイルです。 - -#### Keeperの設定項目 {#keeper-configuration-settings} - -主要なClickHouse Keeperの設定タグは``で、次のパラメータがあります。 - -| パラメータ | 説明 | デフォルト | -|----------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------| -| `tcp_port` | クライアントが接続するためのポート。 | `2181` | -| `tcp_port_secure` | クライアントとkeeper-server間のSSL接続のためのセキュアポート。 | - | -| `server_id` | ユニークなサーバIDで、ClickHouse Keeperクラスタの各参加者はユニークな番号(1, 2, 3など)を持たなければなりません。 | - | -| `log_storage_path` | 調整ログのパスで、ZooKeeperと同様に、非忙しいノードにログを保存するのが最適です。 | - | -| `snapshot_storage_path` | 調整スナップショットのパス。 | - | -| `enable_reconfiguration` | [`reconfig`](#reconfiguration)を介して動的なクラスター再構成を有効にします。 | `False` | -| `max_memory_usage_soft_limit` | Keeperの最大メモリ使用量のソフトリミット(バイト数)。 | `max_memory_usage_soft_limit_ratio` * `physical_memory_amount` | -| `max_memory_usage_soft_limit_ratio` | `max_memory_usage_soft_limit`が設定されていない場合やゼロに設定されている場合、この値を使用してデフォルトのソフトリミットを定義します。 | `0.9` | -| `cgroups_memory_observer_wait_time` | `max_memory_usage_soft_limit`が設定されていない場合や`0`に設定されている場合、この間隔を使用して物理メモリの量を観察します。メモリ量が変わると、`max_memory_usage_soft_limit_ratio`によってKeeperのメモリソフトリミットを再計算します。 | `15` | -| `http_control` | [HTTP制御](#http-control)インターフェイスの設定。 | - | -| `digest_enabled` | リアルタイムデータ整合性チェックを有効にします。 | `True` | -| `create_snapshot_on_exit` | シャットダウン中にスナップショットを作成します。 | - 
| -| `hostname_checks_enabled` | クラスター設定のためのサニティホスト名チェックを有効にします(例:リモートエンドポイントと共にlocalhostが使われている場合)。 | `True` | -| `four_letter_word_white_list` | 4lwコマンドのホワイトリスト。 | `conf, cons, crst, envi, ruok, srst, srvr, stat, wchs, dirs, mntr, isro, rcvr, apiv, csnp, lgif, rqld, ydld` | - -他の一般的なパラメータは、ClickHouseサーバーの設定から受け継がれます(`listen_host`、`logger`など)。 - -#### 内部調整設定 {#internal-coordination-settings} - -内部調整設定は、`.`セクションにあり、次のパラメータがあります。 - -| パラメータ | 説明 | デフォルト | -|---------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------| -| `operation_timeout_ms` | 単一のクライアント操作のタイムアウト(ms) | `10000` | -| `min_session_timeout_ms` | クライアントセッションの最小タイムアウト(ms) | `10000` | -| `session_timeout_ms` | クライアントセッションの最大タイムアウト(ms) | `100000` | -| `dead_session_check_period_ms` | ClickHouse Keeperがデッドセッションをチェックして削除する頻度(ms) | `500` | -| `heart_beat_interval_ms` | ClickHouse Keeperリーダーがフォロワーにハートビートを送信する頻度(ms) | `500` | -| `election_timeout_lower_bound_ms` | フォロワーがリーダーからハートビートを受信しない場合、この間隔内でリーダー選挙を開始できます。`election_timeout_upper_bound_ms`以下でなければなりません。理想的には等しくない方が良いです。 | `1000` | -| `election_timeout_upper_bound_ms` | フォロワーがリーダーからハートビートを受信しない場合、リーダー選挙を開始しなければなりません。 | `2000` | -| `rotate_log_storage_interval` | 単一ファイルに格納するログレコードの数。 | `100000` | -| `reserved_log_items` | コンパクション前に格納する調整ログレコードの数。 | `100000` | -| `snapshot_distance` | ClickHouse Keeperが新しいスナップショットを作成する頻度(ログ内のレコード数)。 | `100000` | -| `snapshots_to_keep` | 保持するスナップショットの数。 | `3` | -| `stale_log_gap` | リーダーがフォロワーをスティールと見なし、ログの代わりにスナップショットを送信するしきい値。 | `10000` | -| `fresh_log_gap` | ノードがフレッシュになったとき。 | `200` | -| `max_requests_batch_size` | RAFTに送信される前のリクエスト数の最大バッチサイズ。 | `100` | -| `force_sync` | 調整ログへの各書き込み時に`fsync`を呼び出します。 | `true` | -| `quorum_reads` | 読み取りリクエストを全てRAFTコンセンサスを通じて書き込みとして実行します。 | `false` | -| `raft_logs_level` | 調整に関するテキストロギングレベル(トレース、デバッグなど)。 | `system default` | -| `auto_forwarding` | フォロワーからリーダーへの書き込みリクエストの転送を許可します。 | `true` | -| `shutdown_timeout` | 内部接続を終了し、シャットダウンするまで待機します(ms)。 | `5000` | -| `startup_timeout` | サーバーが指定されたタイムアウト内に他のクォーラム参加者と接続しない場合、終了します(ms)。 | `30000` | -| `async_replication` | 非同期レプリケーションを有効にします。すべての書き込みおよび読み取り保証が保持され、パフォーマンスが向上します。設定はデフォルトで無効になっており、後方互換性を壊さないようになっています。 | `false` | -| `latest_logs_cache_size_threshold` | 最新のログエントリのメモリ内キャッシュの最大合計サイズ。 | `1GiB` | -| `commit_logs_cache_size_threshold` | コミットのために次に必要なログエントリのメモリ内キャッシュの最大合計サイズ。 | `500MiB` | -| `disk_move_retries_wait_ms` | ファイルがディスク間で移動中に失敗が発生した場合、再試行の間隔。 | `1000` | -| `disk_move_retries_during_init` | 初期化中にファイルがディスク間で移動されている間、失敗が発生した場合の再試行回数。 | `100` | -| `experimental_use_rocksdb` | rocksdbをバックエンドストレージとして使用します。 | `0` | - -クウォータム設定は`.`セクションにあり、サーバーの説明が含まれています。 - -クォーラム全体の唯一のパラメータは`secure`で、クォーラム参加者間の通信に対する暗号化接続を有効にします。このパラメータは、ノード間の内部通信のためにSSL接続が必要な場合は`true`に設定し、そうでない場合は未指定にしておくことができます。 - -各``の主なパラメータは次のとおりです。 - -- `id` — クォーラム内のサーバ識別子。 -- `hostname` — このサーバが配置されているホスト名。 -- `port` — このサーバが接続を待ち受けるポート。 -- `can_become_leader` — `learner`としてサーバを設定するために`false`に設定します。省略された場合、値は`true`です。 - -:::note -ClickHouse Keeperクラスタのトポロジーに変化があった場合(例:サーバの交換)、`server_id`と`hostname`のマッピングを一貫して維持し、異なるサーバに対して既存の`server_id`のシャッフルや再利用を避けるようにしてください(自動化スクリプトに依存してClickHouse Keeperを展開する場合に発生する可能性があります)。 - 
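-
-たとえば `server_id=2` のサーバを交換する場合は、ホストだけを差し替えて `server_id` はそのまま維持します。以下は動作イメージを示すスケッチで、ホスト名 `zoo2-new.example.com` は説明用の仮の値です:
-
-```xml
-<raft_configuration>
-    <!-- server_id=2 のホストのみを差し替え、id の再利用や入れ替えは行わない -->
-    <server>
-        <id>2</id>
-        <hostname>zoo2-new.example.com</hostname>
-        <port>9234</port>
-    </server>
-</raft_configuration>
-```
-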
-Keeperインスタンスのホストが変更可能な場合は、生のIPアドレスの代わりにホスト名を定義して使用することをお勧めします。ホスト名の変更は、サーバを削除して再追加することと同じであり、場合によっては実行できないことがあります(例えば、クォーラムに必要なKeeperインスタンスが不足している場合)。 -::: - -:::note -`async_replication`はデフォルトで無効になっており、後方互換性を壊さないようになっています。すべてのKeeperインスタンスが`async_replication`をサポートするバージョン(v23.9+)を実行している場合は、パフォーマンスの向上が望めるため、有効にすることをお勧めします。 -::: - -3つのノードを持つクォーラムの設定の例は、`test_keeper_`プレフィックスの付いた[統合テスト](https://github.com/ClickHouse/ClickHouse/tree/master/tests/integration)に見つけることができます。サーバー#1の例設定は次のとおりです。 - -```xml - - 2181 - 1 - /var/lib/clickhouse/coordination/log - /var/lib/clickhouse/coordination/snapshots - - - 10000 - 30000 - trace - - - - - 1 - zoo1 - 9234 - - - 2 - zoo2 - 9234 - - - 3 - zoo3 - 9234 - - - -``` - -### 実行方法 {#how-to-run} - -ClickHouse Keeperは、ClickHouseサーバーパッケージにバンドルされており、``の設定を`/etc/your_path_to_config/clickhouse-server/config.xml`に追加し、いつも通りClickHouseサーバーを起動します。スタンドアロンのClickHouse Keeperを実行したい場合は、次のように始めることができます。 - -```bash -clickhouse-keeper --config /etc/your_path_to_config/config.xml -``` - -シンボリックリンク(`clickhouse-keeper`)がない場合は、作成するか、`clickhouse`に対して`keeper`を引数として指定します。 - -```bash -clickhouse keeper --config /etc/your_path_to_config/config.xml -``` -### Four Letter Word Commands {#four-letter-word-commands} - -ClickHouse Keeperは、Zookeeperとほぼ同じ4lwコマンドを提供します。各コマンドは`mntr`、`stat`などの4文字で構成されています。いくつかの興味深いコマンドがあり、`stat`はサーバーや接続されたクライアントに関する一般的な情報を提供し、`srvr`と`cons`はそれぞれサーバーと接続の詳細情報を提供します。 - -4lwコマンドには、デフォルト値が`conf,cons,crst,envi,ruok,srst,srvr,stat,wchs,dirs,mntr,isro,rcvr,apiv,csnp,lgif,rqld,ydld`のホワイトリスト設定`four_letter_word_white_list`があります。 - -テレネットまたはncを使用してクライアントポートからClickHouse Keeperにコマンドを発行できます。 - -```bash -echo mntr | nc localhost 9181 -``` - -以下は詳細な4lwコマンドのリストです: - -- `ruok`: サーバーがエラーログなしで実行されているかどうかをテストします。サーバーが実行中であれば`imok`で応答します。そうでない場合は、全く応答しません。`imok`の応答はサーバーがクォーラムに参加していることを示すものではなく、単にサーバープロセスがアクティブで指定されたクライアントポートにバインドされていることを示します。クォーラムおよびクライアント接続情報に関する詳細は「stat」を使用してください。 - -```response -imok -``` - -- `mntr`: クラスターのヘルスを監視するために使用できる変数のリストを出力します。 - -```response -zk_version v21.11.1.1-prestable-7a4a0b0edef0ad6e0aa662cd3b90c3f4acf796e7 -zk_avg_latency 0 -zk_max_latency 0 -zk_min_latency 0 -zk_packets_received 68 -zk_packets_sent 68 -zk_num_alive_connections 1 -zk_outstanding_requests 0 -zk_server_state leader -zk_znode_count 4 -zk_watch_count 1 -zk_ephemerals_count 0 -zk_approximate_data_size 723 -zk_open_file_descriptor_count 310 -zk_max_file_descriptor_count 10240 -zk_followers 0 -zk_synced_followers 0 -``` - -- `srvr`: サーバーの完全な詳細をリストします。 - -```response -ClickHouse Keeper version: v21.11.1.1-prestable-7a4a0b0edef0ad6e0aa662cd3b90c3f4acf796e7 -Latency min/avg/max: 0/0/0 -Received: 2 -Sent : 2 -Connections: 1 -Outstanding: 0 -Zxid: 34 -Mode: leader -Node count: 4 -``` - -- `stat`: サーバーおよび接続されたクライアントに関する簡潔な情報をリストします。 - -```response -ClickHouse Keeper version: v21.11.1.1-prestable-7a4a0b0edef0ad6e0aa662cd3b90c3f4acf796e7 -Clients: - 192.168.1.1:52852(recved=0,sent=0) - 192.168.1.1:52042(recved=24,sent=48) -Latency min/avg/max: 0/0/0 -Received: 4 -Sent : 4 -Connections: 1 -Outstanding: 0 -Zxid: 36 -Mode: leader -Node count: 4 -``` - -- `srst`: サーバーの統計をリセットします。このコマンドは`srvr`、`mntr`、および`stat`の結果に影響を与えます。 - -```response -Server stats reset. 
-``` - -- `conf`: サーバーの設定に関する詳細を印刷します。 - -```response -server_id=1 -tcp_port=2181 -four_letter_word_white_list=* -log_storage_path=./coordination/logs -snapshot_storage_path=./coordination/snapshots -max_requests_batch_size=100 -session_timeout_ms=30000 -operation_timeout_ms=10000 -dead_session_check_period_ms=500 -heart_beat_interval_ms=500 -election_timeout_lower_bound_ms=1000 -election_timeout_upper_bound_ms=2000 -reserved_log_items=1000000000000000 -snapshot_distance=10000 -auto_forwarding=true -shutdown_timeout=5000 -startup_timeout=240000 -raft_logs_level=information -snapshots_to_keep=3 -rotate_log_storage_interval=100000 -stale_log_gap=10000 -fresh_log_gap=200 -max_requests_batch_size=100 -quorum_reads=false -force_sync=false -compress_logs=true -compress_snapshots_with_zstd_format=true -configuration_change_tries_count=20 -``` - -- `cons`: このサーバーに接続されているすべてのクライアントに関する接続/セッションの詳細をリストします。受信/送信パケット数、セッションID、操作の待機時間、最後の実行された操作などの情報が含まれます。 - -```response - 192.168.1.1:52163(recved=0,sent=0,sid=0xffffffffffffffff,lop=NA,est=1636454787393,to=30000,lzxid=0xffffffffffffffff,lresp=0,llat=0,minlat=0,avglat=0,maxlat=0) - 192.168.1.1:52042(recved=9,sent=18,sid=0x0000000000000001,lop=List,est=1636454739887,to=30000,lcxid=0x0000000000000005,lzxid=0x0000000000000005,lresp=1636454739892,llat=0,minlat=0,avglat=0,maxlat=0) -``` - -- `crst`: すべての接続の接続/セッションの統計をリセットします。 - -```response -Connection stats reset. -``` - -- `envi`: サーバーの環境に関する詳細を印刷します。 - -```response -Environment: -clickhouse.keeper.version=v21.11.1.1-prestable-7a4a0b0edef0ad6e0aa662cd3b90c3f4acf796e7 -host.name=ZBMAC-C02D4054M.local -os.name=Darwin -os.arch=x86_64 -os.version=19.6.0 -cpu.count=12 -user.name=root -user.home=/Users/JackyWoo/ -user.dir=/Users/JackyWoo/project/jd/clickhouse/cmake-build-debug/programs/ -user.tmp=/var/folders/b4/smbq5mfj7578f2jzwn602tt40000gn/T/ -``` - -- `dirs`: スナップショットおよびログファイルの合計サイズをバイト単位で表示します。 - -```response -snapshot_dir_size: 0 -log_dir_size: 3875 -``` - -- `isro`: サーバーが読み取り専用モードで実行されているかをテストします。サーバーが読み取り専用モードの場合は`ro`で応答し、そうでない場合は`rw`で応答します。 - -```response -rw -``` - -- `wchs`: サーバーのウォッチに関する簡略情報をリストします。 - -```response -1 connections watching 1 paths -Total watches:1 -``` - -- `wchc`: セッションごとのサーバーのウォッチに関する詳細情報をリストします。これにより、関連付けられたウォッチ(パス)を持つセッション(接続)のリストが出力されます。ウォッチの数によっては、この操作が高コスト(サーバーのパフォーマンスに影響を与える)となる場合があるため、注意して使用してください。 - -```response -0x0000000000000001 - /clickhouse/task_queue/ddl -``` - -- `wchp`: パスごとのサーバーのウォッチに関する詳細情報をリストします。これにより、関連付けられたセッションを持つパス(znodes)のリストが出力されます。ウォッチの数によっては、この操作が高コスト(すなわち、サーバーのパフォーマンスに影響を与える)になる可能性があるため、注意して使用してください。 - -```response -/clickhouse/task_queue/ddl - 0x0000000000000001 -``` - -- `dump`: 未処理のセッションとエフェメラルノードをリストします。これはリーダーでのみ機能します。 - -```response -Sessions dump (2): -0x0000000000000001 -0x0000000000000002 -Sessions with Ephemerals (1): -0x0000000000000001 - /clickhouse/task_queue/ddl -``` - -- `csnp`: スナップショット作成タスクをスケジュールします。成功した場合はスケジュールされたスナップショットの最後にコミットされたログインデックスを返し、失敗した場合は`Failed to schedule snapshot creation task.`と返します。スナップショットが完了したかどうかを判断するためには、`lgif`コマンドが役立ちます。 - -```response -100 -``` - -- `lgif`: Keeperログ情報。`first_log_idx`: ログストア内の最初のログインデックス; `first_log_term`: 私の最初のログターム; `last_log_idx`: ログストア内の最後のログインデックス; `last_log_term`: 私の最後のログターム; `last_committed_log_idx`: 状態マシンにおける私の最後にコミットされたログインデックス; `leader_committed_log_idx`: リーダーの視点からみたコミットされたログインデックス; `target_committed_log_idx`: コミットされるべきターゲットログインデックス; `last_snapshot_idx`: 最後のスナップショット内の最大コミットされたログインデックス。 - -```response -first_log_idx 1 -first_log_term 1 -last_log_idx 101 -last_log_term 1 
-last_committed_log_idx 100 -leader_committed_log_idx 101 -target_committed_log_idx 101 -last_snapshot_idx 50 -``` - -- `rqld`: 新しいリーダーになるリクエストを送信します。リクエストが送信された場合は`Sent leadership request to leader.`と返し、リクエストが送信されなかった場合は`Failed to send leadership request to leader.`と返します。ノードがすでにリーダーの場合、結果はリクエストが送信されたかのようになります。 - -```response -Sent leadership request to leader. -``` - -- `ftfl`: すべての機能フラグをリストし、Keeperインスタンスで有効になっているかどうかを確認します。 - -```response -filtered_list 1 -multi_read 1 -check_not_exists 0 -``` - -- `ydld`: リーダーシップを放棄してフォロワーになるリクエストを送信します。このリクエストを受信したサーバーがリーダーであれば、最初に書き込み操作を一時停止し、後継者(現在のリーダーは決して後継者にはならない)が最新のログのキャッチアップを終了するまで待機し、その後辞任します。後継者は自動的に選択されます。リクエストが送信された場合は`Sent yield leadership request to leader.`と返し、リクエストが送信されなかった場合は`Failed to send yield leadership request to leader.`と返します。ノードがすでにフォロワーの場合、結果はリクエストが送信されたかのようになります。 - -```response -Sent yield leadership request to leader. -``` - -- `pfev`: すべての収集されたイベントの値を返します。各イベントについて、イベント名、イベント値、およびイベントの説明を返します。 - -```response -FileOpen 62 Number of files opened. -Seek 4 Number of times the 'lseek' function was called. -ReadBufferFromFileDescriptorRead 126 Number of reads (read/pread) from a file descriptor. Does not include sockets. -ReadBufferFromFileDescriptorReadFailed 0 Number of times the read (read/pread) from a file descriptor have failed. -ReadBufferFromFileDescriptorReadBytes 178846 Number of bytes read from file descriptors. If the file is compressed, this will show the compressed data size. -WriteBufferFromFileDescriptorWrite 7 Number of writes (write/pwrite) to a file descriptor. Does not include sockets. -WriteBufferFromFileDescriptorWriteFailed 0 Number of times the write (write/pwrite) to a file descriptor have failed. -WriteBufferFromFileDescriptorWriteBytes 153 Number of bytes written to file descriptors. If the file is compressed, this will show compressed data size. -FileSync 2 Number of times the F_FULLFSYNC/fsync/fdatasync function was called for files. -DirectorySync 0 Number of times the F_FULLFSYNC/fsync/fdatasync function was called for directories. -FileSyncElapsedMicroseconds 12756 Total time spent waiting for F_FULLFSYNC/fsync/fdatasync syscall for files. -DirectorySyncElapsedMicroseconds 0 Total time spent waiting for F_FULLFSYNC/fsync/fdatasync syscall for directories. -ReadCompressedBytes 0 Number of bytes (the number of bytes before decompression) read from compressed sources (files, network). -CompressedReadBufferBlocks 0 Number of compressed blocks (the blocks of data that are compressed independent of each other) read from compressed sources (files, network). -CompressedReadBufferBytes 0 Number of uncompressed bytes (the number of bytes after decompression) read from compressed sources (files, network). -AIOWrite 0 Number of writes with Linux or FreeBSD AIO interface -AIOWriteBytes 0 Number of bytes written with Linux or FreeBSD AIO interface -... 
-``` -### HTTP Control {#http-control} - -ClickHouse Keeperは、レプリカがトラフィックを受信する準備が整ったかどうかを確認するためのHTTPインターフェイスを提供します。これは、[Kubernetes](https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#define-readiness-probes)のようなクラウド環境で使用されることがあります。 - -`/ready`エンドポイントを有効にする設定の例: - -```xml - - - - 9182 - - /ready - - - - -``` -### Feature flags {#feature-flags} - -KeeperはZooKeeperおよびそのクライアントと完全に互換性がありますが、ClickHouseクライアントが使用できる独自の機能やリクエストタイプもいくつか導入されています。これらの機能は後方互換性のない変更をもたらす可能性があるため、デフォルトではほとんどが無効になっており、`keeper_server.feature_flags`設定を使用して有効化できます。すべての機能は明示的に無効にすることができます。Keeperクラスターの新しい機能を有効にする場合は、最初にその機能をサポートしているバージョンにクラスター内のすべてのKeeperインスタンスを更新し、その後に機能自体を有効にすることをお勧めします。 - -`multi_read`を無効にし、`check_not_exists`を有効にする機能フラグ設定の例: - -```xml - - - - 0 - 1 - - - -``` - -利用可能な機能は以下の通りです: - -- `multi_read` - 複数のリクエストを読むためのサポート。デフォルト: `1` -- `filtered_list` - ノードの種類(エフェメラルまたは永続)によって結果をフィルタリングするリストリクエストのサポート。デフォルト: `1` -- `check_not_exists` - ノードが存在しないことを主張する`CheckNotExists`リクエストのサポート。デフォルト: `0` -- `create_if_not_exists` - ノードが存在しない場合にノードを作成しようとする`CreateIfNotExists`リクエストのサポート。存在する場合、変更は適用されず`ZOK`が返されます。デフォルト: `0` -- `remove_recursive` - ノードとそのサブツリーを削除する`RemoveRecursive`リクエストのサポート。デフォルト: `0` -### Migration from ZooKeeper {#migration-from-zookeeper} - -ZooKeeperからClickHouse Keeperへのシームレスな移行は不可能です。ZooKeeperクラスターを停止し、データを変換し、ClickHouse Keeperを起動する必要があります。`clickhouse-keeper-converter`ツールを使用すると、ZooKeeperのログやスナップショットをClickHouse Keeperのスナップショットに変換できます。このツールはZooKeeper > 3.4でのみ動作します。移行の手順は以下の通りです: - -1. すべてのZooKeeperノードを停止します。 - -2. オプションですが推奨: ZooKeeperのリーダーノードを見つけ、それを再起動してまた停止します。これにより、ZooKeeperは一貫したスナップショットを作成します。 - -3. リーダーで`clickhouse-keeper-converter`を実行します。例えば: - -```bash -clickhouse-keeper-converter --zookeeper-logs-dir /var/lib/zookeeper/version-2 --zookeeper-snapshots-dir /var/lib/zookeeper/version-2 --output-dir /path/to/clickhouse/keeper/snapshots -``` - -4. スナップショットを構成された`keeper`のあるClickHouseサーバーノードにコピーするか、ZooKeeperの代わりにClickHouse Keeperを起動します。スナップショットはすべてのノードに保存される必要があります。そうでない場合、空のノードが早く、いずれかのノードがリーダーになる可能性があります。 - -:::note -`keeper-converter`ツールは、Keeperのスタンドアロンバイナリからは使用できません。 -ClickHouseがインストールされている場合は、バイナリを直接使用できます: - -```bash -clickhouse keeper-converter ... -``` - -そうでない場合は、[バイナリをダウンロード](/getting-started/quick-start#download-the-binary)し、上記のようにClickHouseをインストールせずにツールを実行できます。 -::: -### Recovering after losing quorum {#recovering-after-losing-quorum} - -ClickHouse KeeperはRaftを使用しているため、クラスターのサイズに応じて一定数のノードクラッシュに耐えることができます。 \ -例えば、3ノードのクラスターでは、1ノードがクラッシュしても正常に動作し続けます。 - -クラスター構成は動的に設定可能ですが、一部の制限があります。再構成もRaftに依存しているため、ノードをクラスターに追加/削除するにはクォーラムが必要です。同時にクラスタ内のノードが多くクラッシュし、再起動の見込みがない場合、Raftは動作を停止し、従来の方法でクラスターを再構成することを許可しなくなります。 - -とはいえ、ClickHouse Keeperにはリカバリーモードがあり、ノード1つのみでクラスターを強制的に再構成することが可能です。これは、ノードを再起動できない場合や、同じエンドポイントで新しいインスタンスを立ち上げる場合にのみ、最終手段として行うべきです。 - -継続する前に確認すべき重要な点: -- 失敗したノードが再びクラスターに接続できないことを確認してください。 -- 段階で指定されるまで、新しいノードを起動しないでください。 - -上記のことが確認されたら、以下の手順を行う必要があります: -1. 新しいリーダーとして単一のKeeperノードを選択します。そのノードのデータがクラスター全体で使用されるため、最も最新の状態であるノードを使用することをお勧めします。 -2. 何かを行う前に、選択したノードの`log_storage_path`と`snapshot_storage_path`フォルダのバックアップを作成します。 -3. 使用するすべてのノードでクラスターを再設定します。 -4. 選択したノードに`rcvr`という4文字コマンドを送信し、そのノードをリカバリーモードに移行するか、選択したノードでKeeperインスタンスを停止し、`--force-recovery`引数をつけて再起動します。 -5. 1つずつ新しいノードでKeeperインスタンスを起動し、次のノードを起動する前に`mntr`が`zk_server_state`に対して`follower`を返すことを確認します。 -6. リカバリーモードの間、リーダーノードは`mntr`コマンドに対してエラーメッセージを返し、新しいノードとクォーラムを達成するまでクライアントやフォロワーからのリクエストを拒否します。 -7. 
クォーラムが達成されると、リーダーノードは通常の動作モードに戻り、すべてのリクエストをRaft-verifyを使用して受け入れ、`mntr`は`zk_server_state`に対して`leader`を返す必要があります。 -## Using disks with Keeper {#using-disks-with-keeper} - -Keeperはスナップショット、ログファイル、および状態ファイルを保存するための[外部ディスク](/operations/storing-data.md)のサブセットをサポートします。 - -サポートされているディスクの種類は次の通りです: -- s3_plain -- s3 -- local - -以下は設定に含まれるディスク定義の例です。 - -```xml - - - - - local - /var/lib/clickhouse/coordination/logs/ - - - s3_plain - https://some_s3_endpoint/logs/ - ACCESS_KEY - SECRET_KEY - - - local - /var/lib/clickhouse/coordination/snapshots/ - - - s3_plain - https://some_s3_endpoint/snapshots/ - ACCESS_KEY - SECRET_KEY - - - s3_plain - https://some_s3_endpoint/state/ - ACCESS_KEY - SECRET_KEY - - - - -``` - -ログ用のディスクを使用するには、`keeper_server.log_storage_disk`設定をディスクの名前に設定する必要があります。 -スナップショット用のディスクを使用するには、`keeper_server.snapshot_storage_disk`設定をディスクの名前に設定する必要があります。 -また、最新のログやスナップショットのために異なるディスクを使用することができ、`keeper_server.latest_log_storage_disk`と`keeper_server.latest_snapshot_storage_disk`をそれぞれ使用できます。 -その場合、Keeperは新しいログやスナップショットが作成されると自動的にファイルを正しいディスクに移動します。 -状態ファイル用のディスクを使用するには、`keeper_server.state_storage_disk`設定をディスクの名前に設定する必要があります。 - -ディスク間でファイルを移動することは安全であり、Keeperが転送の途中で停止した場合にデータを失うリスクはありません。 -ファイルが新しいディスクに完全に移動されるまで、古いディスクから削除されることはありません。 - -`keeper_server.coordination_settings.force_sync`が`true`に設定されているKeeperは(デフォルト値は`true`)、すべてのタイプのディスクに対していくつかの保証を満たすことができません。 -現在、`local`タイプのディスクだけが永続的な同期をサポートしています。 -`force_sync`が使用される場合は、`latest_log_storage_disk`が使用されていない場合、`log_storage_disk`は`local`ディスクである必要があります。 -`latest_log_storage_disk`が使用される場合、それは常に`local`ディスクであるべきです。 -`force_sync`が無効になっている場合は、すべてのタイプのディスクを任意の設定で使用できます。 - -Keeperインスタンスの可能なストレージセットアップは次のようになります: - -```xml - - - log_s3_plain - log_local - - snapshot_s3_plain - snapshot_local - - -``` - -このインスタンスは、最新のログ以外は`log_s3_plain`ディスクに保存し、最新のログは`log_local`ディスクに保存します。 -スナップショットにも同様のロジックが適用され、最新のスナップショット以外は`snapshot_s3_plain`に保存され、最新のスナップショットは`snapshot_local`ディスクに保存されます。 -### Changing disk setup {#changing-disk-setup} - -:::important -新しいディスクセットアップを適用する前に、すべてのKeeperログとスナップショットを手動でバックアップしてください。 -::: - -ティアードディスクセットアップが定義されている場合(最新のファイルに別々のディスクを使用)、Keeperは起動時にファイルを正しいディスクに自動的に移動しようとします。 -以前と同じ保証が適用され、ファイルが新しいディスクに完全に移動されるまで、古いディスクから削除されることはありません。これにより、安全に複数回の再起動が可能です。 - -ファイルを完全に新しいディスクに移動する必要がある場合(または2ディスク設定から単一のディスク設定に移動する場合)、`keeper_server.old_snapshot_storage_disk`および`keeper_server.old_log_storage_disk`の複数の定義を使用することができます。 - -以下の構成は、前の2ディスクセットアップから完全に新しい単一ディスクセットアップへの移行を示しています。 - -```xml - - - log_local - log_s3_plain - log_local2 - - snapshot_s3_plain - snapshot_local - snapshot_local2 - - -``` - -起動時に、すべてのログファイルは`log_local`と`log_s3_plain`から`log_local2`ディスクに移動されます。 -また、すべてのスナップショットファイルは`snapshot_local`と`snapshot_s3_plain`から`snapshot_local2`ディスクに移動されます。 -## Configuring logs cache {#configuring-logs-cache} - -ディスクから読み取るデータの量を最小限に抑えるために、Keeperはメモリにログエントリをキャッシュします。 -リクエストが大きい場合、ログエントリは過度のメモリを消費するため、キャッシュされたログの量には上限が設定されます。 -制限は以下の2つの設定で制御されます: -- `latest_logs_cache_size_threshold` - キャッシュに保存された最新のログの総サイズ -- `commit_logs_cache_size_threshold` - 次にコミットが必要な後続ログの総サイズ - -デフォルト値が大きすぎる場合は、これら2つの設定を減少させることでメモリ使用量を削減できます。 - -:::note -`pfev`コマンドを使用して、各キャッシュからおよびファイルから読み取られたログの量を確認できます。 -また、Prometheusエンドポイントのメトリクスを使用して、両方のキャッシュの現在のサイズを追跡することができます。 -::: -## Prometheus {#prometheus} - -Keeperは、[Prometheus](https://prometheus.io)からのスクレイピング用のメトリクスデータを公開できます。 - -設定: - -- `endpoint` - Prometheusサーバーによるメトリクスのスクレイピング用のHTTPエンドポイント。'/'から始まります。 -- `port` - `endpoint`用のポート。 -- `metrics` - [system.metrics](/operations/system-tables/metrics)テーブルからメトリクスを公開するフラグ。 -- `events` - 
[system.events](/operations/system-tables/events)テーブルからメトリクスを公開するフラグ。 -- `asynchronous_metrics` - [system.asynchronous_metrics](/operations/system-tables/asynchronous_metrics)テーブルから現在のメトリクス値を公開するフラグ。 - -**例** - -```xml - - 0.0.0.0 - 8123 - 9000 - - - /metrics - 9363 - true - true - true - - - -``` - -チェック(`127.0.0.1`をClickHouseサーバーのIPアドレスまたはホスト名に置き換える): -```bash -curl 127.0.0.1:9363/metrics -``` - -ClickHouse Cloudの[Prometheus統合](/integrations/prometheus)も参照してください。 -## ClickHouse Keeper User Guide {#clickhouse-keeper-user-guide} - -このガイドでは、ClickHouse Keeperを構成するためのシンプルで最小限の設定を提供し、分散操作をテストする方法の例を示します。この例では、Linux上の3つのノードを使用します。 -### 1. Configure Nodes with Keeper settings {#1-configure-nodes-with-keeper-settings} - -1. 3つのホスト(`chnode1`、`chnode2`、`chnode3`)に3つのClickHouseインスタンスをインストールします。(ClickHouseをインストールする詳細については、[クイックスタート](/getting-started/install/install.mdx)を参照してください。) - -2. 各ノードで、ネットワークインターフェイスを介した外部通信を許可するために、以下のエントリを追加します。 - ```xml - 0.0.0.0 - ``` - -3. すべてのサーバーに以下のClickHouse Keeper構成を追加し、各サーバーの``設定を更新します。`chnode1`では`1`、`chnode2`では`2`などです。 - ```xml - - 9181 - 1 - /var/lib/clickhouse/coordination/log - /var/lib/clickhouse/coordination/snapshots - - - 10000 - 30000 - warning - - - - - 1 - chnode1.domain.com - 9234 - - - 2 - chnode2.domain.com - 9234 - - - 3 - chnode3.domain.com - 9234 - - - - ``` - - 上記で使用された基本設定は以下の通りです: - - |Parameter |Description |Example | - |----------|------------------------------|---------------------| - |tcp_port |Keeperのクライアントが使用するポート|9181(デフォルトは2181、Zookeeperと同じ)| - |server_id| Raft構成で使用される各ClickHouse Keeperサーバーの識別子| 1| - |coordination_settings| タイムアウトなどのパラメータのセクション| タイムアウト: 10000、ログレベル: trace| - |server |参加するサーバーの定義|各サーバーの定義リスト| - |raft_configuration| Keeperクラスター内の各サーバーの設定| 各サーバーの設定| - |id |keeperサービス用のサーバーの数値ID|1| - |hostname |Keeperクラスター内の各サーバーのホスト名、IPまたはFQDN|`chnode1.domain.com`| - |port|インターバルKeeper接続のためにリッスンするポート|9234| - -4. Zookeeperコンポーネントを有効にします。これはClickHouse Keeperエンジンを使用します: - ```xml - - - chnode1.domain.com - 9181 - - - chnode2.domain.com - 9181 - - - chnode3.domain.com - 9181 - - - ``` - - 上記で使用された基本設定は以下の通りです: - - |Parameter |Description |Example | - |----------|------------------------------|---------------------| - |node |ClickHouse Keeper接続用のノードのリスト|各サーバーの設定項目| - |host|各ClickHouse Keeperノードのホスト名、IPまたはFQDN| `chnode1.domain.com`| - |port|ClickHouse Keeperクライアントポート| 9181| - -5. ClickHouseを再起動し、各Keeperインスタンスが実行されていることを確認します。各サーバーで以下のコマンドを実行します。`ruok`コマンドは、Keeperが実行中で正常であれば`imok`を返します: - ```bash - # echo ruok | nc localhost 9181; echo - imok - ``` - -6. `system`データベースには、ClickHouse Keeperインスタンスの詳細を含む`zookeeper`というテーブルがあります。テーブルを表示してみましょう: - ```sql - SELECT * - FROM system.zookeeper - WHERE path IN ('/', '/clickhouse') - ``` - - テーブルは以下のようになります: - ```response - ┌─name───────┬─value─┬─czxid─┬─mzxid─┬───────────────ctime─┬───────────────mtime─┬─version─┬─cversion─┬─aversion─┬─ephemeralOwner─┬─dataLength─┬─numChildren─┬─pzxid─┬─path────────┐ - │ clickhouse │ │ 124 │ 124 │ 2022-03-07 00:49:34 │ 2022-03-07 00:49:34 │ 0 │ 2 │ 0 │ 0 │ 0 │ 2 │ 5693 │ / │ - │ task_queue │ │ 125 │ 125 │ 2022-03-07 00:49:34 │ 2022-03-07 00:49:34 │ 0 │ 1 │ 0 │ 0 │ 0 │ 1 │ 126 │ /clickhouse │ - │ tables │ │ 5693 │ 5693 │ 2022-03-07 00:49:34 │ 2022-03-07 00:49:34 │ 0 │ 3 │ 0 │ 0 │ 0 │ 3 │ 6461 │ /clickhouse │ - └────────────┴───────┴───────┴───────┴─────────────────────┴─────────────────────┴─────────┴──────────┴──────────┴────────────────┴────────────┴─────────────┴───────┴─────────────┘ - ``` -### 2. ClickHouseでクラスターを構成する {#2--configure-a-cluster-in-clickhouse} - -1. 
2つのシャードと2つのノードに1つのレプリカのみを持つシンプルなクラスターを構成します。第三のノードは、ClickHouse Keeperの要件を満たすためにクォーラムを達成するために使用されます。 `chnode1`と`chnode2`で設定を更新します。以下のクラスターは、各ノードに1つのシャードを定義しており、合計で2つのシャードがあり、レプリケーションはありません。この例では、データの一部は1つのノードに、残りは別のノードに配置されます: - ```xml - - - - - chnode1.domain.com - 9000 - default - ClickHouse123! - - - - - chnode2.domain.com - 9000 - default - ClickHouse123! - - - - - ``` - - |パラメータ |説明 |例 | - |----------|------------------------------|---------------------| - |shard |クラスター定義におけるレプリカのリスト|各シャードのレプリカのリスト| - |replica|各レプリカの設定のリスト |各レプリカの設定項目| - |host|レプリカシャードをホストするサーバーのホスト名、IPまたはFQDN|`chnode1.domain.com`| - |port|ネイティブTCPプロトコルを使用して通信するために使用されるポート|9000| - |user|クラスターインスタンスへの認証に使用されるユーザー名|default| - |password|クラスターインスタンスへの接続を許可するために定義されたユーザーのパスワード|`ClickHouse123!`| - -2. ClickHouseを再起動し、クラスターが作成されたことを確認します: - ```bash - SHOW clusters; - ``` - - クラスターが表示されるはずです: - ```response - ┌─cluster───────┐ - │ cluster_2S_1R │ - └───────────────┘ - ``` -### 3. 分散テーブルを作成しテストする {#3-create-and-test-distributed-table} - -1. `chnode1`でClickHouseクライアントを使用して、新しいクラスターに新しいデータベースを作成します。 `ON CLUSTER`句は自動的に両方のノードにデータベースを作成します。 - ```sql - CREATE DATABASE db1 ON CLUSTER 'cluster_2S_1R'; - ``` - -2. `db1`データベースに新しいテーブルを作成します。再度、 `ON CLUSTER`は両方のノードにテーブルを作成します。 - ```sql - CREATE TABLE db1.table1 on cluster 'cluster_2S_1R' - ( - `id` UInt64, - `column1` String - ) - ENGINE = MergeTree - ORDER BY column1 - ``` - -3. `chnode1`ノードでいくつかの行を追加します: - ```sql - INSERT INTO db1.table1 - (id, column1) - VALUES - (1, 'abc'), - (2, 'def') - ``` - -4. `chnode2`ノードでいくつかの行を追加します: - ```sql - INSERT INTO db1.table1 - (id, column1) - VALUES - (3, 'ghi'), - (4, 'jkl') - ``` - -5. 各ノードで`SELECT`文を実行すると、そのノードにのみデータが表示されることに注意してください。例えば、`chnode1`で: - ```sql - SELECT * - FROM db1.table1 - ``` - - ```response - Query id: 7ef1edbc-df25-462b-a9d4-3fe6f9cb0b6d - - ┌─id─┬─column1─┐ - │ 1 │ abc │ - │ 2 │ def │ - └────┴─────────┘ - - 2 行のセットです。経過時間: 0.006 秒。 - ``` - - `chnode2`で: -6. - ```sql - SELECT * - FROM db1.table1 - ``` - - ```response - Query id: c43763cc-c69c-4bcc-afbe-50e764adfcbf - - ┌─id─┬─column1─┐ - │ 3 │ ghi │ - │ 4 │ jkl │ - └────┴─────────┘ - ``` - -6. `Distributed`テーブルを作成して、2つのシャード上のデータを表現できます。 `Distributed`テーブルエンジンを使用したテーブルは独自のデータを格納することはありませんが、複数のサーバーでの分散クエリ処理を許可します。読み取りはすべてのシャードにヒットし、書き込みはシャード間で分散されることができます。 `chnode1`で以下のクエリを実行します: - ```sql - CREATE TABLE db1.dist_table ( - id UInt64, - column1 String - ) - ENGINE = Distributed(cluster_2S_1R,db1,table1) - ``` - -7. 
`dist_table`をクエリすると、2つのシャードからの4つの行のすべてのデータが返されることに気付くでしょう: - ```sql - SELECT * - FROM db1.dist_table - ``` - - ```response - Query id: 495bffa0-f849-4a0c-aeea-d7115a54747a - - ┌─id─┬─column1─┐ - │ 1 │ abc │ - │ 2 │ def │ - └────┴─────────┘ - ┌─id─┬─column1─┐ - │ 3 │ ghi │ - │ 4 │ jkl │ - └────┴─────────┘ - - 4 行のセットです。経過時間: 0.018 秒。 - ``` -### まとめ {#summary} - -このガイドでは、ClickHouse Keeperを使用してクラスターをセットアップする方法を説明しました。ClickHouse Keeperを使用すると、クラスターを構成し、シャード全体でレプリケート可能な分散テーブルを定義できます。 -## 一意のパスでClickHouse Keeperを構成する {#configuring-clickhouse-keeper-with-unique-paths} - - -### 説明 {#description} - -この記事では、組み込みの `{uuid}` マクロ設定を使用して、ClickHouse KeeperまたはZooKeeperで一意なエントリを作成する方法について説明します。一意のパスは、テーブルを頻繁に作成および削除する場合に役立ちます。これは、パスが作成されるたびに新しい `uuid` がそのパスに使用されるため、クリーンアップのためにKeeperのガーベジコレクションを待たなければならない時間を回避します; パスが再利用されることはありません。 -### 環境の例 {#example-environment} -この構成では、すべてのノードにClickHouse Keeperが構成された3ノードクラスターを作成し、2つのノードにClickHouseがあります。これにより、ClickHouse Keeperは3つのノード(タイブレーカーノードを含む)を持ち、2つのレプリカからなる単一のClickHouseシャードを提供します。 - -|ノード|説明| -|-----|-----| -|`chnode1.marsnet.local`|データノード - クラスター `cluster_1S_2R`| -|`chnode2.marsnet.local`|データノード - クラスター `cluster_1S_2R`| -|`chnode3.marsnet.local`| ClickHouse Keeperタイブレーカーノード| - -クラスターのための例の設定: -```xml - - - - - chnode1.marsnet.local - 9440 - default - ClickHouse123! - 1 - - - chnode2.marsnet.local - 9440 - default - ClickHouse123! - 1 - - - - -``` -### `{uuid}`を使用するためのテーブル設定手順 {#procedures-to-set-up-tables-to-use-uuid} - -1. 各サーバーでマクロを構成 - サーバー1の例: -```xml - - 1 - replica_1 - -``` -:::note -`shard` と `replica` のマクロを定義しましたが、 `{uuid}` はここでは定義されていません。それは組み込みのもので、特に定義する必要はありません。 -::: - -2. データベースを作成 - -```sql -CREATE DATABASE db_uuid - ON CLUSTER 'cluster_1S_2R' - ENGINE Atomic; -``` - -```response -CREATE DATABASE db_uuid ON CLUSTER cluster_1S_2R -ENGINE = Atomic - -Query id: 07fb7e65-beb4-4c30-b3ef-bd303e5c42b5 - -┌─host──────────────────┬─port─┬─status─┬─error─┬─num_hosts_remaining─┬─num_hosts_active─┐ -│ chnode2.marsnet.local │ 9440 │ 0 │ │ 1 │ 0 │ -│ chnode1.marsnet.local │ 9440 │ 0 │ │ 0 │ 0 │ -└───────────────────────┴──────┴────────┴───────┴─────────────────────┴──────────────────┘ -``` - -3. マクロと `{uuid}` を使用してクラスタ上にテーブルを作成 - -```sql -CREATE TABLE db_uuid.uuid_table1 ON CLUSTER 'cluster_1S_2R' - ( - id UInt64, - column1 String - ) - ENGINE = ReplicatedMergeTree('/clickhouse/tables/{shard}/db_uuid/{uuid}', '{replica}' ) - ORDER BY (id); -``` - -```response -CREATE TABLE db_uuid.uuid_table1 ON CLUSTER cluster_1S_2R -( - `id` UInt64, - `column1` String -) -ENGINE = ReplicatedMergeTree('/clickhouse/tables/{shard}/db_uuid/{uuid}', '{replica}') -ORDER BY id - -Query id: 8f542664-4548-4a02-bd2a-6f2c973d0dc4 - -┌─host──────────────────┬─port─┬─status─┬─error─┬─num_hosts_remaining─┬─num_hosts_active─┐ -│ chnode1.marsnet.local │ 9440 │ 0 │ │ 1 │ 0 │ -│ chnode2.marsnet.local │ 9440 │ 0 │ │ 0 │ 0 │ -└───────────────────────┴──────┴────────┴───────┴─────────────────────┴──────────────────┘ -``` - -4. 
分散テーブルを作成 - -```sql -create table db_uuid.dist_uuid_table1 on cluster 'cluster_1S_2R' - ( - id UInt64, - column1 String - ) - ENGINE = Distributed('cluster_1S_2R', 'db_uuid', 'uuid_table1' ); -``` - -```response -CREATE TABLE db_uuid.dist_uuid_table1 ON CLUSTER cluster_1S_2R -( - `id` UInt64, - `column1` String -) -ENGINE = Distributed('cluster_1S_2R', 'db_uuid', 'uuid_table1') - -Query id: 3bc7f339-ab74-4c7d-a752-1ffe54219c0e - -┌─host──────────────────┬─port─┬─status─┬─error─┬─num_hosts_remaining─┬─num_hosts_active─┐ -│ chnode2.marsnet.local │ 9440 │ 0 │ │ 1 │ 0 │ -│ chnode1.marsnet.local │ 9440 │ 0 │ │ 0 │ 0 │ -└───────────────────────┴──────┴────────┴───────┴─────────────────────┴──────────────────┘ -``` -### テスト {#testing} -1. 最初のノード(例:`chnode1`)にデータを挿入 -```sql -INSERT INTO db_uuid.uuid_table1 - ( id, column1) - VALUES - ( 1, 'abc'); -``` - -```response -INSERT INTO db_uuid.uuid_table1 (id, column1) FORMAT Values - -Query id: 0f178db7-50a6-48e2-9a1b-52ed14e6e0f9 - -Ok. - -1 行のセットです。経過時間: 0.033 秒。 -``` - -2. 二番目のノード(例:`chnode2`)にデータを挿入 -```sql -INSERT INTO db_uuid.uuid_table1 - ( id, column1) - VALUES - ( 2, 'def'); -``` - -```response -INSERT INTO db_uuid.uuid_table1 (id, column1) FORMAT Values - -Query id: edc6f999-3e7d-40a0-8a29-3137e97e3607 - -Ok. - -1 行のセットです。経過時間: 0.529 秒。 -``` - -3. 分散テーブルを使用してレコードを表示 -```sql -SELECT * FROM db_uuid.dist_uuid_table1; -``` - -```response -SELECT * -FROM db_uuid.dist_uuid_table1 - -Query id: 6cbab449-9e7f-40fe-b8c2-62d46ba9f5c8 - -┌─id─┬─column1─┐ -│ 1 │ abc │ -└────┴─────────┘ -┌─id─┬─column1─┐ -│ 2 │ def │ -└────┴─────────┘ - -2 行のセットです。経過時間: 0.007 秒。 -``` -### 代替 {#alternatives} -デフォルトのレプリケーションパスは、事前にマクロを定義し、 `{uuid}` を使用することによって定義できます。 - -1. 各ノードでテーブルのデフォルトを設定 -```xml -/clickhouse/tables/{shard}/db_uuid/{uuid} -{replica} -``` -:::tip -各ノードに対して `{database}` マクロを定義することもできます。ノードが特定のデータベースに使用される場合。 -::: - -2. 明示的なパラメータを指定せずにテーブルを作成する: -```sql -CREATE TABLE db_uuid.uuid_table1 ON CLUSTER 'cluster_1S_2R' - ( - id UInt64, - column1 String - ) - ENGINE = ReplicatedMergeTree - ORDER BY (id); -``` - -```response -CREATE TABLE db_uuid.uuid_table1 ON CLUSTER cluster_1S_2R -( - `id` UInt64, - `column1` String -) -ENGINE = ReplicatedMergeTree -ORDER BY id - -Query id: ab68cda9-ae41-4d6d-8d3b-20d8255774ee - -┌─host──────────────────┬─port─┬─status─┬─error─┬─num_hosts_remaining─┬─num_hosts_active─┐ -│ chnode2.marsnet.local │ 9440 │ 0 │ │ 1 │ 0 │ -│ chnode1.marsnet.local │ 9440 │ 0 │ │ 0 │ 0 │ -└───────────────────────┴──────┴────────┴───────┴─────────────────────┴──────────────────┘ - -2 行のセットです。経過時間: 1.175 秒。 -``` - -3. 
デフォルト構成で使用されている設定を確認する -```sql -SHOW CREATE TABLE db_uuid.uuid_table1; -``` - -```response -SHOW CREATE TABLE db_uuid.uuid_table1 - -CREATE TABLE db_uuid.uuid_table1 -( - `id` UInt64, - `column1` String -) -ENGINE = ReplicatedMergeTree('/clickhouse/tables/{shard}/db_uuid/{uuid}', '{replica}') -ORDER BY id - -1 行のセットです。経過時間: 0.003 秒。 -``` -### トラブルシューティング {#troubleshooting} - -テーブル情報とUUIDを取得する例のコマンド: -```sql -SELECT * FROM system.tables -WHERE database = 'db_uuid' AND name = 'uuid_table1'; -``` - -上記のテーブルのUUIDを持つZooKeeper内のテーブルに関する情報を取得する例のコマンド -```sql -SELECT * FROM system.zookeeper -WHERE path = '/clickhouse/tables/1/db_uuid/9e8a3cc2-0dec-4438-81a7-c3e63ce2a1cf/replicas'; -``` - -:::note -データベースは `Atomic`でなければなりません。以前のバージョンからのアップグレードの場合、 `default`データベースはおそらく `Ordinary`タイプです。 -::: - -確認するには次のようにします: - -例えば、 - -```sql -SELECT name, engine FROM system.databases WHERE name = 'db_uuid'; -``` - -```response -SELECT - name, - engine -FROM system.databases -WHERE name = 'db_uuid' - -Query id: b047d459-a1d2-4016-bcf9-3e97e30e49c2 - -┌─name────┬─engine─┐ -│ db_uuid │ Atomic │ -└─────────┴────────┘ - -1 行のセットです。経過時間: 0.004 秒。 -``` -## ClickHouse Keeperの動的再構成 {#reconfiguration} - - -### 説明 {#description-1} - -ClickHouse Keeperは、 `keeper_server.enable_reconfiguration`がオンになっている場合、動的クラスター再構成のためにZooKeeper [`reconfig`](https://zookeeper.apache.org/doc/r3.5.3-beta/zookeeperReconfig.html#sc_reconfig_modifying)コマンドを部分的にサポートします。 - -:::note -この設定がオフになっている場合、レプリカの `raft_configuration` セクションを手動で変更することにより、クラスターを再構成できます。変更を適用するのはリーダーのみであるため、すべてのレプリカでファイルを編集する必要があります。代わりに、ZooKeeper互換のクライアントを通じて`reconfig`クエリを送信できます。 -::: - -仮想ノード`/keeper/config`は、次の形式で最後にコミットされたクラスターの構成を含みます: - -```text -server.id = server_host:server_port[;server_type][;server_priority] -server.id2 = ... -... 
-``` - -- 各サーバーエントリは、改行で区切られています。 -- `server_type`は `participant`または `learner`です([learner](https://github.com/eBay/NuRaft/blob/master/docs/readonly_member.md)はリーダー選挙に参加しません)。 -- `server_priority`は、[どのノードがリーダー選挙で優先されるべきか](https://github.com/eBay/NuRaft/blob/master/docs/leader_election_priority.md)を示す非負の整数です。 - 優先度0は、サーバーがリーダーになることはないことを意味します。 - -例: - -```sql -:) get /keeper/config -server.1=zoo1:9234;participant;1 -server.2=zoo2:9234;participant;1 -server.3=zoo3:9234;participant;1 -``` - -新しいサーバーを追加したり、既存のサーバーを削除したり、既存のサーバーの優先度を変更するために`reconfig`コマンドを使用できます。以下は例です(`clickhouse-keeper-client` を使用): - -```bash - -# 2つの新しいサーバーを追加 -reconfig add "server.5=localhost:123,server.6=localhost:234;learner" - -# 他の2つのサーバーを削除 -reconfig remove "3,4" - -# 既存のサーバー優先度を8に変更 -reconfig add "server.5=localhost:5123;participant;8" -``` - -以下は`kazoo`の例です: - -```python - -# 2つの新しいサーバーを追加し、他の2つのサーバーを削除 -reconfig(joining="server.5=localhost:123,server.6=localhost:234;learner", leaving="3,4") - - -# 既存のサーバー優先度を8に変更 -reconfig(joining="server.5=localhost:5123;participant;8", leaving=None) -``` - -`joining`内のサーバーは、上記で説明されたサーバーフォーマットである必要があります。サーバーエントリはカンマで区切る必要があります。 -新しいサーバーを追加する場合、`server_priority`(デフォルト値は1)および`server_type`(デフォルト値は`participant`)を省略することができます。 - -既存のサーバーの優先順位を変更する場合、ターゲット優先順位を用意する `joining`に追加します。 -サーバーのホスト、ポート、タイプは、既存のサーバー設定と同じである必要があります。 - -サーバーは `joining`および`leaving`に表示される順序で追加および削除されます。 -`joining`からのすべての更新は、`leaving`からの更新の前に処理されます。 - -Keeperの再構成実装にはいくつかの注意点があります: - -- インクリメンタル再構成のみがサポートされています。 `new_members`が空でないリクエストは拒否されます。 - - ClickHouse Keeperの実装は、NuRaft APIに依存して、動的にメンバーシップを変更します。 NuRaftには、1回に1つのサーバーを追加したり、削除したりする方法があります。したがって、構成の各変更(`joining`の各部分、`leaving`の各部分)は別々に決定する必要があります。したがって、大量の再構成は、エンドユーザーには誤解を招く可能性があるため、利用できません。 - - サーバーのタイプ(参加者/学習者)を変更することもできません。これはNuRaftによってサポートされていないため、サーバーを削除して追加する必要があるため、これも誤解を招くことになります。 - -- 戻り値の `znodestat`を使用することはできません。 -- `from_version` フィールドは使用されていません。 `from_version`を設定したすべてのリクエストは拒否されます。 - これは、`/keeper/config`が仮想ノードであるため、永続ストレージには保存されず、各リクエストに対して指定されたノード設定をオンザフライで生成されるためです。 - この判断はNuRaftがすでにこの構成を保存しているため、データの重複を避けるために行われました。 -- ZooKeeperとは異なり、`sync`コマンドを送信することによってクラスターの再構成を待つ方法はありません。 - 新しい構成は _最終的に_ 適用されますが、時間的保証はありません。 -- `reconfig`コマンドはさまざまな理由で失敗する可能性があります。クラスターの状態を確認し、更新が適用されたかどうかを確認できます。 -## シングルノードKeeperをクラスターに変換する {#converting-a-single-node-keeper-into-a-cluster} - -時には、実験的なKeeperノードをクラスターに拡張する必要があります。以下は、3ノードクラスターのための手順の図です: - -- **重要**: 新しいノードは、現在のクォーラムより少ないバッチで追加する必要があります。そうしないと、それらの間でリーダーが選出されます。この例では1つずつ追加します。 -- 既存のKeeperノードは、`keeper_server.enable_reconfiguration`構成パラメータがオンになっている必要があります。 -- Keeperクラスターの完全な新しい構成を使用して2番目のノードを起動します。 -- 起動後、最初のノードに新しいノードを追加します( [`reconfig`](#reconfiguration)を使用)。 -- 次に、3番目のノードを起動し、[`reconfig`](#reconfiguration)を使用して追加します。 -- 新しいKeeperノードを追加して、`clickhouse-server`の設定を更新し、変更を適用するために再起動します。 -- 最初のノードのraft設定を更新し、オプションで再起動します。 - -このプロセスに慣れるために、こちらの[サンドボックスリポジトリ](https://github.com/ClickHouse/keeper-extend-cluster)を参照してください。 -## サポートされていない機能 {#unsupported-features} - -ClickHouse KeeperはZooKeeperと完全に互換性を持つことを目指していますが、現在実装されていない機能がいくつかあります(開発は進行中です): - -- [`create`](https://zookeeper.apache.org/doc/r3.9.1/apidocs/zookeeper-server/org/apache/zookeeper/ZooKeeper.html#create(java.lang.String,byte%5B%5D,java.util.List,org.apache.zookeeper.CreateMode,org.apache.zookeeper.data.Stat)) は `Stat`オブジェクトを返すことをサポートしていません。 -- 
[`create`](https://zookeeper.apache.org/doc/r3.9.1/apidocs/zookeeper-server/org/apache/zookeeper/ZooKeeper.html#create(java.lang.String,byte%5B%5D,java.util.List,org.apache.zookeeper.CreateMode,org.apache.zookeeper.data.Stat)) は [TTL](https://zookeeper.apache.org/doc/r3.9.1/apidocs/zookeeper-server/org/apache/zookeeper/CreateMode.html#PERSISTENT_WITH_TTL)をサポートしていません。 -- [`addWatch`](https://zookeeper.apache.org/doc/r3.9.1/apidocs/zookeeper-server/org/apache/zookeeper/ZooKeeper.html#addWatch(java.lang.String,org.apache.zookeeper.Watcher,org.apache.zookeeper.AddWatchMode)) は [`PERSISTENT`](https://zookeeper.apache.org/doc/r3.9.1/apidocs/zookeeper-server/org/apache/zookeeper/AddWatchMode.html#PERSISTENT) ウォッチで機能しません。 -- [`removeWatch`](https://zookeeper.apache.org/doc/r3.9.1/apidocs/zookeeper-server/org/apache/zookeeper/ZooKeeper.html#removeWatches(java.lang.String,org.apache.zookeeper.Watcher,org.apache.zookeeper.Watcher.WatcherType,boolean)) と [`removeAllWatches`](https://zookeeper.apache.org/doc/r3.9.1/apidocs/zookeeper-server/org/apache/zookeeper/ZooKeeper.html#removeAllWatches(java.lang.String,org.apache.zookeeper.Watcher.WatcherType,boolean)) はサポートされていません。 -- `setWatches`はサポートされていません。 -- [`CONTAINER`](https://zookeeper.apache.org/doc/r3.5.1-alpha/api/org/apache/zookeeper/CreateMode.html) タイプのznodesを作成することはサポートされていません。 -- [`SASL認証`](https://cwiki.apache.org/confluence/display/ZOOKEEPER/Zookeeper+and+SASL) はサポートされていません。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/guides/sre/keeper/index.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/guides/sre/keeper/index.md.hash deleted file mode 100644 index cf0dbcd086f..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/guides/sre/keeper/index.md.hash +++ /dev/null @@ -1 +0,0 @@ -be6a7fe87dc95fd9 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/guides/sre/network-ports.md b/i18n/jp/docusaurus-plugin-content-docs/current/guides/sre/network-ports.md deleted file mode 100644 index e46bc64eb35..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/guides/sre/network-ports.md +++ /dev/null @@ -1,35 +0,0 @@ ---- -slug: '/guides/sre/network-ports' -sidebar_label: 'ネットワークポート' -title: 'ネットワークポート' -description: '利用可能なネットワークポートの説明とそれらの用途について' ---- - - - - -# ネットワークポート - -:::note -**デフォルト**として説明されるポートは、ポート番号が「/etc/clickhouse-server/config.xml」に設定されていることを意味します。設定をカスタマイズするには、「/etc/clickhouse-server/config.d/」にファイルを追加してください。 [設定ファイル](/operations/configuration-files) に関するドキュメントを参照してください。 -::: - -|ポート|説明| -|----|-----------| -|2181|ZooKeeper デフォルトサービスポート。 **注意: ClickHouse Keeper のために `9181` を参照してください**| -|8123|HTTP デフォルトポート| -|8443|HTTP SSL/TLS デフォルトポート| -|9000|ネイティブプロトコルポート(ClickHouse TCP プロトコルとも呼ばれます)。 `clickhouse-server`、`clickhouse-client`、およびネイティブ ClickHouse ツールなどの ClickHouse アプリケーションとプロセスによって使用されます。分散クエリのためのサーバー間通信に使用されます。| -|9004|MySQL エミュレーションポート| -|9005|PostgreSQL エミュレーションポート(SSL が ClickHouse 用に有効になっている場合は、安全な通信にも使用されます)。| -|9009|低レベルのデータアクセスのためのサーバー間通信ポート。データの交換、レプリケーション、サーバー間通信に使用されます。| -|9010|サーバー間通信のための SSL/TLS| -|9011|ネイティブプロトコル PROXYv1 プロトコルポート| -|9019|JDBC ブリッジ| -|9100|gRPC ポート| -|9181|推奨される ClickHouse Keeper ポート| -|9234|推奨される ClickHouse Keeper Raft ポート(`1` が有効な場合は、安全な通信にも使用されます)| -|9363|Prometheus デフォルトメトリクスポート| -|9281|推奨されるセキュア SSL ClickHouse Keeper ポート| -|9440|ネイティブプロトコル SSL/TLS ポート| -|42000|Graphite デフォルトポート| diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/guides/sre/network-ports.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/guides/sre/network-ports.md.hash 
deleted file mode 100644 index b04c32e3cff..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/guides/sre/network-ports.md.hash +++ /dev/null @@ -1 +0,0 @@ -e1a1ce935a2dde8a diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/guides/sre/scaling-clusters.md b/i18n/jp/docusaurus-plugin-content-docs/current/guides/sre/scaling-clusters.md deleted file mode 100644 index 03d68023684..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/guides/sre/scaling-clusters.md +++ /dev/null @@ -1,22 +0,0 @@ ---- -slug: '/guides/sre/scaling-clusters' -sidebar_label: 'シャードの再バランス' -sidebar_position: 20 -description: 'ClickHouseは自動的なシャードの再バランスをサポートしていませんので、シャードの再バランス方法についていくつかのベストプラクティスを提供しています。' -title: 'データの再バランス' ---- - - - - -# データの再バランス - -ClickHouseは自動シャード再バランスをサポートしていません。ただし、シャードを再バランスする方法はいくつかあります。優先順位は以下の通りです。 - -1. [分散テーブル](/engines/table-engines/special/distributed.md) のシャードを調整し、新しいシャードに書き込みが偏るようにします。これにより、クラスター内で負荷の不均衡やホットスポットが発生する可能性がありますが、書き込みスループットが極端に高くないシナリオでは実行可能です。ユーザーが書き込みターゲットを変更する必要はなく、分散テーブルのままにしておくことができます。この方法は既存のデータの再バランスには役立ちません。 - -2. (1)の代替として、既存のクラスターを変更し、新しいシャードに専用で書き込むことを通じてクラスターをバランスさせます - 手動で書き込みの重み付けを行います。これには(1)と同じ制限があります。 - -3. 既存のデータを再バランスする必要があり、データをパーティション分割している場合は、パーティションを切り離し、手動で別のノードに移動させてから新しいシャードに再接続することを検討してください。これは次の技術よりも手動作業が多くなりますが、より速く、リソースの消費が少ないかもしれません。この操作は手動で行うため、データの再バランスを考慮する必要があります。 - -4. [INSERT FROM SELECT](/sql-reference/statements/insert-into.md/#inserting-the-results-of-select)を通じて、ソースクラスターから新しいクラスターにデータをエクスポートします。これは非常に大きなデータセットに対しては性能が良くなく、ソースクラスターに対してはかなりのIOが発生し、 considerableなネットワークリソースを使用する可能性があります。これは最後の手段を示しています。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/guides/sre/scaling-clusters.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/guides/sre/scaling-clusters.md.hash deleted file mode 100644 index 39c0c9db855..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/guides/sre/scaling-clusters.md.hash +++ /dev/null @@ -1 +0,0 @@ -18940ce41ab50c80 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/guides/sre/user-management/_category_.yml b/i18n/jp/docusaurus-plugin-content-docs/current/guides/sre/user-management/_category_.yml deleted file mode 100644 index 08ba2d82c45..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/guides/sre/user-management/_category_.yml +++ /dev/null @@ -1,7 +0,0 @@ -position: 5 -label: 'User and role management' -collapsible: true -collapsed: true -link: - type: generated-index - slug: /guides/sre/user-role diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/guides/sre/user-management/configuring-ldap.md b/i18n/jp/docusaurus-plugin-content-docs/current/guides/sre/user-management/configuring-ldap.md deleted file mode 100644 index 4b2c96a5b82..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/guides/sre/user-management/configuring-ldap.md +++ /dev/null @@ -1,171 +0,0 @@ ---- -sidebar_label: 'LDAPの構成' -sidebar_position: 2 -slug: '/guides/sre/configuring-ldap' -title: 'LDAPを使用したClickHouseの認証とロールマッピングの構成' -description: 'ClickHouseをLDAPを使用して認証とロールマッピングに設定する方法について説明します' ---- - -import SelfManaged from '@site/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_self_managed_only_no_roadmap.md'; - - -# ClickHouseをLDAPで認証およびロールマッピングに使用するための設定 - - - -ClickHouseは、LDAPを使用してClickHouseデータベースユーザーを認証するように構成できます。このガイドでは、公開されているディレクトリに対して認証を行うLDAPシステムとClickHouseを統合する簡単な例を提供します。 - -## 1. ClickHouseでのLDAP接続設定の構成 {#1-configure-ldap-connection-settings-in-clickhouse} - -1. 
この公開LDAPサーバーへの接続をテストします: - ```bash - $ ldapsearch -x -b dc=example,dc=com -H ldap://ldap.forumsys.com - ``` - - 応答は次のようになります: - ```response - # extended LDIF - # - # LDAPv3 - # base with scope subtree - # filter: (objectclass=*) - # requesting: ALL - # - - # example.com - dn: dc=example,dc=com - objectClass: top - objectClass: dcObject - objectClass: organization - o: example.com - dc: example - ... - ``` - -2. `config.xml`ファイルを編集し、以下を追加してLDAPを構成します: - ```xml - - - ldap.forumsys.com - 389 - uid={user_name},dc=example,dc=com - no - never - - - ``` - - :::note - ``タグは特定のLDAPサーバーを識別するための任意のラベルです。 - ::: - - 上記で使用される基本設定は次の通りです: - - |パラメータ |説明 |例 | - |----------|----------------------|---------------------| - |host |LDAPサーバーのホスト名またはIP |ldap.forumsys.com | - |port |LDAPサーバー用のディレクトリポート|389 | - |bind_dn |ユーザーへのテンプレートパス|`uid={user_name},dc=example,dc=com`| - |enable_tls|安全なLDAPを使用するかどうか|no | - |tls_require_cert |接続のために証明書が必要かどうか|never| - - :::note - この例では、公開サーバーが389を使用し安全なポートを使用していないため、デモ目的でTLSを無効にしています。 - ::: - - :::note - LDAP設定の詳細については、[LDAPドキュメントページ](../../../operations/external-authenticators/ldap.md)を参照してください。 - ::: - -3. ``セクションに``セクションを追加してユーザーロールのマッピングを構成します。このセクションは、ユーザーが認証されたときにどのロールを取得するかを定義します。この基本的な例では、LDAPに対して認証を行ったユーザーは`scientists_role`を取得し、これは後のステップでClickHouseで定義されます。このセクションは次のようになります: - ```xml - - - users.xml - - - /var/lib/clickhouse/access/ - - - test_ldap_server - - - - - dc=example,dc=com - (&(objectClass=groupOfUniqueNames)(uniqueMember={bind_dn})) - cn - - - - ``` - - 上記で使用される基本設定は次の通りです: - - |パラメータ |説明 |例 | - |----------|----------------------|---------------------| - |server |前のldap_serversセクションで定義されたラベル|test_ldap_server| - |roles |ClickHouseで定義されたユーザーがマッピングされるロールの名前|scientists_role| - |base_dn |ユーザーとグループの検索を開始する基本パス|dc=example,dc=com| - |search_filter|マッピングのために選択するグループを識別するLDAP検索フィルター|`(&(objectClass=groupOfUniqueNames)(uniqueMember={bind_dn}))`| - |attribute |返される属性名|cn| - -4. 設定を適用するためにClickHouseサーバーを再起動します。 - -## 2. ClickHouseデータベースのロールと権限を構成する {#2-configure-clickhouse-database-roles-and-permissions} - -:::note -このセクションの手順は、ClickHouseでSQLアクセス制御とアカウント管理が有効になっていることを前提としています。有効にするには、[SQLユーザーとロールガイド](index.md)を参照してください。 -::: - -1. `config.xml`ファイルのロールマッピングセクションで使用されたのと同じ名前のロールをClickHouseで作成します。 - ```sql - CREATE ROLE scientists_role; - ``` - -2. ロールに必要な権限を付与します。次のステートメントは、LDAPを通じて認証できるユーザーに管理者権限を付与します: - ```sql - GRANT ALL ON *.* TO scientists_role; - ``` - -## 3. LDAP設定をテストする {#3-test-the-ldap-configuration} - -1. ClickHouseクライアントを使用してログインします。 - ```bash - $ clickhouse-client --user einstein --password password - ClickHouse client version 22.2.2.1. - Connecting to localhost:9000 as user einstein. - Connected to ClickHouse server version 22.2.2 revision 54455. - - chnode1 :) - ``` - - :::note - ステップ1で`ldapsearch`コマンドを使用してディレクトリに利用可能なすべてのユーザーを表示し、すべてのユーザーのパスワードが`password`であることを確認してください。 - ::: - -2. ユーザーが`scientists_role`ロールに正しくマッピングされており、管理者権限を持っているかテストします。 - ```sql - SHOW DATABASES - ``` - - ```response - Query id: 93b785ff-1482-4eda-95b0-b2d68b2c5e0f - - ┌─name───────────────┐ - │ INFORMATION_SCHEMA │ - │ db1_mysql │ - │ db2 │ - │ db3 │ - │ db4_mysql │ - │ db5_merge │ - │ default │ - │ information_schema │ - │ system │ - └────────────────────┘ - - 9 rows in set. Elapsed: 0.004 sec. 
- ``` - -## 要約 {#summary} -この記事では、ClickHouseをLDAPサーバーに対して認証し、ロールにマッピングする基本を示しました。また、ClickHouse内の個々のユーザーを構成するオプションもありますが、これらのユーザーが自動的なロールマッピングを構成せずにLDAPで認証されるようにすることも可能です。LDAPモジュールはActive Directoryへの接続にも使用できます。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/guides/sre/user-management/configuring-ldap.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/guides/sre/user-management/configuring-ldap.md.hash deleted file mode 100644 index 368c2f078cb..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/guides/sre/user-management/configuring-ldap.md.hash +++ /dev/null @@ -1 +0,0 @@ -7da78a8a41020218 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/guides/sre/user-management/index.md b/i18n/jp/docusaurus-plugin-content-docs/current/guides/sre/user-management/index.md deleted file mode 100644 index 2a825cdbbe3..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/guides/sre/user-management/index.md +++ /dev/null @@ -1,548 +0,0 @@ ---- -slug: '/operations/access-rights' -sidebar_position: 1 -sidebar_label: 'ユーザーとロール' -title: 'アクセス制御とアカウント管理' -keywords: -- 'ClickHouse Cloud' -- 'Access Control' -- 'User Management' -- 'RBAC' -- 'Security' -description: 'ClickHouse Cloud におけるアクセス制御とアカウント管理の説明' ---- - - - - -# ClickHouseにおけるユーザーとロールの作成 - -ClickHouseは、[RBAC](https://en.wikipedia.org/wiki/Role-based_access_control) アプローチに基づくアクセス制御管理をサポートしています。 - -ClickHouseのアクセスエンティティ: -- [ユーザーアカウント](#user-account-management) -- [ロール](#role-management) -- [行ポリシー](#row-policy-management) -- [設定プロファイル](#settings-profiles-management) -- [クォータ](#quotas-management) - -アクセスエンティティは次の方法で設定できます: - -- SQL駆動型ワークフロー。 - - この機能を[有効にする](#enabling-access-control)必要があります。 - -- サーバーの[設定ファイル](/operations/configuration-files.md) `users.xml` と `config.xml`。 - -SQL駆動型ワークフローの使用をお勧めします。両方の設定方法は同時に機能するため、アカウントおよびアクセス権を管理するためにサーバー設定ファイルを使用する場合は、スムーズにSQL駆動型ワークフローに切り替えることができます。 - -:::note -同じアクセスエンティティを両方の設定方法で同時に管理することはできません。 -::: - -:::note -ClickHouse Cloud Consoleのユーザーを管理する場合は、この[ページ](/cloud/security/cloud-access-management)を参照してください。 -::: - -すべてのユーザー、ロール、プロファイルなどとその付与内容を確認するには、[`SHOW ACCESS`](/sql-reference/statements/show#show-access) ステートメントを使用します。 - -## 概要 {#access-control-usage} - -デフォルトでは、ClickHouseサーバーは `default` ユーザーアカウントを提供します。このアカウントはSQL駆動型アクセス制御やアカウント管理の使用を許可されていませんが、すべての権限と許可があります。 `default` ユーザーアカウントは、ログイン時にユーザー名が定義されていない場合や、分散クエリの際に使用されます。分散クエリ処理では、サーバーまたはクラスタの構成で[ユーザーとパスワード](/engines/table-engines/special/distributed.md)のプロパティが指定されていない場合、デフォルトのユーザーアカウントが使用されます。 - -ClickHouseの使用を開始したばかりの場合は、次のシナリオを考慮してください: - -1. `default` ユーザーのためにSQL駆動型アクセス制御およびアカウント管理を[有効にする](#enabling-access-control)。 -2. `default` ユーザーアカウントにログインし、必要なすべてのユーザーを作成します。管理者アカウントを作成することを忘れないでください(`GRANT ALL ON *.* TO admin_user_account WITH GRANT OPTION`)。 -3. 
`default` ユーザーのために[権限を制限](/operations/settings/permissions-for-queries)し、そのユーザーに対するSQL駆動型アクセス制御およびアカウント管理を無効にします。 - -### 現在のソリューションのプロパティ {#access-control-properties} - -- 存在しないデータベースやテーブルに対しても権限を付与できます。 -- テーブルが削除された場合、そのテーブルに対応するすべての権限は取り消されません。つまり、後で同じ名前の新しいテーブルを作成しても、すべての権限は有効のままとなります。削除されたテーブルに対応する権限を取り消すには、例えば `REVOKE ALL PRIVILEGES ON db.table FROM ALL` クエリを実行する必要があります。 -- 権限に対する有効期限の設定はありません。 - -### ユーザーアカウント {#user-account-management} - -ユーザーアカウントは、ClickHouseでの認証を可能にするアクセスエンティティです。ユーザーアカウントには以下が含まれます: - -- 識別情報。 -- ユーザーが実行できるクエリの範囲を定義する[権限](/sql-reference/statements/grant.md#privileges)。 -- ClickHouseサーバーに接続できるホスト。 -- 複数のロール(役割)。 -- ユーザーログイン時に適用される設定とその制約。 -- 割り当てられた設定プロファイル。 - -権限は、[GRANT](/sql-reference/statements/grant.md)クエリを使用してユーザーアカウントに付与することができ、[ロール](#role-management)を割り当てることによっても付与できます。ユーザーから権限を取り消すために、ClickHouseは[REVOKE](/sql-reference/statements/revoke.md)クエリを提供します。ユーザーの権限を一覧表示するには、[SHOW GRANTS](/sql-reference/statements/show#show-grants)ステートメントを使用します。 - -管理クエリ: - -- [CREATE USER](/sql-reference/statements/create/user.md) -- [ALTER USER](/sql-reference/statements/alter/user) -- [DROP USER](/sql-reference/statements/drop.md) -- [SHOW CREATE USER](/sql-reference/statements/show#show-create-user) -- [SHOW USERS](/sql-reference/statements/show#show-users) - -### 設定の適用 {#access-control-settings-applying} - -設定は異なる方法で構成できます: ユーザーアカウントのために、その付与されたロール内、および設定プロファイルの中で。ユーザーのログイン時に、異なるアクセスエンティティに設定が構成されている場合、設定の値と制約は以下のように適用されます(優先順位が高いものから低いものへ): - -1. ユーザーアカウントの設定。 -2. ユーザーアカウントのデフォルトロール用の設定。いくつかのロールに設定が構成されている場合、設定の適用順序は未定義です。 -3. ユーザーまたはそのデフォルトロールに割り当てられた設定プロファイルからの設定。いくつかのプロファイルに設定が構成されている場合、設定の適用順序は未定義です。 -4. デフォルトとして、または[デフォルトプロファイル](/operations/server-configuration-parameters/settings#default_profile)からサーバー全体に適用される設定。 - -### ロール {#role-management} - -ロールは、ユーザーアカウントに付与できるアクセスエンティティのコンテナです。 - -ロールには以下が含まれます: - -- [権限](/sql-reference/statements/grant#privileges) -- 設定と制約 -- 割り当てられたロールのリスト - -管理クエリ: - -- [CREATE ROLE](/sql-reference/statements/create/role) -- [ALTER ROLE](/sql-reference/statements/alter/role) -- [DROP ROLE](/sql-reference/statements/drop#drop-role) -- [SET ROLE](/sql-reference/statements/set-role) -- [SET DEFAULT ROLE](/sql-reference/statements/set-role) -- [SHOW CREATE ROLE](/sql-reference/statements/show#show-create-role) -- [SHOW ROLES](/sql-reference/statements/show#show-roles) - -権限は、[GRANT](/sql-reference/statements/grant.md)クエリを使用してロールに付与できます。ロールから権限を取り消すために、ClickHouseは[REVOKE](/sql-reference/statements/revoke.md)クエリを提供します。 - -#### 行ポリシー {#row-policy-management} - -行ポリシーは、ユーザーまたはロールにどの行が利用できるかを定義するフィルタです。行ポリシーには特定のテーブルのためのフィルタと、この行ポリシーを使用する必要があるロールやユーザーのリストが含まれます。 - -:::note -行ポリシーは、読み取り専用アクセスを持つユーザーに対してのみ意味があります。ユーザーがテーブルを変更したり、テーブル間でパーティションをコピーできる場合、行ポリシーの制約は無効になります。 -::: - -管理クエリ: - -- [CREATE ROW POLICY](/sql-reference/statements/create/row-policy) -- [ALTER ROW POLICY](/sql-reference/statements/alter/row-policy) -- [DROP ROW POLICY](/sql-reference/statements/drop#drop-row-policy) -- [SHOW CREATE ROW POLICY](/sql-reference/statements/show#show-create-row-policy) -- [SHOW POLICIES](/sql-reference/statements/show#show-policies) - -### 設定プロファイル {#settings-profiles-management} - -設定プロファイルは、[設定](/operations/settings/index.md)のコレクションです。設定プロファイルには設定や制約が含まれており、このプロファイルが適用されるロールやユーザーのリストも含まれています。 - -管理クエリ: - -- [CREATE SETTINGS PROFILE](/sql-reference/statements/create/settings-profile) -- [ALTER SETTINGS PROFILE](/sql-reference/statements/alter/settings-profile) -- [DROP SETTINGS PROFILE](/sql-reference/statements/drop#drop-settings-profile) -- [SHOW CREATE 
SETTINGS PROFILE](/sql-reference/statements/show#show-create-settings-profile) -- [SHOW PROFILES](/sql-reference/statements/show#show-profiles) - -### クォータ {#quotas-management} - -クォータはリソース使用量を制限します。詳細は[クォータ](/operations/quotas.md)を参照してください。 - -クォータは、特定の期間のための制限のセットと、このクォータを使用するロールやユーザーのリストを含みます。 - -管理クエリ: - -- [CREATE QUOTA](/sql-reference/statements/create/quota) -- [ALTER QUOTA](/sql-reference/statements/alter/quota) -- [DROP QUOTA](/sql-reference/statements/drop#drop-quota) -- [SHOW CREATE QUOTA](/sql-reference/statements/show#show-create-quota) -- [SHOW QUOTA](/sql-reference/statements/show#show-quota) -- [SHOW QUOTAS](/sql-reference/statements/show#show-quotas) - -### SQL駆動型アクセス制御およびアカウント管理の有効化 {#enabling-access-control} - -- 設定ストレージ用のディレクトリをセットアップします。 - - ClickHouseは、[access_control_path](/operations/server-configuration-parameters/settings.md#access_control_path)サーバー設定パラメータで設定されたフォルダにアクセスエンティティの設定を保存します。 - -- 少なくとも1つのユーザーアカウントに対してSQL駆動型アクセス制御およびアカウント管理を有効にします。 - - デフォルトでは、すべてのユーザーに対してSQL駆動型アクセス制御とアカウント管理は無効になっています。 `users.xml` 設定ファイルに少なくとも1つのユーザーを構成し、[`access_management`](/operations/settings/settings-users.md#access_management-user-setting)、`named_collection_control`、`show_named_collections`、および `show_named_collections_secrets` の値を 1 に設定する必要があります。 - -## SQLユーザーとロールの定義 {#defining-sql-users-and-roles} - -:::tip -ClickHouse Cloudを使用している場合は、[クラウドアクセス管理](/cloud/security/cloud-access-management)を参照してください。 -::: - -この記事では、SQLユーザーおよびロールの定義の基本と、それらの権限や許可をデータベース、テーブル、行、カラムに適用する方法を示します。 - -### SQLユーザーモードを有効にする {#enabling-sql-user-mode} - -1. `` ユーザーの `users.xml` ファイルでSQLユーザーモードを有効にします: - ```xml - 1 - 1 - 1 - 1 - ``` - - :::note - `default` ユーザーは、新規インストール時に作成される唯一のユーザーであり、デフォルトではノード間通信に使用されるアカウントでもあります。 - - 本番環境では、SQL管理ユーザーでノード間通信が構成され、ノード間通信が ``、クラスタ認証情報、および/またはノード間HTTPおよびトランスポートプロトコル認証情報で設定された後、このユーザーを無効にすることが推奨されます。なぜなら、`default` アカウントはノード間通信に使用されるからです。 - ::: - -2. 変更を適用するためにノードを再起動します。 - -3. ClickHouseクライアントを起動します: - ```sql - clickhouse-client --user default --password - ``` -### ユーザーの定義 {#defining-users} - -1. SQL管理者アカウントを作成します: - ```sql - CREATE USER clickhouse_admin IDENTIFIED BY 'password'; - ``` -2. 新しいユーザーにフル管理権限を付与します - ```sql - GRANT ALL ON *.* TO clickhouse_admin WITH GRANT OPTION; - ``` - -## ALTER権限 {#alter-permissions} - -この記事は、権限の定義方法や、特権ユーザーが `ALTER` ステートメントを使用する際に権限がどのように機能するかを理解するのに役立つことを目的としています。 - -`ALTER` ステートメントは、いくつかのカテゴリに分かれており、その中には階層的なものとそうでないものがあり、明示的に定義する必要があります。 - -**例:DB、テーブル、およびユーザーの構成** -1. 管理ユーザーでサンプルユーザーを作成します -```sql -CREATE USER my_user IDENTIFIED BY 'password'; -``` - -2. サンプルデータベースを作成します -```sql -CREATE DATABASE my_db; -``` - -3. サンプルテーブルを作成します -```sql -CREATE TABLE my_db.my_table (id UInt64, column1 String) ENGINE = MergeTree() ORDER BY id; -``` - -4. 
権限を付与または取り消すためのサンプル管理ユーザーを作成します -```sql -CREATE USER my_alter_admin IDENTIFIED BY 'password'; -``` - -:::note -権限を付与または取り消すためには、管理ユーザーは `WITH GRANT OPTION` 権限を持っている必要があります。 -例えば: - ```sql - GRANT ALTER ON my_db.* WITH GRANT OPTION - ``` -権限を `GRANT` または `REVOKE` するためには、そのユーザーはまずその権限を持っている必要があります。 -::: - -**権限を付与または取り消す** - -`ALTER` 階層: - -```response -├── ALTER (テーブルとビューのみ)/ -│ ├── ALTER TABLE/ -│ │ ├── ALTER UPDATE -│ │ ├── ALTER DELETE -│ │ ├── ALTER COLUMN/ -│ │ │ ├── ALTER ADD COLUMN -│ │ │ ├── ALTER DROP COLUMN -│ │ │ ├── ALTER MODIFY COLUMN -│ │ │ ├── ALTER COMMENT COLUMN -│ │ │ ├── ALTER CLEAR COLUMN -│ │ │ └── ALTER RENAME COLUMN -│ │ ├── ALTER INDEX/ -│ │ │ ├── ALTER ORDER BY -│ │ │ ├── ALTER SAMPLE BY -│ │ │ ├── ALTER ADD INDEX -│ │ │ ├── ALTER DROP INDEX -│ │ │ ├── ALTER MATERIALIZE INDEX -│ │ │ └── ALTER CLEAR INDEX -│ │ ├── ALTER CONSTRAINT/ -│ │ │ ├── ALTER ADD CONSTRAINT -│ │ │ └── ALTER DROP CONSTRAINT -│ │ ├── ALTER TTL/ -│ │ │ └── ALTER MATERIALIZE TTL -│ │ ├── ALTER SETTINGS -│ │ ├── ALTER MOVE PARTITION -│ │ ├── ALTER FETCH PARTITION -│ │ └── ALTER FREEZE PARTITION -│ └── ALTER LIVE VIEW/ -│ ├── ALTER LIVE VIEW REFRESH -│ └── ALTER LIVE VIEW MODIFY QUERY -├── ALTER DATABASE -├── ALTER USER -├── ALTER ROLE -├── ALTER QUOTA -├── ALTER [ROW] POLICY -└── ALTER [SETTINGS] PROFILE -``` - -1. ユーザーまたはロールに `ALTER` 権限を付与する - -`GRANT ALTER ON *.* TO my_user`を使用すると、トップレベルの `ALTER TABLE` と `ALTER VIEW` にのみ影響します。他の `ALTER` ステートメントは、個別に付与または取り消す必要があります。 - -例えば、基本的な `ALTER` 権限を付与する: - -```sql -GRANT ALTER ON my_db.my_table TO my_user; -``` - -権限の結果セット: - -```sql -SHOW GRANTS FOR my_user; -``` - -```response -SHOW GRANTS FOR my_user - -Query id: 706befbc-525e-4ec1-a1a2-ba2508cc09e3 - -┌─GRANTS FOR my_user───────────────────────────────────────────┐ -│ GRANT ALTER TABLE, ALTER VIEW ON my_db.my_table TO my_user │ -└──────────────────────────────────────────────────────────────┘ -``` - -これにより、上記の例での `ALTER TABLE` と `ALTER VIEW` の下にあるすべての権限が付与されますが、 `ALTER ROW POLICY` のような他の特定の `ALTER` 権限は付与されません(階層を参照すると、`ALTER ROW POLICY` は `ALTER TABLE` または `ALTER VIEW` の子ではないことがわかります)。それらは明示的に付与または取り消す必要があります。 - -`ALTER` 権限のサブセットのみが必要な場合は、それぞれを個別に付与できますが、その権限にサブ権限がある場合は自動的に付与されます。 - -例えば: - -```sql -GRANT ALTER COLUMN ON my_db.my_table TO my_user; -``` - -権限は次のように設定されます: - -```sql -SHOW GRANTS FOR my_user; -``` - -```response -SHOW GRANTS FOR my_user - -Query id: 47b3d03f-46ac-4385-91ec-41119010e4e2 - -┌─GRANTS FOR my_user────────────────────────────────┐ -│ GRANT ALTER COLUMN ON default.my_table TO my_user │ -└───────────────────────────────────────────────────┘ - -1 row in set. Elapsed: 0.004 sec. -``` - -これにより、以下のサブ権限も付与されます: - -```sql -ALTER ADD COLUMN -ALTER DROP COLUMN -ALTER MODIFY COLUMN -ALTER COMMENT COLUMN -ALTER CLEAR COLUMN -ALTER RENAME COLUMN -``` - -2. ユーザーとロールから `ALTER` 権限を取り消す - -`REVOKE` ステートメントは、`GRANT` ステートメントと同様に機能します。 - -ユーザー/ロールにサブ権限が付与されている場合、そのサブ権限を直接取り消すか、継承している上位の権限を取り消すことができます。 - -例えば、ユーザーに `ALTER ADD COLUMN`が付与されている場合 - -```sql -GRANT ALTER ADD COLUMN ON my_db.my_table TO my_user; -``` - -```response -GRANT ALTER ADD COLUMN ON my_db.my_table TO my_user - -Query id: 61fe0fdc-1442-4cd6-b2f3-e8f2a853c739 - -Ok. - -0 rows in set. Elapsed: 0.002 sec. 
-``` - -```sql -SHOW GRANTS FOR my_user; -``` - -```response -SHOW GRANTS FOR my_user - -Query id: 27791226-a18f-46c8-b2b4-a9e64baeb683 - -┌─GRANTS FOR my_user──────────────────────────────────┐ -│ GRANT ALTER ADD COLUMN ON my_db.my_table TO my_user │ -└─────────────────────────────────────────────────────┘ -``` - -権限は個別に取り消すことができます: - -```sql -REVOKE ALTER ADD COLUMN ON my_db.my_table FROM my_user; -``` - -または、すべての上位レベルから取り消すこともできます(すべてのCOLUMNサブ権限を取り消す): - -```response -REVOKE ALTER COLUMN ON my_db.my_table FROM my_user; -``` - -```response -REVOKE ALTER COLUMN ON my_db.my_table FROM my_user - -Query id: b882ba1b-90fb-45b9-b10f-3cda251e2ccc - -Ok. - -0 rows in set. Elapsed: 0.002 sec. -``` - -```sql -SHOW GRANTS FOR my_user; -``` - -```response -SHOW GRANTS FOR my_user - -Query id: e7d341de-de65-490b-852c-fa8bb8991174 - -Ok. - -0 rows in set. Elapsed: 0.003 sec. -``` - -**追加情報** - -権限は、`WITH GRANT OPTION`だけでなく、実際にその権限を持っているユーザーによって付与されなければなりません。 - -1. 管理ユーザーに権限を付与し、一連の権限を管理できるようにします -以下はその例です: - -```sql -GRANT SELECT, ALTER COLUMN ON my_db.my_table TO my_alter_admin WITH GRANT OPTION; -``` - -これで、ユーザーは `ALTER COLUMN` とそのすべてのサブ権限を付与または取り消すことができます。 - -**テスト** - -1. `SELECT` 権限を追加します -```sql -GRANT SELECT ON my_db.my_table TO my_user; -``` - -2. ユーザーにカラム追加権限を付与します -```sql -GRANT ADD COLUMN ON my_db.my_table TO my_user; -``` - -3. 制限付きユーザーでログインします -```bash -clickhouse-client --user my_user --password password --port 9000 --host -``` - -4. カラムを追加するテスト -```sql -ALTER TABLE my_db.my_table ADD COLUMN column2 String; -``` - -```response -ALTER TABLE my_db.my_table - ADD COLUMN `column2` String - -Query id: d5d6bfa1-b80c-4d9f-8dcd-d13e7bd401a5 - -Ok. - -0 rows in set. Elapsed: 0.010 sec. -``` - -```sql -DESCRIBE my_db.my_table; -``` - -```response -DESCRIBE TABLE my_db.my_table - -Query id: ab9cb2d0-5b1a-42e1-bc9c-c7ff351cb272 - -┌─name────┬─type───┬─default_type─┬─default_expression─┬─comment─┬─codec_expression─┬─ttl_expression─┐ -│ id │ UInt64 │ │ │ │ │ │ -│ column1 │ String │ │ │ │ │ │ -│ column2 │ String │ │ │ │ │ │ -└─────────┴────────┴──────────────┴────────────────────┴─────────┴──────────────────┴────────────────┘ -``` - -4. カラムを削除するテスト -```sql -ALTER TABLE my_db.my_table DROP COLUMN column2; -``` - -```response -ALTER TABLE my_db.my_table - DROP COLUMN column2 - -Query id: 50ad5f6b-f64b-4c96-8f5f-ace87cea6c47 - - -0 rows in set. Elapsed: 0.004 sec. - -Received exception from server (version 22.5.1): -Code: 497. DB::Exception: Received from chnode1.marsnet.local:9440. DB::Exception: my_user: 権限が不足しています。このクエリを実行するには、my_db.my_table の ALTER DROP COLUMN(column2) の権限が必要です。(ACCESS_DENIED) -``` - -5. 権限を付与するために管理者をテスト -```sql -GRANT SELECT, ALTER COLUMN ON my_db.my_table TO my_alter_admin WITH GRANT OPTION; -``` - -6. アルターユーザーでログインします -```bash -clickhouse-client --user my_alter_admin --password password --port 9000 --host -``` - -7. サブ権限を付与します -```sql -GRANT ALTER ADD COLUMN ON my_db.my_table TO my_user; -``` - -```response -GRANT ALTER ADD COLUMN ON my_db.my_table TO my_user - -Query id: 1c7622fa-9df1-4c54-9fc3-f984c716aeba - -Ok. -``` - -8. アルターユーザーが持っていない権限を付与しようとするテスト -```sql -GRANT ALTER UPDATE ON my_db.my_table TO my_user; -``` - -```response -GRANT ALTER UPDATE ON my_db.my_table TO my_user - -Query id: 191690dc-55a6-4625-8fee-abc3d14a5545 - - -0 rows in set. Elapsed: 0.004 sec. - -Received exception from server (version 22.5.1): -Code: 497. DB::Exception: Received from chnode1.marsnet.local:9440. 
DB::Exception: my_alter_admin: 権限が不足しています。このクエリを実行するには、my_db.my_table に対し ALTER UPDATE ON の権限を WITH GRANT OPTION で付与する必要があります。(ACCESS_DENIED) -``` - -**まとめ** -`ALTER` 権限はテーブルおよびビューに対して階層的ですが、他の `ALTER` ステートメントではそうではありません。権限は細かいレベルで設定することも、一群の権限として設定することもでき、同様に取り消すこともできます。権限を付与または取り消すユーザーには、ユーザーに権限を設定するための `WITH GRANT OPTION` が必要であり、またその権限自体を持っている必要があります。実行ユーザーは、自分自身が権限付与オプションを持っていない場合、自分の権限を取り消すことはできません。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/guides/sre/user-management/index.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/guides/sre/user-management/index.md.hash deleted file mode 100644 index 36876786bab..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/guides/sre/user-management/index.md.hash +++ /dev/null @@ -1 +0,0 @@ -6917ecc06b94e582 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/guides/sre/user-management/ssl-user-auth.md b/i18n/jp/docusaurus-plugin-content-docs/current/guides/sre/user-management/ssl-user-auth.md deleted file mode 100644 index 5231e588fe0..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/guides/sre/user-management/ssl-user-auth.md +++ /dev/null @@ -1,144 +0,0 @@ ---- -sidebar_label: 'SSLユーザー証明書認証' -sidebar_position: 3 -slug: '/guides/sre/ssl-user-auth' -title: 'SSLユーザー証明書を使用した認証の構成' -description: 'このガイドでは、SSLユーザー証明書を使用した認証を構成するためのシンプルで最小限の設定を提供します。' ---- - -import SelfManaged from '@site/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_self_managed_only_no_roadmap.md'; - - -# SSLユーザー証明書による認証の設定 - - -このガイドでは、SSLユーザー証明書を使用した認証を設定するためのシンプルで最小限の設定を提供します。このチュートリアルは、[SSL-TLSの設定ガイド](../configuring-ssl.md)に基づいています。 - -:::note -SSLユーザー認証は、`https`、`native`、`mysql`、および`postgresql`インターフェースを使用する際にサポートされています。 - -ClickHouseノードには、セキュアな認証のために`strict`を設定する必要があります(ただし、テスト目的で`relaxed`は機能します)。 - -AWS NLBをMySQLインターフェースで使用する場合、AWSサポートに文書化されていないオプションを有効にするよう依頼する必要があります: - -> `proxy_protocol_v2.client_to_server.header_placement,Value=on_first_ack`として、NLBプロキシプロトコルv2を設定できるようにしたいです。 -::: - -## 1. SSLユーザー証明書の作成 {#1-create-ssl-user-certificates} - -:::note -この例では、自己署名CAを用いた自己署名証明書を使用します。本番環境では、CSRを作成し、PKIチームまたは証明書プロバイダに提出して適切な証明書を取得してください。 -::: - -1. 証明書署名要求(CSR)とキーを生成します。基本的な形式は次のとおりです: - ```bash - openssl req -newkey rsa:2048 -nodes -subj "/CN=:" -keyout .key -out .csr - ``` - この例では、このサンプル環境で使用されるドメインとユーザーに対して次のようにします: - ```bash - openssl req -newkey rsa:2048 -nodes -subj "/CN=chnode1.marsnet.local:cert_user" -keyout chnode1_cert_user.key -out chnode1_cert_user.csr - ``` - :::note - CNは任意であり、証明書の識別子として任意の文字列を使用できます。これは、次のステップでユーザーを作成する際に使用されます。 - ::: - -2. 認証に使用する新しいユーザー証明書を生成して署名します。基本的な形式は次のとおりです: - ```bash - openssl x509 -req -in .csr -out .crt -CA .crt -CAkey .key -days 365 - ``` - この例では、このサンプル環境で使用されるドメインとユーザーに対して次のようにします: - ```bash - openssl x509 -req -in chnode1_cert_user.csr -out chnode1_cert_user.crt -CA marsnet_ca.crt -CAkey marsnet_ca.key -days 365 - ``` - -## 2. SQLユーザーを作成し、権限を付与する {#2-create-a-sql-user-and-grant-permissions} - -:::note -SQLユーザーを有効にし、ロールを設定する方法の詳細については、[SQLユーザーとロールの定義](index.md)ユーザーガイドを参照してください。 -::: - -1. 証明書認証を使用するように定義されたSQLユーザーを作成します: - ```sql - CREATE USER cert_user IDENTIFIED WITH ssl_certificate CN 'chnode1.marsnet.local:cert_user'; - ``` - -2. 
新しい証明書ユーザーに権限を付与します: - ```sql - GRANT ALL ON *.* TO cert_user WITH GRANT OPTION; - ``` - :::note - この演習では、デモ目的のためにユーザーにフル管理者権限が付与されます。権限設定については、ClickHouseの[RBACドキュメント](/guides/sre/user-management/index.md)を参照してください。 - ::: - - :::note - ユーザーとロールを定義するためにSQLを使用することをお勧めします。ただし、現在設定ファイルでユーザーとロールを定義している場合、ユーザーは次のようになります: - ```xml - - - - chnode1.marsnet.local:cert_user - - - ::/0 - - default - 1 - - - - ``` - ::: - - -## 3. テスト {#3-testing} - -1. ユーザー証明書、ユーキー、およびCA証明書をリモートノードにコピーします。 - -2. ClickHouseの[クライアント設定](/interfaces/cli.md#configuration_files)で証明書とパスを使用してOpenSSLを構成します。 - - ```xml - - - my_cert_name.crt - my_cert_name.key - my_ca_cert.crt - - - ``` - -3. `clickhouse-client`を実行します。 - ```bash - clickhouse-client --user --query 'SHOW TABLES' - ``` - :::note - 証明書が設定に指定されている場合、clickhouse-clientに渡されたパスワードは無視されることに注意してください。 - ::: - - -## 4. HTTPをテストする {#4-testing-http} - -1. ユーザー証明書、ユーキー、およびCA証明書をリモートノードにコピーします。 - -2. `curl`を使用してサンプルSQLコマンドをテストします。基本的な形式は次のとおりです: - ```bash - echo 'SHOW TABLES' | curl 'https://:8443' --cert .crt --key .key --cacert .crt -H "X-ClickHouse-SSL-Certificate-Auth: on" -H "X-ClickHouse-User: " --data-binary @- - ``` - 例えば: - ```bash - echo 'SHOW TABLES' | curl 'https://chnode1:8443' --cert chnode1_cert_user.crt --key chnode1_cert_user.key --cacert marsnet_ca.crt -H "X-ClickHouse-SSL-Certificate-Auth: on" -H "X-ClickHouse-User: cert_user" --data-binary @- - ``` - 出力は次のようになります: - ```response - INFORMATION_SCHEMA - default - information_schema - system - ``` - :::note - パスワードが指定されていないことに注意してください。証明書がパスワードの代わりに使用され、ClickHouseがユーザーを認証する方法です。 - ::: - - -## まとめ {#summary} - -この記事では、SSL証明書認証のためのユーザーを作成および設定する基本を示しました。この方法は、`clickhouse-client`や`https`インターフェースをサポートする任意のクライアントで使用でき、HTTPヘッダーを設定できます。生成された証明書とキーはプライベートに保ち、限定的なアクセス権を持たせるべきです。なぜなら、証明書はユーザーのClickHouseデータベースに対する操作を認証および承認するために使用されるからです。証明書とキーはパスワードのように扱ってください。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/guides/sre/user-management/ssl-user-auth.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/guides/sre/user-management/ssl-user-auth.md.hash deleted file mode 100644 index 38838f83536..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/guides/sre/user-management/ssl-user-auth.md.hash +++ /dev/null @@ -1 +0,0 @@ -d14411943b5aef88 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/guides/troubleshooting.md b/i18n/jp/docusaurus-plugin-content-docs/current/guides/troubleshooting.md deleted file mode 100644 index c7af7371cd8..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/guides/troubleshooting.md +++ /dev/null @@ -1,197 +0,0 @@ ---- -title: 'トラブルシューティング' -description: 'インストールのトラブルシューティングガイド' -slug: '/guides/troubleshooting' ---- - - - -## インストール {#installation} - -### apt-key で keyserver.ubuntu.com から GPG キーをインポートできない {#cannot-import-gpg-keys-from-keyserverubuntucom-with-apt-key} - -`apt-key` 機能は、[Advanced package tool (APT) で非推奨になりました](https://manpages.debian.org/bookworm/apt/apt-key.8.en.html)。ユーザーは代わりに `gpg` コマンドを使用するべきです。詳細は [インストールガイド](../getting-started/install/install.mdx) を参照してください。 - -### gpg で keyserver.ubuntu.com から GPG キーをインポートできない {#cannot-import-gpg-keys-from-keyserverubuntucom-with-gpg} - -1. `gpg` がインストールされているか確認します: - -```shell -sudo apt-get install gnupg -``` - -### apt-get で ClickHouse リポジトリから deb パッケージを取得できない {#cannot-get-deb-packages-from-clickhouse-repository-with-apt-get} - -1. ファイアウォールの設定を確認します。 -1. 
何らかの理由でリポジトリにアクセスできない場合は、[インストールガイド](../getting-started/install/install.mdx) に記載されている方法でパッケージをダウンロードし、`sudo dpkg -i ` コマンドを使用して手動でインストールします。また、`tzdata` パッケージも必要です。 - -### apt-get で ClickHouse リポジトリから deb パッケージを更新できない {#cannot-update-deb-packages-from-clickhouse-repository-with-apt-get} - -GPG キーが変更されると、この問題が発生することがあります。 - -リポジトリ設定を更新するには、[セットアップ](/install/debian_ubuntu) ページのマニュアルを使用してください。 - -### `apt-get update` で異なる警告が表示される {#you-get-different-warnings-with-apt-get-update} - -表示される警告メッセージは以下のいずれかです: - -```shell -N: Skipping acquire of configured file 'main/binary-i386/Packages' as repository 'https://packages.clickhouse.com/deb stable InRelease' doesn't support architecture 'i386' -``` - -```shell -E: Failed to fetch https://packages.clickhouse.com/deb/dists/stable/main/binary-amd64/Packages.gz File has unexpected size (30451 != 28154). Mirror sync in progress? -``` - -```shell -E: Repository 'https://packages.clickhouse.com/deb stable InRelease' changed its 'Origin' value from 'Artifactory' to 'ClickHouse' -E: Repository 'https://packages.clickhouse.com/deb stable InRelease' changed its 'Label' value from 'Artifactory' to 'ClickHouse' -N: Repository 'https://packages.clickhouse.com/deb stable InRelease' changed its 'Suite' value from 'stable' to '' -N: This must be accepted explicitly before updates for this repository can be applied. See apt-secure(8) manpage for details. -``` - -```shell -Err:11 https://packages.clickhouse.com/deb stable InRelease -400 Bad Request [IP: 172.66.40.249 443] -``` - -上記の問題を解決するには、次のスクリプトを使用してください: - -```shell -sudo rm /var/lib/apt/lists/packages.clickhouse.com_* /var/lib/dpkg/arch /var/lib/apt/lists/partial/packages.clickhouse.com_* -sudo apt-get clean -sudo apt-get autoclean -``` - -### Yum でパッケージを取得できない理由が署名の不正 {#cant-get-packages-with-yum-because-of-wrong-signature} - -考えられる問題:キャッシュが不正で、2022-09 に GPG キーが更新された後に破損した可能性があります。 - -解決策は、Yum のキャッシュと lib ディレクトリを削除することです: - -```shell -sudo find /var/lib/yum/repos/ /var/cache/yum/ -name 'clickhouse-*' -type d -exec rm -rf {} + -sudo rm -f /etc/yum.repos.d/clickhouse.repo -``` - -その後、[インストールガイド](/install/redhat) に従ってください。 - -## サーバーへの接続 {#connecting-to-the-server} - -考えられる問題: - -- サーバーが実行されていない。 -- 予期しないか不正な設定パラメータ。 - -### サーバーが実行されていない {#server-is-not-running} - -#### サーバーが実行されているか確認 {#check-if-server-is-running} - -```shell -sudo service clickhouse-server status -``` - -サーバーが実行されていない場合は、次のコマンドで起動します: - -```shell -sudo service clickhouse-server start -``` - -#### ログを確認 {#check-the-logs} - -`clickhouse-server` の主要なログは、デフォルトで `/var/log/clickhouse-server/clickhouse-server.log` にあります。 - -サーバーが正常に起動した場合は、次の文字列が表示されるはずです: - -- ` Application: starting up.` — サーバーが起動しました。 -- ` Application: Ready for connections.` — サーバーが実行中で、接続の準備が整いました。 - -`clickhouse-server` の起動が設定エラーで失敗した場合は、エラーの説明が含まれた `` 文字列が表示されます。例えば: - -```plaintext -2019.01.11 15:23:25.549505 [ 45 ] {} ExternalDictionaries: Failed reloading 'event2id' external dictionary: Poco::Exception. Code: 1000, e.code() = 111, e.displayText() = Connection refused, e.what() = Connection refused -``` - -ファイルの末尾にエラーが表示されない場合は、次の文字列からファイル全体を確認してください: - -```plaintext - Application: starting up. -``` - -サーバー上で `clickhouse-server` の2回目のインスタンスを起動しようとすると、次のログが表示されます: - -```plaintext -2019.01.11 15:25:11.151730 [ 1 ] {} : Starting ClickHouse 19.1.0 with revision 54413 -2019.01.11 15:25:11.154578 [ 1 ] {} Application: starting up -2019.01.11 15:25:11.156361 [ 1 ] {} StatusFile: Status file ./status already exists - unclean restart. 
Contents: -PID: 8510 -Started at: 2019-01-11 15:24:23 -Revision: 54413 - -2019.01.11 15:25:11.156673 [ 1 ] {} Application: DB::Exception: Cannot lock file ./status. Another server instance in same directory is already running. -2019.01.11 15:25:11.156682 [ 1 ] {} Application: shutting down -2019.01.11 15:25:11.156686 [ 1 ] {} Application: Uninitializing subsystem: Logging Subsystem -2019.01.11 15:25:11.156716 [ 2 ] {} BaseDaemon: Stop SignalListener thread -``` - -#### system.d ログを確認 {#see-systemd-logs} - -`clickhouse-server` のログに役立つ情報が見つからない場合やログがない場合は、次のコマンドを使用して `system.d` ログを表示できます: - -```shell -sudo journalctl -u clickhouse-server -``` - -#### インタラクティブモードで clickhouse-server を起動 {#start-clickhouse-server-in-interactive-mode} - -```shell -sudo -u clickhouse /usr/bin/clickhouse-server --config-file /etc/clickhouse-server/config.xml -``` - -このコマンドは、autostart スクリプトの標準パラメータでサーバーをインタラクティブアプリとして起動します。このモードでは `clickhouse-server` がコンソールにすべてのイベントメッセージを出力します。 - -### 設定パラメータ {#configuration-parameters} - -確認してください: - -1. Docker 設定: - - - IPv6 ネットワークで Docker 内で ClickHouse を実行している場合は、`network=host` が設定されていることを確認してください。 - -1. エンドポイント設定。 - - [listen_host](/operations/server-configuration-parameters/settings#listen_host) と [tcp_port](/operations/server-configuration-parameters/settings#tcp_port) の設定を確認します。 - - デフォルトでは、ClickHouse サーバーはローカルホスト接続のみを受け入れます。 - -1. HTTP プロトコル設定: - - - HTTP API のプロトコル設定を確認してください。 - -1. セキュア接続設定。 - - - 次の項目を確認してください: - - [tcp_port_secure](/operations/server-configuration-parameters/settings#tcp_port_secure) の設定。 - - [SSL証明書](/operations/server-configuration-parameters/settings#openssl) の設定。 - - 接続時に適切なパラメータを使用してください。例えば、`clickhouse_client` で `port_secure` パラメータを使用します。 - -1. ユーザー設定: - - - 不正なユーザー名またはパスワードを使用している可能性があります。 - -## クエリ処理 {#query-processing} - -ClickHouse がクエリを処理できない場合、エラーの説明をクライアントに送信します。`clickhouse-client` では、コンソールにエラーの説明が表示されます。HTTP インターフェースを使用している場合、ClickHouse はレスポンスボディにエラーの説明を送信します。例えば: - -```shell -$ curl 'http://localhost:8123/' --data-binary "SELECT a" -Code: 47, e.displayText() = DB::Exception: Unknown identifier: a. 
Note that there are no tables (FROM clause) in your query, context: required_names: 'a' source_tables: table_aliases: private_aliases: column_aliases: public_columns: 'a' masked_columns: array_join_columns: source_columns: , e.what() = DB::Exception -``` - -`clickhouse-client` を `stack-trace` パラメータで起動する場合、ClickHouse はエラーの説明とともにサーバースタックトレースを返します。 - -接続の切断に関するメッセージが表示されることがあります。この場合、クエリを繰り返すことができます。クエリを実行するたびに接続が切断される場合は、サーバーログにエラーがないか確認してください。 - -## クエリ処理の効率 {#efficiency-of-query-processing} - -ClickHouse の動作が非常に遅い場合は、サーバーリソースとネットワークへの負荷をクエリごとにプロファイリングする必要があります。 - -clickhouse-benchmark ツールを使用してクエリをプロファイリングできます。これにより、1秒あたりに処理されたクエリの数、1秒あたりに処理された行の数、クエリ処理時間のパーセンタイルが表示されます。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/guides/troubleshooting.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/guides/troubleshooting.md.hash deleted file mode 100644 index dead6aad5a5..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/guides/troubleshooting.md.hash +++ /dev/null @@ -1 +0,0 @@ -4b26882df651ae33 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/guides/writing-queries.md b/i18n/jp/docusaurus-plugin-content-docs/current/guides/writing-queries.md deleted file mode 100644 index 3eb0d6acdef..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/guides/writing-queries.md +++ /dev/null @@ -1,59 +0,0 @@ ---- -sidebar_position: 3 -sidebar_label: 'データの選択' -title: 'ClickHouseデータの選択' -slug: '/guides/writing-queries' -description: 'ClickHouseデータの選択について学びます' ---- - - - -ClickHouseはSQLデータベースであり、データをクエリするには、すでに慣れ親しんでいるタイプの`SELECT`クエリを書きます。例えば: - -```sql -SELECT * -FROM helloworld.my_first_table -ORDER BY timestamp -``` - -:::note -構文および利用可能な句とオプションの詳細については、[SQLリファレンス](../sql-reference/statements/select/index.md)を参照してください。 -::: - -応答がきれいなテーブル形式で返されることに注意してください: - -```response -┌─user_id─┬─message────────────────────────────────────────────┬───────────timestamp─┬──metric─┐ -│ 102 │ Insert a lot of rows per batch │ 2022-03-21 00:00:00 │ 1.41421 │ -│ 102 │ Sort your data based on your commonly-used queries │ 2022-03-22 00:00:00 │ 2.718 │ -│ 101 │ Hello, ClickHouse! │ 2022-03-22 14:04:09 │ -1 │ -│ 101 │ Granules are the smallest chunks of data read │ 2022-03-22 14:04:14 │ 3.14159 │ -└─────────┴────────────────────────────────────────────────────┴─────────────────────┴─────────┘ - -4 rows in set. Elapsed: 0.008 sec. -``` - -`FORMAT`句を追加して、ClickHouseの[多くのサポートされた出力形式](../interfaces/formats.md)の1つを指定します: -```sql -SELECT * -FROM helloworld.my_first_table -ORDER BY timestamp -FORMAT TabSeparated -``` - -上記のクエリでは、出力がタブ区切りで返されます: - -```response -Query id: 3604df1c-acfd-4117-9c56-f86c69721121 - -102 Insert a lot of rows per batch 2022-03-21 00:00:00 1.41421 -102 Sort your data based on your commonly-used queries 2022-03-22 00:00:00 2.718 -101 Hello, ClickHouse! 2022-03-22 14:04:09 -1 -101 Granules are the smallest chunks of data read 2022-03-22 14:04:14 3.14159 - -4 rows in set. Elapsed: 0.005 sec. 
-``` - -:::note -ClickHouseは70以上の入力および出力形式をサポートしているため、何千もの関数とすべてのデータ形式を使用して、ClickHouseを使って印象的で迅速なETLのようなデータ変換を行うことができます。実際、データを変換するためにClickHouseサーバーを稼働させる必要はなく、`clickhouse-local`ツールを使用できます。詳細については、[`clickhouse-local`のドキュメントページ](../operations/utilities/clickhouse-local.md)を参照してください。 -::: diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/guides/writing-queries.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/guides/writing-queries.md.hash deleted file mode 100644 index ca6bc76f9ee..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/guides/writing-queries.md.hash +++ /dev/null @@ -1 +0,0 @@ -c573a0046225fb0e diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/home_links/deployment_links.json b/i18n/jp/docusaurus-plugin-content-docs/current/home_links/deployment_links.json deleted file mode 100644 index 1f0c1507cf0..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/home_links/deployment_links.json +++ /dev/null @@ -1,14 +0,0 @@ -[ - { - "title": "Quick Start", - "description": "Get up and running on ClickHouse in minutes, explore some sample data, and build your solution", - "url": "/docs/quick-start/", - "background": "cloud" - }, - { - "title": "Tutorials and Sample Datasets", - "description": "From taxi rides to property prices, learn how to get data into ClickHouse and model it for query performance", - "url": "/docs/getting-started/example-datasets/", - "background": "cloud" - } -] diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/home_links/links_101.json b/i18n/jp/docusaurus-plugin-content-docs/current/home_links/links_101.json deleted file mode 100644 index 814797c9a34..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/home_links/links_101.json +++ /dev/null @@ -1,27 +0,0 @@ -[ - { - "title": "SQL reference", - "description": "Learn the statements, functions, and data types that are available", - "url": "/docs/sql-reference" - }, - { - "title": "Ingest data", - "description": "Explore the many ways to get data into ClickHouse", - "url": "/docs/integrations/data-ingestion/" - }, - { - "title": "Visualize data", - "description": "Now that your data is in ClickHouse, it's time to analyze it", - "url": "/docs/integrations/data-visualization/" - }, - { - "title": "Optimize data", - "description": "Ways to improve the performance of your ClickHouse service", - "url": "/docs/optimize/" - }, - { - "title": "Migrate data", - "description": "Importing your data from an external source into ClickHouse", - "url": "/docs/integrations/migration/" - } -] diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/cli.mdx b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/cli.mdx deleted file mode 100644 index 528925868fe..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/cli.mdx +++ /dev/null @@ -1,12 +0,0 @@ ---- -'sidebar_position': 30 -'sidebar_label': 'ClickHouseクライアント' -'title': 'ClickHouseクライアント' -'slug': '/integrations/sql-clients/cli' -'displayed_sidebar': 'integrations' -'description': 'CLIインターフェースを説明するページ' ---- - -import Content from '@site/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/cli.md'; - - diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/cli.mdx.hash b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/cli.mdx.hash deleted file mode 100644 index c9d883b4301..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/cli.mdx.hash +++ /dev/null @@ -1 +0,0 @@ -f1678b93e25bfcec diff --git 
a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/_category_.yml b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/_category_.yml deleted file mode 100644 index be17e8ff107..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/_category_.yml +++ /dev/null @@ -1,8 +0,0 @@ -position: 200 -label: 'Data ingestion' -collapsible: true -collapsed: true -link: - type: generated-index - title: Data ingestion - slug: /integrations/data-ingestion diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/apache-spark/index.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/apache-spark/index.md deleted file mode 100644 index 548fe4e2f6c..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/apache-spark/index.md +++ /dev/null @@ -1,32 +0,0 @@ ---- -sidebar_label: 'Integrating Apache Spark with ClickHouse' -sidebar_position: 1 -slug: '/integrations/apache-spark' -description: 'Apache SparkとClickHouseの統合' -keywords: -- 'clickhouse' -- 'Apache Spark' -- 'migrating' -- 'data' -title: 'Integrating Apache Spark with ClickHouse' ---- - -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; -import TOCInline from '@theme/TOCInline'; - - -# Apache Spark と ClickHouse の統合 - -
- -[Apache Spark](https://spark.apache.org/) は、単一ノードのマシンまたはクラスターでデータエンジニアリング、データサイエンス、および機械学習を実行するためのマルチ言語エンジンです。 - -Apache Spark と ClickHouse を接続する主な方法は二つです。 - -1. [Spark Connector](./apache-spark/spark-native-connector) - Spark コネクタは `DataSourceV2` を実装しており、独自のカタログ管理があります。現在、これが ClickHouse と Spark を統合する推奨の方法です。 -2. [Spark JDBC](./apache-spark/spark-jdbc) - [JDBC データソース](https://spark.apache.org/docs/latest/sql-data-sources-jdbc.html) を使用して Spark と ClickHouse を統合します。 - -
-
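たとえば、推奨される Spark Connector(方法 1)を使った最小限の PySpark スケッチは次のようになります。ここで使っているホスト・ポート・認証情報(`127.0.0.1`、`8123`、`default`、空のパスワード)やテーブル名 `default.example_table` はあくまで説明用の仮の値であり、コネクタの JAR がクラスパスに含まれていることを前提としています。詳細な設定手順は上記リンク先の各ページを参照してください。

```python
from pyspark.sql import SparkSession

# 最小限のスケッチ: ClickHouse カタログを登録して読み取る(値はすべて例示用)
spark = (
    SparkSession.builder
    .appName("clickhouse-example")
    .config("spark.sql.catalog.clickhouse", "com.clickhouse.spark.ClickHouseCatalog")
    .config("spark.sql.catalog.clickhouse.host", "127.0.0.1")   # 仮のホスト
    .config("spark.sql.catalog.clickhouse.protocol", "http")
    .config("spark.sql.catalog.clickhouse.http_port", "8123")
    .config("spark.sql.catalog.clickhouse.user", "default")     # 仮のユーザー
    .config("spark.sql.catalog.clickhouse.password", "")        # 仮のパスワード
    .getOrCreate()
)

# カタログ登録後は `clickhouse.<database>.<table>` 形式でテーブルを参照できる
spark.sql("SELECT * FROM clickhouse.default.example_table").show()
```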
-両方のソリューションは成功裏にテストされており、Java、Scala、PySpark、Spark SQL を含むさまざまな API と完全に互換性があります。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/apache-spark/index.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/apache-spark/index.md.hash deleted file mode 100644 index b32ae38e39b..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/apache-spark/index.md.hash +++ /dev/null @@ -1 +0,0 @@ -ad7e56deb9684ee1 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/apache-spark/spark-jdbc.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/apache-spark/spark-jdbc.md deleted file mode 100644 index 504556d73c0..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/apache-spark/spark-jdbc.md +++ /dev/null @@ -1,358 +0,0 @@ ---- -sidebar_label: 'Spark JDBC' -sidebar_position: 3 -slug: '/integrations/apache-spark/spark-jdbc' -description: 'ClickHouseとの統合に関するApache Sparkの概要' -keywords: -- 'clickhouse' -- 'Apache Spark' -- 'jdbc' -- 'migrating' -- 'data' -title: 'Spark JDBC' ---- - -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; -import TOCInline from '@theme/TOCInline'; - - -# Spark JDBC -JDBCは、Sparkで最も一般的に使用されるデータソースの1つです。 -このセクションでは、Sparkと共に使用するための[ClickHouse公式JDBCコネクタ](/integrations/language-clients/java/jdbc)の詳細を提供します。 - - - -## データの読み取り {#read-data} - - - - -```java -public static void main(String[] args) { - // Sparkセッションの初期化 - SparkSession spark = SparkSession.builder().appName("example").master("local").getOrCreate(); - - String jdbcURL = "jdbc:ch://localhost:8123/default"; - String query = "select * from example_table where id > 2"; - - - //--------------------------------------------------------------------------------------------------- - // jdbcメソッドを使用してClickHouseからテーブルをロード - //--------------------------------------------------------------------------------------------------- - Properties jdbcProperties = new Properties(); - jdbcProperties.put("user", "default"); - jdbcProperties.put("password", "123456"); - - Dataset df1 = spark.read().jdbc(jdbcURL, String.format("(%s)", query), jdbcProperties); - - df1.show(); - - //--------------------------------------------------------------------------------------------------- - // loadメソッドを使用してClickHouseからテーブルをロード - //--------------------------------------------------------------------------------------------------- - Dataset df2 = spark.read() - .format("jdbc") - .option("url", jdbcURL) - .option("user", "default") - .option("password", "123456") - .option("query", query) - .load(); - - - df2.show(); - - - // Sparkセッションを停止 - spark.stop(); - } -``` - - - - -```java -object ReadData extends App { - // Sparkセッションの初期化 - val spark: SparkSession = SparkSession.builder.appName("example").master("local").getOrCreate - - val jdbcURL = "jdbc:ch://localhost:8123/default" - val query: String = "select * from example_table where id > 2" - - - //--------------------------------------------------------------------------------------------------- - // jdbcメソッドを使用してClickHouseからテーブルをロード - //--------------------------------------------------------------------------------------------------- - val connectionProperties = new Properties() - connectionProperties.put("user", "default") - connectionProperties.put("password", "123456") - - val df1: Dataset[Row] = spark.read. 
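      // クエリを括弧で囲んでサブクエリ(派生テーブル)として渡し、ClickHouse 側で実行させる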
- jdbc(jdbcURL, s"($query)", connectionProperties) - - df1.show() - //--------------------------------------------------------------------------------------------------- - // loadメソッドを使用してClickHouseからテーブルをロード - //--------------------------------------------------------------------------------------------------- - val df2: Dataset[Row] = spark.read - .format("jdbc") - .option("url", jdbcURL) - .option("user", "default") - .option("password", "123456") - .option("query", query) - .load() - - df2.show() - - - - // Sparkセッションを停止 - spark.stop() - -} -``` - - - - -```python -from pyspark.sql import SparkSession - -jar_files = [ - "jars/clickhouse-jdbc-X.X.X-SNAPSHOT-all.jar" -] - - -# JARファイルを使用してSparkセッションを初期化 -spark = SparkSession.builder \ - .appName("example") \ - .master("local") \ - .config("spark.jars", ",".join(jar_files)) \ - .getOrCreate() - -url = "jdbc:ch://localhost:8123/default" -user = "your_user" -password = "your_password" -query = "select * from example_table where id > 2" -driver = "com.clickhouse.jdbc.ClickHouseDriver" - -df = (spark.read - .format('jdbc') - .option('driver', driver) - .option('url', url) - .option('user', user) - .option('password', password).option( - 'query', query).load()) - -df.show() - -``` - - - - -```sql - CREATE TEMPORARY VIEW jdbcTable - USING org.apache.spark.sql.jdbc - OPTIONS ( - url "jdbc:ch://localhost:8123/default", - dbtable "schema.tablename", - user "username", - password "password", - driver "com.clickhouse.jdbc.ClickHouseDriver" - ); - - SELECT * FROM jdbcTable; -``` - - - - -## データの書き込み {#write-data} - - - - -```java - public static void main(String[] args) { - // Sparkセッションの初期化 - SparkSession spark = SparkSession.builder().appName("example").master("local").getOrCreate(); - - // JDBC接続の詳細 - String jdbcUrl = "jdbc:ch://localhost:8123/default"; - Properties jdbcProperties = new Properties(); - jdbcProperties.put("user", "default"); - jdbcProperties.put("password", "123456"); - - // サンプルDataFrameの作成 - StructType schema = new StructType(new StructField[]{ - DataTypes.createStructField("id", DataTypes.IntegerType, false), - DataTypes.createStructField("name", DataTypes.StringType, false) - }); - - List rows = new ArrayList(); - rows.add(RowFactory.create(1, "John")); - rows.add(RowFactory.create(2, "Doe")); - - - Dataset df = spark.createDataFrame(rows, schema); - - //--------------------------------------------------------------------------------------------------- - // jdbcメソッドを使用してdfをClickHouseに書き込む - //--------------------------------------------------------------------------------------------------- - - df.write() - .mode(SaveMode.Append) - .jdbc(jdbcUrl, "example_table", jdbcProperties); - - //--------------------------------------------------------------------------------------------------- - // saveメソッドを使用してdfをClickHouseに書き込む - //--------------------------------------------------------------------------------------------------- - - df.write() - .format("jdbc") - .mode("append") - .option("url", jdbcUrl) - .option("dbtable", "example_table") - .option("user", "default") - .option("password", "123456") - .save(); - - - // Sparkセッションを停止 - spark.stop(); - } -``` - - - - -```java -object WriteData extends App { - - val spark: SparkSession = SparkSession.builder.appName("example").master("local").getOrCreate - - // JDBC接続の詳細 - val jdbcUrl: String = "jdbc:ch://localhost:8123/default" - val jdbcProperties: Properties = new Properties - jdbcProperties.put("user", "default") - jdbcProperties.put("password", "123456") - - // サンプルDataFrameの作成 - 
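    // 列構成(id, name)は書き込み先の ClickHouse テーブル example_table に合わせる必要がある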
- - val rows = Seq(Row(1, "John"), Row(2, "Doe")) - - val schema = List( - StructField("id", DataTypes.IntegerType, nullable = false), - StructField("name", StringType, nullable = true) - ) - - val df: DataFrame = spark.createDataFrame( - spark.sparkContext.parallelize(rows), - StructType(schema) - ) - - //---------------------------------------------------------------------------------------------------//--------------------------------------------------------------------------------------------------- - // jdbcメソッドを使用してdfをClickHouseに書き込む - //---------------------------------------------------------------------------------------------------//--------------------------------------------------------------------------------------------------- - - df.write - .mode(SaveMode.Append) - .jdbc(jdbcUrl, "example_table", jdbcProperties) - - //---------------------------------------------------------------------------------------------------//--------------------------------------------------------------------------------------------------- - // saveメソッドを使用してdfをClickHouseに書き込む - //---------------------------------------------------------------------------------------------------//--------------------------------------------------------------------------------------------------- - - df.write - .format("jdbc") - .mode("append") - .option("url", jdbcUrl) - .option("dbtable", "example_table") - .option("user", "default") - .option("password", "123456") - .save() - - - // Sparkセッションを停止// Sparkセッションを停止 - spark.stop() - -} -``` - - - - -```python -from pyspark.sql import SparkSession -from pyspark.sql import Row - -jar_files = [ - "jars/clickhouse-jdbc-X.X.X-SNAPSHOT-all.jar" -] - - -# JARファイルを使用してSparkセッションを初期化 -spark = SparkSession.builder \ - .appName("example") \ - .master("local") \ - .config("spark.jars", ",".join(jar_files)) \ - .getOrCreate() - - -# DataFrameの作成 -data = [Row(id=11, name="John"), Row(id=12, name="Doe")] -df = spark.createDataFrame(data) - -url = "jdbc:ch://localhost:8123/default" -user = "your_user" -password = "your_password" -driver = "com.clickhouse.jdbc.ClickHouseDriver" - - -# DataFrameをClickHouseに書き込む -df.write \ - .format("jdbc") \ - .option("driver", driver) \ - .option("url", url) \ - .option("user", user) \ - .option("password", password) \ - .option("dbtable", "example_table") \ - .mode("append") \ - .save() - - -``` - - - - -```sql - CREATE TEMPORARY VIEW jdbcTable - USING org.apache.spark.sql.jdbc - OPTIONS ( - url "jdbc:ch://localhost:8123/default", - dbtable "schema.tablename", - user "username", - password "password", - driver "com.clickhouse.jdbc.ClickHouseDriver" - ); - -- resultTableはdf.createTempViewまたはSpark SQLで作成できます - INSERT INTO TABLE jdbcTable - SELECT * FROM resultTable; - -``` - - - - - -## 並列性 {#parallelism} - -Spark JDBCを使用する場合、Sparkは単一のパーティションを使用してデータを読み取ります。より高い同時実行性を達成するためには、`partitionColumn`、`lowerBound`、`upperBound`、および`numPartitions`を指定する必要があり、これは複数のワーカーから並列して読み取る際のテーブルのパーティショニング方法を説明します。 -詳細については、Apache Sparkの公式ドキュメントにある[ JDBCの構成](https://spark.apache.org/docs/latest/sql-data-sources-jdbc.html#data-source-option)をご覧ください。 - -## JDBCの制限 {#jdbc-limitations} - -* 現在のところ、JDBCを使用して既存のテーブルにのみデータを挿入することができます(DF挿入時にテーブルを自動作成する方法はなく、Sparkが他のコネクタで行うように)。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/apache-spark/spark-jdbc.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/apache-spark/spark-jdbc.md.hash deleted file mode 100644 index 1144b7581ea..00000000000 --- 
a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/apache-spark/spark-jdbc.md.hash +++ /dev/null @@ -1 +0,0 @@ -f7be0b9af2d201ad diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/apache-spark/spark-native-connector.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/apache-spark/spark-native-connector.md deleted file mode 100644 index 717db5be604..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/apache-spark/spark-native-connector.md +++ /dev/null @@ -1,563 +0,0 @@ ---- -sidebar_label: 'Sparkネイティブコネクタ' -sidebar_position: 2 -slug: '/integrations/apache-spark/spark-native-connector' -description: 'ClickHouseとのApache Sparkへの導入' -keywords: -- 'clickhouse' -- 'Apache Spark' -- 'migrating' -- 'data' -title: 'Spark Connector' ---- - -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; -import TOCInline from '@theme/TOCInline'; - - -# Spark Connector - -このコネクタは、クエリのパフォーマンスとデータ処理を改善するために、高度なパーティショニングや述語プッシュダウンなど、ClickHouse固有の最適化を活用します。このコネクタは、[ClickHouseの公式JDBCコネクタ](https://github.com/ClickHouse/clickhouse-java)に基づいており、自身のカタログを管理します。 - -Spark 3.0以前は、Sparkにはビルトインのカタログ概念が欠けていたため、ユーザーは通常、Hive MetastoreやAWS Glueなどの外部カタログシステムに依存していました。これらの外部ソリューションでは、ユーザーはSparkでアクセスする前にデータソーステーブルを手動で登録する必要がありました。しかし、Spark 3.0でカタログ概念が導入されたことで、Sparkはカタログプラグインを登録することによって自動的にテーブルを検出できるようになりました。 - -Sparkのデフォルトカタログは`spark_catalog`であり、テーブルは`{catalog name}.{database}.{table}`で識別されます。新しいカタログ機能により、単一のSparkアプリケーション内で複数のカタログを追加して作業することが可能になりました。 - - -## 要件 {#requirements} - -- Java 8または17 -- Scala 2.12または2.13 -- Apache Spark 3.3または3.4または3.5 -## 互換性マトリックス {#compatibility-matrix} - -| バージョン | 互換性のあるSparkバージョン | ClickHouse JDBCバージョン | -|------------|----------------------------------|---------------------------| -| main | Spark 3.3, 3.4, 3.5 | 0.6.3 | -| 0.8.1 | Spark 3.3, 3.4, 3.5 | 0.6.3 | -| 0.8.0 | Spark 3.3, 3.4, 3.5 | 0.6.3 | -| 0.7.3 | Spark 3.3, 3.4 | 0.4.6 | -| 0.6.0 | Spark 3.3 | 0.3.2-patch11 | -| 0.5.0 | Spark 3.2, 3.3 | 0.3.2-patch11 | -| 0.4.0 | Spark 3.2, 3.3 | 依存しない | -| 0.3.0 | Spark 3.2, 3.3 | 依存しない | -| 0.2.1 | Spark 3.2 | 依存しない | -| 0.1.2 | Spark 3.2 | 依存しない | -## インストールとセットアップ {#installation--setup} - -ClickHouseをSparkと統合するためには、さまざまなプロジェクトセットアップに適した複数のインストールオプションがあります。ClickHouse Sparkコネクタをプロジェクトのビルドファイル(Mavenの場合は`pom.xml`、SBTの場合は`build.sbt`など)に依存関係として直接追加できます。あるいは、必要なJARファイルを`$SPARK_HOME/jars/`フォルダーに置くか、`spark-submit`コマンドの`--jars`フラグを使用して直接渡すこともできます。どちらのアプローチも、ClickHouseコネクタがSpark環境で利用可能になることを保証します。 -### 依存関係としてインポート {#import-as-a-dependency} - - - - -```maven - - com.clickhouse.spark - clickhouse-spark-runtime-{{ spark_binary_version }}_{{ scala_binary_version }} - {{ stable_version }} - - - com.clickhouse - clickhouse-jdbc - all - {{ clickhouse_jdbc_version }} - - - * - * - - - -``` - -SNAPSHOTバージョンを使用する場合は、以下のリポジトリを追加します。 - -```maven - - - sonatype-oss-snapshots - Sonatype OSS Snapshots Repository - https://s01.oss.sonatype.org/content/repositories/snapshots - - -``` - - - - -```gradle -dependencies { - implementation("com.clickhouse.spark:clickhouse-spark-runtime-{{ spark_binary_version }}_{{ scala_binary_version }}:{{ stable_version }}") - implementation("com.clickhouse:clickhouse-jdbc:{{ clickhouse_jdbc_version }}:all") { transitive = false } -} -``` - -SNAPSHOTバージョンを使用する場合は、以下のリポジトリを追加します: - -```gradle -repositries { - maven { url = "https://s01.oss.sonatype.org/content/repositories/snapshots" } -} -``` - - - - -```sbt -libraryDependencies += 
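// JDBC ドライバーは "all" クラシファイア付きを指定する(clickhouse-client / clickhouse-http-client を同梱)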
"com.clickhouse" % "clickhouse-jdbc" % {{ clickhouse_jdbc_version }} classifier "all" -libraryDependencies += "com.clickhouse.spark" %% clickhouse-spark-runtime-{{ spark_binary_version }}_{{ scala_binary_version }} % {{ stable_version }} -``` - - - - -Sparkのシェルオプション(Spark SQL CLI、Spark Shell CLI、Spark Submitコマンド)を使用する場合、必要なJARを渡すことで依存関係を登録できます: - -```text -$SPARK_HOME/bin/spark-sql \ - --jars /path/clickhouse-spark-runtime-{{ spark_binary_version }}_{{ scala_binary_version }}:{{ stable_version }}.jar,/path/clickhouse-jdbc-{{ clickhouse_jdbc_version }}-all.jar -``` - -JARファイルをSparkクライアントノードにコピーするのを避ける場合は、次のように使用できます: - -```text - --repositories https://{maven-central-mirror or private-nexus-repo} \ - --packages com.clickhouse.spark:clickhouse-spark-runtime-{{ spark_binary_version }}_{{ scala_binary_version }}:{{ stable_version }},com.clickhouse:clickhouse-jdbc:{{ clickhouse_jdbc_version }}:all -``` - -注:SQLのみのユースケースの場合、[Apache Kyuubi](https://github.com/apache/kyuubi)が本番環境に推奨されます。 - - - -### ライブラリのダウンロード {#download-the-library} - -バイナリJARの名前パターンは以下の通りです: - -```bash -clickhouse-spark-runtime-${spark_binary_version}_${scala_binary_version}-${version}.jar -``` - -利用可能なすべてのリリースJARファイルは、[Maven Central Repository](https://repo1.maven.org/maven2/com/clickhouse/spark/)で見つけることができ、すべてのデイリービルドSNAPSHOT JARファイルは、[Sonatype OSS Snapshots Repository](https://s01.oss.sonatype.org/content/repositories/snapshots/com/clickhouse/)で見つけることができます。 - -:::important -"all"クラシファイアを持つ[clickhouse-jdbc JAR](https://mvnrepository.com/artifact/com.clickhouse/clickhouse-jdbc)を含めることが必須です。コネクタは[clickhouse-http](https://mvnrepository.com/artifact/com.clickhouse/clickhouse-http-client)および[clickhouse-client](https://mvnrepository.com/artifact/com.clickhouse/clickhouse-client)に依存しており、これらは全てclickhouse-jdbc:allにバンドルされています。フルJDBCパッケージを使用したくない場合は、[clickhouse-client JAR](https://mvnrepository.com/artifact/com.clickhouse/clickhouse-client)と[clickhouse-http](https://mvnrepository.com/artifact/com.clickhouse/clickhouse-http-client)を個別に追加することもできます。 - -いずれにしても、パッケージバージョンが、[互換性マトリックス](#compatibility-matrix)に従って互換性があることを確認してください。 -::: -## カタログの登録(必須) {#register-the-catalog-required} - -ClickHouseのテーブルにアクセスするためには、以下の設定を使用して新しいSparkカタログを構成する必要があります。 - -| プロパティ | 値 | デフォルト値 | 必須 | -|--------------------------------------------------|--------------------------------------------|-------------------|--------| -| `spark.sql.catalog.` | `com.clickhouse.spark.ClickHouseCatalog` | N/A | はい | -| `spark.sql.catalog..host` | `` | `localhost` | いいえ | -| `spark.sql.catalog..protocol` | `http` | `http` | いいえ | -| `spark.sql.catalog..http_port` | `` | `8123` | いいえ | -| `spark.sql.catalog..user` | `` | `default` | いいえ | -| `spark.sql.catalog..password` | `` | (空文字列) | いいえ | -| `spark.sql.catalog..database` | `` | `default` | いいえ | -| `spark..write.format` | `json` | `arrow` | いいえ | - -これらの設定は次のいずれかによって設定できます: - -* `spark-defaults.conf`を編集または作成する。 -* `spark-submit`コマンド(または`spark-shell`や`spark-sql` CLIコマンド)に設定を渡す。 -* コンテキストを初期化する際に設定を追加する。 - -:::important -ClickHouseクラスタで作業する場合、各インスタンスに対して一意のカタログ名を設定する必要があります。例えば: - -```text -spark.sql.catalog.clickhouse1 com.clickhouse.spark.ClickHouseCatalog -spark.sql.catalog.clickhouse1.host 10.0.0.1 -spark.sql.catalog.clickhouse1.protocol https -spark.sql.catalog.clickhouse1.http_port 8443 -spark.sql.catalog.clickhouse1.user default -spark.sql.catalog.clickhouse1.password -spark.sql.catalog.clickhouse1.database default -spark.sql.catalog.clickhouse1.option.ssl true - -spark.sql.catalog.clickhouse2 
com.clickhouse.spark.ClickHouseCatalog -spark.sql.catalog.clickhouse2.host 10.0.0.2 -spark.sql.catalog.clickhouse2.protocol https -spark.sql.catalog.clickhouse2.http_port 8443 -spark.sql.catalog.clickhouse2.user default -spark.sql.catalog.clickhouse2.password -spark.sql.catalog.clickhouse2.database default -spark.sql.catalog.clickhouse2.option.ssl true -``` - -そのようにすることで、Spark SQLからclickhouse1テーブル`.`にアクセスするために`clickhouse1..`を使用でき、clickhouse2テーブル`.`にアクセスするために`clickhouse2..`を使用できるようになります。 - -::: -## ClickHouse Cloud設定 {#clickhouse-cloud-settings} - -[ClickHouse Cloud](https://clickhouse.com)に接続する際は、SSLを有効にし、適切なSSLモードを設定してください。例えば: - -```text -spark.sql.catalog.clickhouse.option.ssl true -spark.sql.catalog.clickhouse.option.ssl_mode NONE -``` -## データの読み込み {#read-data} - - - - -```java -public static void main(String[] args) { - // Sparkセッションを作成 - SparkSession spark = SparkSession.builder() - .appName("example") - .master("local[*]") - .config("spark.sql.catalog.clickhouse", "com.clickhouse.spark.ClickHouseCatalog") - .config("spark.sql.catalog.clickhouse.host", "127.0.0.1") - .config("spark.sql.catalog.clickhouse.protocol", "http") - .config("spark.sql.catalog.clickhouse.http_port", "8123") - .config("spark.sql.catalog.clickhouse.user", "default") - .config("spark.sql.catalog.clickhouse.password", "123456") - .config("spark.sql.catalog.clickhouse.database", "default") - .config("spark.clickhouse.write.format", "json") - .getOrCreate(); - - Dataset df = spark.sql("select * from clickhouse.default.example_table"); - - df.show(); - - spark.stop(); - } -``` - - - - -```java -object NativeSparkRead extends App { - val spark = SparkSession.builder - .appName("example") - .master("local[*]") - .config("spark.sql.catalog.clickhouse", "com.clickhouse.spark.ClickHouseCatalog") - .config("spark.sql.catalog.clickhouse.host", "127.0.0.1") - .config("spark.sql.catalog.clickhouse.protocol", "http") - .config("spark.sql.catalog.clickhouse.http_port", "8123") - .config("spark.sql.catalog.clickhouse.user", "default") - .config("spark.sql.catalog.clickhouse.password", "123456") - .config("spark.sql.catalog.clickhouse.database", "default") - .config("spark.clickhouse.write.format", "json") - .getOrCreate - - val df = spark.sql("select * from clickhouse.default.example_table") - - df.show() - - spark.stop() -} -``` - - - - -```python -from pyspark.sql import SparkSession - -packages = [ - "com.clickhouse.spark:clickhouse-spark-runtime-3.4_2.12:0.8.0", - "com.clickhouse:clickhouse-client:0.7.0", - "com.clickhouse:clickhouse-http-client:0.7.0", - "org.apache.httpcomponents.client5:httpclient5:5.2.1" - -] - -spark = (SparkSession.builder - .config("spark.jars.packages", ",".join(packages)) - .getOrCreate()) - -spark.conf.set("spark.sql.catalog.clickhouse", "com.clickhouse.spark.ClickHouseCatalog") -spark.conf.set("spark.sql.catalog.clickhouse.host", "127.0.0.1") -spark.conf.set("spark.sql.catalog.clickhouse.protocol", "http") -spark.conf.set("spark.sql.catalog.clickhouse.http_port", "8123") -spark.conf.set("spark.sql.catalog.clickhouse.user", "default") -spark.conf.set("spark.sql.catalog.clickhouse.password", "123456") -spark.conf.set("spark.sql.catalog.clickhouse.database", "default") -spark.conf.set("spark.clickhouse.write.format", "json") - -df = spark.sql("select * from clickhouse.default.example_table") -df.show() - -``` - - - - -```sql - CREATE TEMPORARY VIEW jdbcTable - USING org.apache.spark.sql.jdbc - OPTIONS ( - url "jdbc:ch://localhost:8123/default", - dbtable "schema.tablename", - user "username", - 
password "password", - driver "com.clickhouse.jdbc.ClickHouseDriver" - ); - - SELECT * FROM jdbcTable; -``` - - - -## データの書き込み {#write-data} - - - - -```java - public static void main(String[] args) throws AnalysisException { - - // Sparkセッションを作成 - SparkSession spark = SparkSession.builder() - .appName("example") - .master("local[*]") - .config("spark.sql.catalog.clickhouse", "com.clickhouse.spark.ClickHouseCatalog") - .config("spark.sql.catalog.clickhouse.host", "127.0.0.1") - .config("spark.sql.catalog.clickhouse.protocol", "http") - .config("spark.sql.catalog.clickhouse.http_port", "8123") - .config("spark.sql.catalog.clickhouse.user", "default") - .config("spark.sql.catalog.clickhouse.password", "123456") - .config("spark.sql.catalog.clickhouse.database", "default") - .config("spark.clickhouse.write.format", "json") - .getOrCreate(); - - // DataFrameのスキーマを定義 - StructType schema = new StructType(new StructField[]{ - DataTypes.createStructField("id", DataTypes.IntegerType, false), - DataTypes.createStructField("name", DataTypes.StringType, false), - }); - - - List data = Arrays.asList( - RowFactory.create(1, "Alice"), - RowFactory.create(2, "Bob") - ); - - // DataFrameを作成 - Dataset df = spark.createDataFrame(data, schema); - - df.writeTo("clickhouse.default.example_table").append(); - - spark.stop(); - } -``` - - - - -```java -object NativeSparkWrite extends App { - // Sparkセッションを作成 - val spark: SparkSession = SparkSession.builder - .appName("example") - .master("local[*]") - .config("spark.sql.catalog.clickhouse", "com.clickhouse.spark.ClickHouseCatalog") - .config("spark.sql.catalog.clickhouse.host", "127.0.0.1") - .config("spark.sql.catalog.clickhouse.protocol", "http") - .config("spark.sql.catalog.clickhouse.http_port", "8123") - .config("spark.sql.catalog.clickhouse.user", "default") - .config("spark.sql.catalog.clickhouse.password", "123456") - .config("spark.sql.catalog.clickhouse.database", "default") - .config("spark.clickhouse.write.format", "json") - .getOrCreate - - // DataFrameのスキーマを定義 - val rows = Seq(Row(1, "John"), Row(2, "Doe")) - - val schema = List( - StructField("id", DataTypes.IntegerType, nullable = false), - StructField("name", StringType, nullable = true) - ) - // dfを作成 - val df: DataFrame = spark.createDataFrame( - spark.sparkContext.parallelize(rows), - StructType(schema) - ) - - df.writeTo("clickhouse.default.example_table").append() - - spark.stop() -} -``` - - - - -```python -from pyspark.sql import SparkSession -from pyspark.sql import Row - - -# 互換性マトリックスに準拠する他のパッケージの組み合わせを自由に使用できます。 -packages = [ - "com.clickhouse.spark:clickhouse-spark-runtime-3.4_2.12:0.8.0", - "com.clickhouse:clickhouse-client:0.7.0", - "com.clickhouse:clickhouse-http-client:0.7.0", - "org.apache.httpcomponents.client5:httpclient5:5.2.1" - -] - -spark = (SparkSession.builder - .config("spark.jars.packages", ",".join(packages)) - .getOrCreate()) - -spark.conf.set("spark.sql.catalog.clickhouse", "com.clickhouse.spark.ClickHouseCatalog") -spark.conf.set("spark.sql.catalog.clickhouse.host", "127.0.0.1") -spark.conf.set("spark.sql.catalog.clickhouse.protocol", "http") -spark.conf.set("spark.sql.catalog.clickhouse.http_port", "8123") -spark.conf.set("spark.sql.catalog.clickhouse.user", "default") -spark.conf.set("spark.sql.catalog.clickhouse.password", "123456") -spark.conf.set("spark.sql.catalog.clickhouse.database", "default") -spark.conf.set("spark.clickhouse.write.format", "json") - - -# DataFrameを作成 -data = [Row(id=11, name="John"), Row(id=12, name="Doe")] -df = 
spark.createDataFrame(data) - - -# DataFrameをClickHouseに書き込む -df.writeTo("clickhouse.default.example_table").append() - -``` - - - - -```sql - -- resultTalbeは、clickhouse.default.example_tableに挿入したいSparkの中間dfです - INSERT INTO TABLE clickhouse.default.example_table - SELECT * FROM resultTable; - -``` - - - -## DDL操作 {#ddl-operations} - -ClickHouseインスタンス上でDDL操作を実行することができ、すべての変更がClickHouseに即座に永続化されます。Spark SQLを使用すると、ClickHouseと同じようにクエリを書くことができるため、CREATE TABLEやTRUNCATEなどのコマンドを修正なしで直接実行できます。例えば: - -```sql - -use clickhouse; - -CREATE TABLE test_db.tbl_sql ( - create_time TIMESTAMP NOT NULL, - m INT NOT NULL COMMENT 'part key', - id BIGINT NOT NULL COMMENT 'sort key', - value STRING -) USING ClickHouse -PARTITIONED BY (m) -TBLPROPERTIES ( - engine = 'MergeTree()', - order_by = 'id', - settings.index_granularity = 8192 -); -``` - -上記の例は、Spark SQLクエリを示しており、Java、Scala、PySpark、またはシェルのいずれかのAPIを使用してアプリケーション内で実行できます。 -## Configurations {#configurations} - -以下はコネクタで調整可能な設定です。 - -
- -| キー | デフォルト | 説明 | 以来 | -|-----------------------------------------------------|-----------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-------| -| spark.clickhouse.ignoreUnsupportedTransform | false | ClickHouseは、シャーディングキーやパーティション値として複雑な式を使用することをサポートしています。例えば、`cityHash64(col_1, col_2)`のように、現在Sparkではサポートされていません。`true`の場合、サポートされていない式を無視します。そうでない場合、例外で早期に失敗します。注意:`spark.clickhouse.write.distributed.convertLocal`が有効な場合、サポートされていないシャーディングキーを無視するとデータが破損する可能性があります。 | 0.4.0 | -| spark.clickhouse.read.compression.codec | lz4 | 読み取り用のデータを展開するために使用されるコーデック。サポートされているコーデック:none、lz4。 | 0.5.0 | -| spark.clickhouse.read.distributed.convertLocal | true | 分散テーブルを読み取るとき、テーブル自身の代わりにローカルテーブルを読み取ります。`true`の場合、`spark.clickhouse.read.distributed.useClusterNodes`を無視します。 | 0.1.0 | -| spark.clickhouse.read.fixedStringAs | binary | ClickHouseのFixedString型を指定されたSparkデータ型として読み取ります。サポートされている型:binary、string | 0.8.0 | -| spark.clickhouse.read.format | json | 読み取り用のシリアライズ形式。サポートされている形式:json、binary | 0.6.0 | -| spark.clickhouse.read.runtimeFilter.enabled | false | 読み取り用のランタイムフィルターを有効にします。 | 0.8.0 | -| spark.clickhouse.read.splitByPartitionId | true | `true`の場合、仮想カラム`_partition_id`によって入力パーティションフィルターを構築します。パーティション値によるSQL述語の組み立てには既知の問題があります。この機能にはClickHouse Server v21.6+が必要です。 | 0.4.0 | -| spark.clickhouse.useNullableQuerySchema | false | `true`の場合、テーブルを作成する際に`CREATE/REPLACE TABLE ... 
AS SELECT ...`を実行する際に、クエリスキーマのすべてのフィールドをNullableとしてマークします。この設定にはSPARK-43390(Spark 3.5に利用可能)が必要で、これがないと常に`true`として動作します。 | 0.8.0 | -| spark.clickhouse.write.batchSize | 10000 | ClickHouseに書き込む際のバッチごとのレコード数。 | 0.1.0 | -| spark.clickhouse.write.compression.codec | lz4 | 書き込み用のデータを圧縮するために使用されるコーデック。サポートされているコーデック:none、lz4。 | 0.3.0 | -| spark.clickhouse.write.distributed.convertLocal | false | 分散テーブルを書き込むとき、テーブル自身の代わりにローカルテーブルに書き込みます。`true`の場合、`spark.clickhouse.write.distributed.useClusterNodes`を無視します。 | 0.1.0 | -| spark.clickhouse.write.distributed.useClusterNodes | true | 分散テーブルを書き込む際、クラスタのすべてのノードに書き込みます。 | 0.1.0 | -| spark.clickhouse.write.format | arrow | 書き込み用のシリアライズ形式。サポートされている形式:json、arrow | 0.4.0 | -| spark.clickhouse.write.localSortByKey | true | `true`の場合、書き込む前にソートキーでローカルソートを行います。 | 0.3.0 | -| spark.clickhouse.write.localSortByPartition | spark.clickhouse.write.repartitionByPartitionの値 | `true`の場合、書き込む前にパーティションによるローカルソートを行います。設定されていない場合、`spark.clickhouse.write.repartitionByPartition`と同じになります。 | 0.3.0 | -| spark.clickhouse.write.maxRetry | 3 | 再試行可能なコードで失敗した単一バッチ書き込みに対して再試行する最大回数。 | 0.1.0 | -| spark.clickhouse.write.repartitionByPartition | true | ClickHouseテーブルの分布を満たすために書き込む前に、ClickHouseのパーティションキーによってデータを再パーティションします。 | 0.3.0 | -| spark.clickhouse.write.repartitionNum | 0 | ClickHouseテーブルの分布を満たすために、書き込む前にデータを再パーティションする必要があり、この設定で再パーティションの数を指定します。値が1未満の場合、要件がないことを示します。 | 0.1.0 | -| spark.clickhouse.write.repartitionStrictly | false | `true`の場合、Sparkは、データソーステーブルにレコードを渡す前に、必要な分布を満たすために受信レコードをパーティションに厳密に分散させます。そうでない場合、Sparkはクエリを高速化するために特定の最適化を適用し、分布要件を壊す可能性があります。この設定にはSPARK-37523(Spark 3.4に利用可能)が必要で、これがないと常に`true`として動作します。 | 0.3.0 | -| spark.clickhouse.write.retryInterval | 10s | 書き込み再試行の間隔(秒)。 | 0.1.0 | -| spark.clickhouse.write.retryableErrorCodes | 241 | 書き込みが失敗したときにClickHouseサーバーから返される再試行可能なエラーコード。 | 0.1.0 | -## Supported Data Types {#supported-data-types} - -このセクションでは、SparkとClickHouse間のデータ型のマッピングを示します。以下の表は、ClickHouseからSparkへ読み取る際、およびSparkからClickHouseにデータを挿入する際のデータ型変換のためのクイックリファレンスを提供します。 -### ClickHouseからSparkへデータを読み取る {#reading-data-from-clickhouse-into-spark} - -| ClickHouseデータ型 | Sparkデータ型 | サポート | プリミティブ | ノート | -|------------------------------------------------------------|------------------------|--------|--------|-----------------------------------------------------| -| `Nothing` | `NullType` | ✅ | はい | | -| `Bool` | `BooleanType` | ✅ | はい | | -| `UInt8`, `Int16` | `ShortType` | ✅ | はい | | -| `Int8` | `ByteType` | ✅ | はい | | -| `UInt16`,`Int32` | `IntegerType` | ✅ | はい | | -| `UInt32`,`Int64`, `UInt64` | `LongType` | ✅ | はい | | -| `Int128`,`UInt128`, `Int256`, `UInt256` | `DecimalType(38, 0)` | ✅ | はい | | -| `Float32` | `FloatType` | ✅ | はい | | -| `Float64` | `DoubleType` | ✅ | はい | | -| `String`, `JSON`, `UUID`, `Enum8`, `Enum16`, `IPv4`, `IPv6` | `StringType` | ✅ | はい | | -| `FixedString` | `BinaryType`, `StringType` | ✅ | はい | 設定`READ_FIXED_STRING_AS`によって制御されます | -| `Decimal` | `DecimalType` | ✅ | はい | 精度とスケールは`Decimal128`までサポート | -| `Decimal32` | `DecimalType(9, scale)` | ✅ | はい | | -| `Decimal64` | `DecimalType(18, scale)`| ✅ | はい | | -| `Decimal128` | `DecimalType(38, scale)`| ✅ | はい | | -| `Date`, `Date32` | `DateType` | ✅ | はい | | -| `DateTime`, `DateTime32`, `DateTime64` | `TimestampType` | ✅ | はい | | -| `Array` | `ArrayType` | ✅ | いいえ | 配列要素型も変換されます | -| `Map` | `MapType` | ✅ | いいえ | キーは`StringType`に制限されています | -| `IntervalYear` | `YearMonthIntervalType(Year)` | ✅ | はい | | -| `IntervalMonth` | `YearMonthIntervalType(Month)` | ✅ | はい | | -| `IntervalDay`, 
`IntervalHour`, `IntervalMinute`, `IntervalSecond` | `DayTimeIntervalType` | ✅ | いいえ | 特定の間隔タイプが使用されます | -| `Object` | | ❌ | | | -| `Nested` | | ❌ | | | -| `Tuple` | | ❌ | | | -| `Point` | | ❌ | | | -| `Polygon` | | ❌ | | | -| `MultiPolygon` | | ❌ | | | -| `Ring` | | ❌ | | | -| `IntervalQuarter` | | ❌ | | | -| `IntervalWeek` | | ❌ | | | -| `Decimal256` | | ❌ | | | -| `AggregateFunction` | | ❌ | | | -| `SimpleAggregateFunction` | | ❌ | | | -### SparkからClickHouseへデータを挿入する {#inserting-data-from-spark-into-clickhouse} - -| Sparkデータ型 | ClickHouseデータ型 | サポート | プリミティブ | ノート | -|-------------------------------------|----------------------|-----------|--------------|---------------------------------------| -| `BooleanType` | `UInt8` | ✅ | はい | | -| `ByteType` | `Int8` | ✅ | はい | | -| `ShortType` | `Int16` | ✅ | はい | | -| `IntegerType` | `Int32` | ✅ | はい | | -| `LongType` | `Int64` | ✅ | はい | | -| `FloatType` | `Float32` | ✅ | はい | | -| `DoubleType` | `Float64` | ✅ | はい | | -| `StringType` | `String` | ✅ | はい | | -| `VarcharType` | `String` | ✅ | はい | | -| `CharType` | `String` | ✅ | はい | | -| `DecimalType` | `Decimal(p, s)` | ✅ | はい | 精度とスケールは`Decimal128`までサポート | -| `DateType` | `Date` | ✅ | はい | | -| `TimestampType` | `DateTime` | ✅ | はい | | -| `ArrayType` (リスト、タプル、または配列) | `Array` | ✅ | いいえ | 配列要素型も変換されます | -| `MapType` | `Map` | ✅ | いいえ | キーは`StringType`に制限されています | -| `Object` | | ❌ | | | -| `Nested` | | ❌ | | | -## Contributing and Support {#contributing-and-support} - -プロジェクトへの貢献や問題の報告をご希望の場合は、皆様のご意見をお待ちしております! -[GitHubリポジトリ](https://github.com/ClickHouse/spark-clickhouse-connector)を訪れて、問題を開いたり、改善を提案したり、プルリクエストを提出したりしてください。 -貢献をお待ちしております!始める前にリポジトリの貢献ガイドラインを確認してください。 -ClickHouse Sparkコネクタの改善にご協力いただきありがとうございます! diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/apache-spark/spark-native-connector.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/apache-spark/spark-native-connector.md.hash deleted file mode 100644 index b571bdc939d..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/apache-spark/spark-native-connector.md.hash +++ /dev/null @@ -1 +0,0 @@ -d4895b120ec23145 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/aws-glue/index.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/aws-glue/index.md deleted file mode 100644 index ff8415052d1..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/aws-glue/index.md +++ /dev/null @@ -1,115 +0,0 @@ ---- -sidebar_label: 'Amazon Glue' -sidebar_position: 1 -slug: '/integrations/glue' -description: 'ClickHouse と Amazon Glue を 統合する' -keywords: -- 'clickhouse' -- 'amazon' -- 'aws' -- 'glue' -- 'migrating' -- 'data' -title: 'Amazon Glue と ClickHouse の 統合' ---- - -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; - - -# Amazon Glue と ClickHouse の統合 - -[Amazon Glue](https://aws.amazon.com/glue/) は、Amazon Web Services (AWS) が提供する完全に管理されたサーバーレスのデータ統合サービスです。これにより、分析、機械学習、およびアプリケーション開発のためのデータの発見、準備、および変換プロセスが簡素化されます。 - -現時点では Glue ClickHouse コネクタは利用できませんが、公式の JDBC コネクタを活用して ClickHouse に接続し、統合することができます。 - - - - -```java -import com.amazonaws.services.glue.util.Job -import com.amazonaws.services.glue.util.GlueArgParser -import com.amazonaws.services.glue.GlueContext -import org.apache.spark.SparkContext -import org.apache.spark.sql.SparkSession -import org.apache.spark.sql.DataFrame -import scala.collection.JavaConverters._ 
-import com.amazonaws.services.glue.log.GlueLogger - - -// Glue ジョブの初期化 -object GlueJob { - def main(sysArgs: Array[String]) { - val sc: SparkContext = new SparkContext() - val glueContext: GlueContext = new GlueContext(sc) - val spark: SparkSession = glueContext.getSparkSession - val logger = new GlueLogger - import spark.implicits._ - // @params: [JOB_NAME] - val args = GlueArgParser.getResolvedOptions(sysArgs, Seq("JOB_NAME").toArray) - Job.init(args("JOB_NAME"), glueContext, args.asJava) - - // JDBC 接続詳細 - val jdbcUrl = "jdbc:ch://{host}:{port}/{schema}" - val jdbcProperties = new java.util.Properties() - jdbcProperties.put("user", "default") - jdbcProperties.put("password", "*******") - jdbcProperties.put("driver", "com.clickhouse.jdbc.ClickHouseDriver") - - // ClickHouse からテーブルをロードする - val df: DataFrame = spark.read.jdbc(jdbcUrl, "my_table", jdbcProperties) - - // Spark df を表示するか、お好きなように使用する - df.show() - - // ジョブをコミットする - Job.commit() - } -} -``` - - - - -```python -import sys -from awsglue.transforms import * -from awsglue.utils import getResolvedOptions -from pyspark.context import SparkContext -from awsglue.context import GlueContext -from awsglue.job import Job - -## @params: [JOB_NAME] -args = getResolvedOptions(sys.argv, ['JOB_NAME']) - -sc = SparkContext() -glueContext = GlueContext(sc) -logger = glueContext.get_logger() -spark = glueContext.spark_session -job = Job(glueContext) -job.init(args['JOB_NAME'], args) -jdbc_url = "jdbc:ch://{host}:{port}/{schema}" -query = "select * from my_table" - -# クラウド利用時は、ssl オプションを追加してください -df = (spark.read.format("jdbc") - .option("driver", 'com.clickhouse.jdbc.ClickHouseDriver') - .option("url", jdbc_url) - .option("user", 'default') - .option("password", '*******') - .option("query", query) - .load()) - -logger.info("行数:") -logger.info(str(df.count())) -logger.info("データサンプル:") -logger.info(str(df.take(10))) - - -job.commit() -``` - - - - -詳細については、[Spark & JDBC ドキュメント](/integrations/apache-spark/spark-jdbc#read-data)をご覧ください。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/aws-glue/index.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/aws-glue/index.md.hash deleted file mode 100644 index 4c1623be060..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/aws-glue/index.md.hash +++ /dev/null @@ -1 +0,0 @@ -61bd53b1772bfbcc diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/azure-data-factory/index.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/azure-data-factory/index.md deleted file mode 100644 index f829c120b5d..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/azure-data-factory/index.md +++ /dev/null @@ -1,18 +0,0 @@ ---- -slug: '/integrations/azure-data-factory' -description: 'Azure データを ClickHouse に取り込む' -keywords: -- 'azure data factory' -- 'azure' -- 'microsoft' -- 'data' -title: 'ClickHouse への Azure データの取り込み' ---- - - - -| Page | Description | -|-----------------------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| [概要](./overview.md) | Azure Data を ClickHouse に取り込むための 2 つのアプローチの概要 | -| [ClickHouse の azureBlobStorage テーブル関数の使用](./using_azureblobstorage.md) | オプション 1 - `azureBlobStorage` テーブル関数を使用して、Azure Blob Storage または 
Azure Data Lake Storage から ClickHouse にデータをコピーする効率的で簡単な方法 | -| [ClickHouse の HTTP インターフェースの使用](./using_http_interface.md) | オプション 2 - ClickHouse が Azure からデータをプルする代わりに、Azure Data Factory がその HTTP インターフェースを使用してデータを ClickHouse にプッシュします | diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/azure-data-factory/index.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/azure-data-factory/index.md.hash deleted file mode 100644 index 5b1eb577748..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/azure-data-factory/index.md.hash +++ /dev/null @@ -1 +0,0 @@ -9cbabb93b437918f diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/azure-data-factory/overview.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/azure-data-factory/overview.md deleted file mode 100644 index 0c329835e6e..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/azure-data-factory/overview.md +++ /dev/null @@ -1,25 +0,0 @@ ---- -sidebar_label: '概要' -slug: '/integrations/azure-data-factory/overview' -description: 'Azure データを ClickHouse に取り込む - 概要' -keywords: -- 'azure data factory' -- 'azure' -- 'microsoft' -- 'data' -title: 'Bringing Azure Data into ClickHouse' ---- - - - - -# AzureデータをClickHouseに取り込む - -Microsoft Azureは、データを保存、変換、分析するための広範なツールを提供しています。しかし、多くのシナリオにおいて、ClickHouseは低遅延のクエリ処理と巨大なデータセットの処理においてはるかに優れたパフォーマンスを提供できます。さらに、ClickHouseの列指向ストレージと圧縮は、一般的なAzureデータベースと比較して、大量の分析データをクエリするコストを大幅に削減できます。 - -このセクションでは、Microsoft AzureからClickHouseにデータを取り込む2つの方法を探ります。 - -| 方法 | 説明 | -|----------------------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| [`azureBlobStorage`テーブル関数を使用](./using_azureblobstorage.md) | ClickHouseの[`azureBlobStorage`テーブル関数](https://clickhouse.com/docs/sql-reference/table-functions/azureBlobStorage)を使用して、Azure Blob Storageから直接データを転送します。 | -| [ClickHouse HTTPインターフェースを使用](./using_http_interface.md) | [ClickHouse HTTPインターフェース](https://clickhouse.com/docs/interfaces/http)をAzure Data Factory内のデータソースとして利用し、データをコピーしたり、パイプラインの一部としてデータフローアクティビティで使用します。 | diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/azure-data-factory/overview.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/azure-data-factory/overview.md.hash deleted file mode 100644 index 42713d7db5f..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/azure-data-factory/overview.md.hash +++ /dev/null @@ -1 +0,0 @@ -b1a7d563c479b984 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/azure-data-factory/using_azureblobstorage.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/azure-data-factory/using_azureblobstorage.md deleted file mode 100644 index eb169e09dbf..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/azure-data-factory/using_azureblobstorage.md +++ /dev/null @@ -1,139 +0,0 @@ ---- -sidebar_label: 'azureBlobStorageテーブル関数の使用' -slug: '/integrations/azure-data-factory/table-function' -description: 'ClickHouseのazureBlobStorageテーブル関数の使用' -keywords: -- 'azure data factory' -- 'azure' -- 'microsoft' -- 'data' -- 'azureBlobStorage' -title: 
'ClickHouseのazureBlobStorageテーブル関数を使用してAzureデータをClickHouseに取り込む' ---- - -import Image from '@theme/IdealImage'; -import azureDataStoreSettings from '@site/static/images/integrations/data-ingestion/azure-data-factory/azure-data-store-settings.png'; -import azureDataStoreAccessKeys from '@site/static/images/integrations/data-ingestion/azure-data-factory/azure-data-store-access-keys.png'; - - -# Using ClickHouse's azureBlobStorage table function {#using-azureBlobStorage-function} - -これは、Azure Blob Storage または Azure Data Lake Storage から ClickHouse にデータをコピーする最も効率的かつ簡単な方法の一つです。このテーブル関数を使用すると、ClickHouse に Azure ストレージに直接接続し、データをオンデマンドで読み取るよう指示できます。 - -これは、データをソースから直接選択、挿入、フィルタリングできるテーブルのようなインターフェイスを提供します。この関数は高度に最適化されており、`CSV`、`JSON`、`Parquet`、`Arrow`、`TSV`、`ORC`、`Avro` など、多くの一般的に使用されるファイル形式をサポートしています。完全なリストについては ["Data formats"](/interfaces/formats) を参照してください。 - -このセクションでは、Azure Blob Storage から ClickHouse へのデータ転送に関する簡単なスタートアップガイドと、この関数を効果的に使用するための重要な考慮事項を説明します。詳細および高度なオプションについては、公式ドキュメントを参照してください: -[`azureBlobStorage` Table Function documentation page](https://clickhouse.com/docs/sql-reference/table-functions/azureBlobStorage) - -## Acquiring Azure Blob Storage Access Keys {#acquiring-azure-blob-storage-access-keys} - -ClickHouse が Azure Blob Storage にアクセスできるようにするには、アクセスキーを含む接続文字列が必要です。 - -1. Azure ポータルで、**ストレージアカウント**に移動します。 - -2. 左側のメニューで、**セキュリティ + ネットワーキング**セクションの下にある **アクセスキー** を選択します。 - - -3. **key1** または **key2** のいずれかを選択し、**接続文字列**フィールドの横にある **表示** ボタンをクリックします。 - - -4. 接続文字列をコピーします。この接続文字列は、azureBlobStorage テーブル関数のパラメータとして使用します。 - -## Querying the data from Azure Blob Storage {#querying-the-data-from-azure-blob-storage} - -お好みの ClickHouse クエリコンソールを開きます。このクエリコンソールは、ClickHouse Cloud の Web インターフェイス、ClickHouse CLI クライアント、またはクエリを実行するために使用する他のツールのいずれでもかまいません。接続文字列と ClickHouse クエリコンソールの準備が整ったら、Azure Blob Storage からデータを直接クエリできます。 - -以下の例では、data-container という名前のコンテナに保存されている JSON ファイル内のすべてのデータをクエリします: - -```sql -SELECT * FROM azureBlobStorage( - '', - 'data-container', - '*.json', - 'JSONEachRow'); -``` - -そのデータをローカルの ClickHouse テーブル(例:my_table)にコピーしたい場合は、`INSERT INTO ... SELECT` ステートメントを使用できます: - -```sql -INSERT INTO my_table -SELECT * FROM azureBlobStorage( - '', - 'data-container', - '*.json', - 'JSONEachRow'); -``` - -これにより、中間的な ETL ステップを必要とせずに、外部データを効率的に ClickHouse に取り込むことができます。 - -## A simple example using the Environmental Sensors Dataset {#simple-example-using-the-environmental-sensors-dataset} - -例として、Environmental Sensors Dataset から単一のファイルをダウンロードします。 - -1. [サンプルファイル](https://clickhouse-public-datasets.s3.eu-central-1.amazonaws.com/sensors/monthly/2019-06_bmp180.csv.zst)を [Environmental Sensors Dataset](https://clickhouse.com/docs/getting-started/example-datasets/environmental-sensors) からダウンロードします。 - -2. Azure ポータルで、まだ持っていない場合は新しいストレージアカウントを作成します。 - -:::warning -ストレージアカウントのキーアクセスを許可する設定が有効になっていることを確認してください。そうしないと、アカウントキーを使用してデータにアクセスできません。 -::: - -3. ストレージアカウント内に新しいコンテナを作成します。この例では、コンテナの名前を sensors とします。 - 既存のコンテナを使用している場合、このステップはスキップできます。 - -4. 前にダウンロードした `2019-06_bmp180.csv.zst` ファイルをコンテナにアップロードします。 - -5. 前述の手順に従って Azure Blob Storage の接続文字列を取得します。 - -すべての設定が完了したので、Azure Blob Storage から直接データをクエリできます: - -```sql -SELECT * -FROM azureBlobStorage( - '', - 'sensors', - '2019-06_bmp180.csv.zst', - 'CSVWithNames') -LIMIT 10 -SETTINGS format_csv_delimiter = ';' -``` - -7. 
テーブルにデータをロードするため、元のデータセットで使用されているスキーマの簡略版を作成します: -```sql -CREATE TABLE sensors -( - sensor_id UInt16, - lat Float32, - lon Float32, - timestamp DateTime, - temperature Float32 -) -ENGINE = MergeTree -ORDER BY (timestamp, sensor_id); -``` - -:::info -Azure Blob Storage のような外部ソースをクエリするときの構成オプションやスキーマ推論に関する詳細情報は、[入力データからの自動スキーマ推論](https://clickhouse.com/docs/interfaces/schema-inference) を参照してください。 -::: - -8. それでは、Azure Blob Storage から sensors テーブルにデータを挿入します: -```sql -INSERT INTO sensors -SELECT sensor_id, lat, lon, timestamp, temperature -FROM azureBlobStorage( - '', - 'sensors', - '2019-06_bmp180.csv.zst', - 'CSVWithNames') -SETTINGS format_csv_delimiter = ';' -``` - -これで、sensors テーブルには Azure Blob Storage に保存されている `2019-06_bmp180.csv.zst` ファイルからのデータが満たされました。 - -## Additional Resources {#additional-resources} - -これは、azureBlobStorage 関数を使用するための基本的な導入に過ぎません。より高度なオプションや設定の詳細については、公式ドキュメントを参照してください: - -- [azureBlobStorage Table Function](https://clickhouse.com/docs/sql-reference/table-functions/azureBlobStorage) -- [Formats for Input and Output Data](https://clickhouse.com/docs/sql-reference/formats) -- [Automatic schema inference from input data](https://clickhouse.com/docs/interfaces/schema-inference) diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/azure-data-factory/using_azureblobstorage.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/azure-data-factory/using_azureblobstorage.md.hash deleted file mode 100644 index 900f319016c..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/azure-data-factory/using_azureblobstorage.md.hash +++ /dev/null @@ -1 +0,0 @@ -4b2d29f859500469 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/azure-data-factory/using_http_interface.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/azure-data-factory/using_http_interface.md deleted file mode 100644 index 0679800df12..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/azure-data-factory/using_http_interface.md +++ /dev/null @@ -1,262 +0,0 @@ ---- -sidebar_label: 'Using the HTTP interface' -slug: '/integrations/azure-data-factory/http-interface' -description: 'Using ClickHouse''s HTTP interface to bring data from Azure Data Factory - into ClickHouse' -keywords: -- 'azure data factory' -- 'azure' -- 'microsoft' -- 'data' -- 'http interface' -title: 'Using ClickHouse HTTP Interface to bring Azure data into ClickHouse' ---- - -import Image from '@theme/IdealImage'; -import azureHomePage from '@site/static/images/integrations/data-ingestion/azure-data-factory/azure-home-page.png'; -import azureNewResourceAnalytics from '@site/static/images/integrations/data-ingestion/azure-data-factory/azure-new-resource-analytics.png'; -import azureNewDataFactory from '@site/static/images/integrations/data-ingestion/azure-data-factory/azure-new-data-factory.png'; -import azureNewDataFactoryConfirm from '@site/static/images/integrations/data-ingestion/azure-data-factory/azure-new-data-factory-confirm.png'; -import azureNewDataFactorySuccess from '@site/static/images/integrations/data-ingestion/azure-data-factory/azure-new-data-factory-success.png'; -import azureHomeWithDataFactory from '@site/static/images/integrations/data-ingestion/azure-data-factory/azure-home-with-data-factory.png'; -import azureDataFactoryPage from 
'@site/static/images/integrations/data-ingestion/azure-data-factory/azure-data-factory-page.png'; -import adfCreateLinkedServiceButton from '@site/static/images/integrations/data-ingestion/azure-data-factory/adf-create-linked-service-button.png'; -import adfNewLinkedServiceSearch from '@site/static/images/integrations/data-ingestion/azure-data-factory/adf-new-linked-service-search.png'; -import adfNewLinedServicePane from '@site/static/images/integrations/data-ingestion/azure-data-factory/adf-new-lined-service-pane.png'; -import adfNewLinkedServiceBaseUrlEmpty from '@site/static/images/integrations/data-ingestion/azure-data-factory/adf-new-linked-service-base-url-empty.png'; -import adfNewLinkedServiceParams from '@site/static/images/integrations/data-ingestion/azure-data-factory/adf-new-linked-service-params.png'; -import adfNewLinkedServiceExpressionFieldFilled from '@site/static/images/integrations/data-ingestion/azure-data-factory/adf-new-linked-service-expression-field-filled.png'; -import adfNewLinkedServiceCheckConnection from '@site/static/images/integrations/data-ingestion/azure-data-factory/adf-new-linked-service-check-connection.png'; -import adfLinkedServicesList from '@site/static/images/integrations/data-ingestion/azure-data-factory/adf-linked-services-list.png'; -import adfNewDatasetItem from '@site/static/images/integrations/data-ingestion/azure-data-factory/adf-new-dataset-item.png'; -import adfNewDatasetPage from '@site/static/images/integrations/data-ingestion/azure-data-factory/adf-new-dataset-page.png'; -import adfNewDatasetProperties from '@site/static/images/integrations/data-ingestion/azure-data-factory/adf-new-dataset-properties.png'; -import adfNewDatasetQuery from '@site/static/images/integrations/data-ingestion/azure-data-factory/adf-new-dataset-query.png'; -import adfNewDatasetConnectionSuccessful from '@site/static/images/integrations/data-ingestion/azure-data-factory/adf-new-dataset-connection-successful.png'; -import adfNewPipelineItem from '@site/static/images/integrations/data-ingestion/azure-data-factory/adf-new-pipeline-item.png'; -import adfNewCopyDataItem from '@site/static/images/integrations/data-ingestion/azure-data-factory/adf-new-copy-data-item.png'; -import adfCopyDataSource from '@site/static/images/integrations/data-ingestion/azure-data-factory/adf-copy-data-source.png'; -import adfCopyDataSinkSelectPost from '@site/static/images/integrations/data-ingestion/azure-data-factory/adf-copy-data-sink-select-post.png'; -import adfCopyDataDebugSuccess from '@site/static/images/integrations/data-ingestion/azure-data-factory/adf-copy-data-debug-success.png'; - - -# Azure Data FactoryにおけるClickHouse HTTPインターフェースの使用 {#using-clickhouse-http-interface-in-azure-data-factory} - -[`azureBlobStorage` テーブル関数](https://clickhouse.com/docs/sql-reference/table-functions/azureBlobStorage) -は、Azure Blob StorageからClickHouseにデータを取り込むための -迅速かつ便利な方法です。しかし、以下の理由から常に適切であるとは限りません。 - -- データがAzure Blob Storageに保存されていない場合 — 例えば、Azure SQL Database、Microsoft SQL Server、またはCosmos DBにある可能性があります。 -- セキュリティポリシーにより、Blob Storageへの外部アクセスが - 完全に制限される場合があります — 例えば、ストレージアカウントに - 公共のエンドポイントがない場合です。 - -このようなシナリオでは、Azure Data Factoryを使用して -[ClickHouse HTTP インターフェース](https://clickhouse.com/docs/interfaces/http) -を利用し、AzureサービスからClickHouseにデータを送信することができます。 - -この方法は流れを逆転させます:ClickHouseがAzureからデータを -引き出すのではなく、Azure Data FactoryがデータをClickHouseに -プッシュします。このアプローチは通常、ClickHouseインスタンスが -公共のインターネットからアクセス可能である必要があります。 - -:::info -Azure Data FactoryのSelf-hosted Integration Runtimeを使用することで、 
-ClickHouseインスタンスをインターネットにさらすことなく、プライベートネットワークを介してデータを送信することが可能です。この設定はこの記事の範囲を超えますが、公式ガイドでさらに詳しい情報が得られます: -[セルフホスト統合 -ランタイムの作成と構成](https://learn.microsoft.com/en-us/azure/data-factory/create-self-hosted-integration-runtime?tabs=data-factory) -::: - -## ClickHouseをRESTサービスに変える {#turning-clickhouse-to-a-rest-service} - -Azure Data Factoryは、HTTPを介して外部システムにデータをJSONフォーマットで送ることをサポートしています。この機能を利用して、[ClickHouse HTTPインターフェース](https://clickhouse.com/docs/interfaces/http)を使って直接ClickHouseにデータを挿入することができます。詳細は[ClickHouse HTTPインターフェースの -ドキュメント](https://clickhouse.com/docs/interfaces/http)を参照してください。 - -この例では、宛先テーブルを指定し、入力データフォーマットをJSONとして定義し、より柔軟なタイムスタンプ解析を許可するオプションを含めるだけで済みます。 - -```sql -INSERT INTO my_table -SETTINGS - date_time_input_format='best_effort', - input_format_json_read_objects_as_strings=1 -FORMAT JSONEachRow -``` - -このクエリをHTTPリクエストの一部として送信するには、クエリパラメータに -URLエンコードされた文字列として渡します: -```text -https://your-clickhouse-url.com?query=INSERT%20INTO%20my_table%20SETTINGS%20date_time_input_format%3D%27best_effort%27%2C%20input_format_json_read_objects_as_strings%3D1%20FORMAT%20JSONEachRow%0A -``` - -:::info -Azure Data Factoryは組み込みの -`encodeUriComponent`関数を使用してこのエンコーディングを自動的に処理できるため、手動で行う必要はありません。 -::: - -これでJSON形式のデータをこのURLに送信できます。データは対象のテーブルの構造に合致する必要があります。以下は、3つのカラム`col_1`、`col_2`、`col_3`を持つテーブルを仮定した簡単なcurlの例です。 -```text -curl \ - -XPOST "https://your-clickhouse-url.com?query=" \ - --data '{"col_1":9119,"col_2":50.994,"col_3":"2019-06-01 00:00:00"}' -``` - -JSONオブジェクトの配列やJSON Lines(改行区切りのJSONオブジェクト)を送信することも可能です。Azure Data FactoryはJSON配列形式を使用しており、これはClickHouseの`JSONEachRow`入力と完全に互換性があります。 - -このステップでは、ClickHouse側で特別な操作を行う必要はありません。HTTPインターフェース自体がRESTのようなエンドポイントとして機能するために必要なものをすでに提供しています — 追加の設定は不要です。 - -ClickHouseをRESTエンドポイントのように振る舞わせたので、Azure Data Factoryをそれを使用するように設定する時が来ました。 - -次のステップでは、Azure Data Factoryインスタンスを作成し、ClickHouseインスタンスへのLinked Serviceを設定し、 -[REST出力](https://learn.microsoft.com/en-us/azure/data-factory/connector-rest)のためのDatasetを定義し、データをAzureからClickHouseに送信するためのCopy Dataアクティビティを作成します。 - -## Azure Data Factoryインスタンスの作成 {#create-an-azure-data-factory-instance} - -このガイドでは、Microsoft Azureアカウントにアクセスでき、 -すでにサブスクリプションとリソースグループが構成されていることを前提としています。もしすでにAzure Data Factoryが設定されている場合は、このステップをスキップして -既存のサービスを利用できます。 - -1. [Microsoft Azureポータル](https://portal.azure.com/)にログインし、**リソースの作成**をクリックします。 - - -2. 左側のカテゴリペインで**分析**を選択し、その後一般的なサービスのリストから**Data Factory**をクリックします。 - - -3. サブスクリプションとリソースグループを選択し、新しいData Factoryインスタンスの名前を入力し、リージョンを選択し、バージョンはV2のままにします。 - - -3. **確認 + 作成**をクリックし、次に**作成**をクリックしてデプロイを開始します。 - - - - -デプロイが正常に完了したら、新しいAzure Data Factoryインスタンスの使用を開始できます。 - -## 新しいRESTベースのリンクサービスの作成 {#-creating-new-rest-based-linked-service} - -1. Microsoft Azure Portalにログインし、Data Factoryインスタンスを開きます。 - - -2. Data Factoryの概要ページで、**スタジオを起動**をクリックします。 - - -3. 左側のメニューで**管理**を選択し、**Linked services**に移動し、**+ 新規**をクリックして新しいリンクサービスを作成します。 - - -4. **新規リンクサービス検索バー**に**REST**と入力し、RESTを選択し、**続行**をクリックして[RESTコネクタ](https://learn.microsoft.com/en-us/azure/data-factory/connector-rest) - インスタンスを作成します。 - - -5. リンクサービス設定ペインで新しいサービスの名前を入力し、**ベースURL**フィールドをクリックして**動的コンテンツの追加**をクリックします(このリンクはフィールドが選択されているときのみ表示されます)。 - - -6. 動的コンテンツペインで、パラメータ化されたURLを作成できます。これにより、異なるテーブルのデータセットを作成する際にクエリを後で定義できるため、リンクサービスを再利用可能にします。 - - -7. フィルター入力の横にある**"+"**をクリックして新しいパラメータを追加し、名前を`pQuery`とし、型をStringに設定して、デフォルト値を`SELECT 1`に設定します。**保存**をクリックします。 - - -8. 
式フィールドに以下を入力し、**OK**をクリックします。`your-clickhouse-url.com`を実際のClickHouseインスタンスのアドレスに置き換えます。 - ```text - @{concat('https://your-clickhouse-url.com:8443/?query=', encodeUriComponent(linkedService().pQuery))} - ``` - - -9. メインフォームに戻って基本認証を選択し、ClickHouse HTTPインターフェースに接続するために使用するユーザー名とパスワードを入力し、**接続テスト**をクリックします。すべてが正しく設定されていれば、成功メッセージが表示されます。 - - -10. **作成**をクリックして設定を完了します。 - - -新たに登録されたRESTベースのリンクサービスがリストに表示されるはずです。 - -## ClickHouse HTTPインターフェース用の新しいデータセットの作成 {#creating-a-new-dataset-for-the-clickhouse-http-interface} - -ClickHouse HTTPインターフェース用のリンクサービスが設定されたので、Azure Data FactoryがClickHouseにデータを送信するために使用するデータセットを作成できます。 - -この例では、[環境センサー -データ](https://clickhouse.com/docs/getting-started/example-datasets/environmental-sensors)の一部を挿入します。 - -1. お好みのClickHouseクエリコンソールを開いてください — これはClickHouse CloudのWeb UI、CLIクライアント、またはクエリを実行するために使用する他のインターフェースでも構いません — そしてターゲットテーブルを作成します: - ```sql - CREATE TABLE sensors - ( - sensor_id UInt16, - lat Float32, - lon Float32, - timestamp DateTime, - temperature Float32 - ) - ENGINE = MergeTree - ORDER BY (timestamp, sensor_id); - ``` - -2. Azure Data Factory Studioで、左側のペインで作成を選択します。データセット項目にマウスを乗せ、三点アイコンをクリックして新しいデータセットを選択します。 - - -3. 検索バーに**REST**と入力し、RESTを選択して**続行**をクリックします。データセットの名前を入力し、前のステップで作成した**リンクサービス**を選択します。**OK**をクリックしてデータセットを作成します。 - - -4. 左側のファクトリリソースペインのデータセットセクションに、新しく作成したデータセットが表示されるはずです。そのデータセットを選択して、プロパティを開きます。リンクサービスで定義された`pQuery`パラメータが表示されます。**値**のテキストフィールドをクリックし、次に**動的内容の追加**をクリックします。 - - -5. 開いたペインに次のクエリを貼り付けます: - ```sql - INSERT INTO sensors - SETTINGS - date_time_input_format=''best_effort'', - input_format_json_read_objects_as_strings=1 - FORMAT JSONEachRow - ``` - - :::danger - クエリ内のすべてのシングルクォート`'`は、二重シングルクォート`''`に置き換える必要があります。これはAzure Data Factoryの式パーサーによって要求されます。これらをエスケープしないと、直ちにエラーが表示されることはありませんが、データセットを使用または保存しようとしたときに失敗します。例えば、`'best_effort'`は`''best_effort''`と書かれる必要があります。 - ::: - - - -6. 式を保存するためにOKをクリックします。接続テストをクリックします。すべてが正しく設定されていれば、接続成功メッセージが表示されます。ページ上部の**すべてを公開**をクリックして変更を保存します。 - - -### 例データセットの設定 {#setting-up-an-example-dataset} - -この例では、完全な環境センサーのデータセットを使用するのではなく、[センサー -データセットのサンプル](https://datasets-documentation.s3.eu-west-3.amazonaws.com/environmental/sensors.csv)の小さな部分を使用します。 - -:::info -このガイドを集中させるために、Azure Data Factoryでソースデータセットを作成するための正確な手順には触れません。サンプルデータをお好きなストレージサービスにアップロードできます — 例えば、Azure Blob Storage、Microsoft SQL Server、またはAzure Data Factoryがサポートする別のファイル形式です。 -::: - -データセットをAzure Blob Storage(または他の好みのストレージサービス)にアップロードしたら、Azure Data Factory Studioでファクトリリソースペインに移動します。アップロードしたデータを指す新しいデータセットを作成します。**すべてを公開**をクリックして変更を保存します。 - -## ClickHouseへのデータ転送のためのCopyアクティビティの作成 {#creating-the-copy-activity-to-transfer-data-to-clickhouse} - -入力データセットと出力データセットの両方が設定されたので、**データのコピー**アクティビティを設定して、例のデータセットからClickHouseの`sensors`テーブルにデータを転送できます。 - -1. **Azure Data Factory Studio**を開き、**作成タブ**に移動します。**ファクトリリソース**ペインで**パイプライン**にマウスを乗せ、三点アイコンをクリックして**新しいパイプライン**を選択します。 - - -2. **アクティビティ**ペインで、**移動と変換**セクションを展開し、**データのコピー**アクティビティをキャンバスにドラッグします。 - - -3. **ソース**タブを選択し、先に作成したソースデータセットを選択します。 - - -4. **シンク**タブに移動し、センサー テーブル用に作成したClickHouseデータセットを選択します。**リクエストメソッド**をPOSTに設定します。**HTTP圧縮タイプ**は**なし**に設定されていることを確認してください。 - :::warning - HTTP圧縮はAzure Data Factoryのデータコピーアクティビティで正しく機能しません。有効にすると、Azureはゼロバイトのみを含むペイロードを送信する — サービスのバグの可能性が高いです。圧縮を無効のままにしてください。 - ::: - :::info - デフォルトのバッチサイズ10,000を維持することをお勧めします。さらに増やすこともできます。詳細については、[挿入戦略の選択 / 同期的なバッチ挿入](https://clickhouse.com/docs/best-practices/selecting-an-insert-strategy#batch-inserts-if-synchronous)を参照してください。 - ::: - - -5. 
キャンバスの上部で**デバッグ**をクリックしてパイプラインを実行します。少し待った後、アクティビティはキューに追加され、実行されます。すべてが正しく設定されていれば、タスクは**成功**の状態で終了するはずです。 - - -6. 完了したら、**すべてを公開**をクリックしてパイプラインとデータセットの変更を保存します。 - -## 追加リソース {#additional-resources-1} -- [HTTPインターフェース](https://clickhouse.com/docs/interfaces/http) -- [Azure Data Factoryを使用してRESTエンドポイント間でデータをコピーおよび変換する](https://learn.microsoft.com/en-us/azure/data-factory/connector-rest?tabs=data-factory) -- [挿入戦略の選択](https://clickhouse.com/docs/best-practices/selecting-an-insert-strategy) -- [セルフホスト統合ランタイムの作成と構成](https://learn.microsoft.com/en-us/azure/data-factory/create-self-hosted-integration-runtime?tabs=data-factory) diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/azure-data-factory/using_http_interface.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/azure-data-factory/using_http_interface.md.hash deleted file mode 100644 index 7d8858dfa57..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/azure-data-factory/using_http_interface.md.hash +++ /dev/null @@ -1 +0,0 @@ -70ce4a81195e5495 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/azure-synapse/index.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/azure-synapse/index.md deleted file mode 100644 index 09a69cc3ea4..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/azure-synapse/index.md +++ /dev/null @@ -1,96 +0,0 @@ ---- -sidebar_label: 'Azure Synapse' -slug: '/integrations/azure-synapse' -description: 'Introduction to Azure Synapse with ClickHouse' -keywords: -- 'clickhouse' -- 'azure synapse' -- 'azure' -- 'synapse' -- 'microsoft' -- 'azure spark' -- 'data' -title: 'Integrating Azure Synapse with ClickHouse' ---- - -import TOCInline from '@theme/TOCInline'; -import Image from '@theme/IdealImage'; -import sparkConfigViaNotebook from '@site/static/images/integrations/data-ingestion/azure-synapse/spark_notebook_conf.png'; -import sparkUICHSettings from '@site/static/images/integrations/data-ingestion/azure-synapse/spark_ui_ch_settings.png'; - - -# Azure Synapse と ClickHouse の統合 - -[Azure Synapse](https://azure.microsoft.com/en-us/products/synapse-analytics) は、ビッグデータ、データサイエンス、データウェアハウジングを組み合わせ、迅速で大規模なデータ分析を可能にする統合分析サービスです。 -Synapse 内では、Spark プールがオンデマンドでスケーラブルな [Apache Spark](https://spark.apache.org) クラスターを提供し、ユーザーが複雑なデータ変換、機械学習、および外部システムとの統合を実行できます。 - -この記事では、Azure Synapse 内で Apache Spark を使用する際に [ClickHouse Spark コネクタ](/integrations/apache-spark/spark-native-connector) を統合する方法を示します。 - - - - -## コネクタの依存関係を追加する {#add-connector-dependencies} -Azure Synapse では、[パッケージ管理の3つのレベル](https://learn.microsoft.com/en-us/azure/synapse-analytics/spark/apache-spark-azure-portal-add-libraries)をサポートしています: -1. デフォルトパッケージ -2. Spark プールレベル -3. セッションレベル - -
- -[Apache Spark プールのライブラリ管理ガイド](https://learn.microsoft.com/en-us/azure/synapse-analytics/spark/apache-spark-manage-pool-packages)に従い、Spark アプリケーションに以下の必要な依存関係を追加してください。 - - `clickhouse-spark-runtime-{spark_version}_{scala_version}-{connector_version}.jar` - [公式 maven](https://mvnrepository.com/artifact/com.clickhouse.spark) - - `clickhouse-jdbc-{java_client_version}-all.jar` - [公式 maven](https://mvnrepository.com/artifact/com.clickhouse/clickhouse-jdbc) - -どのバージョンがニーズに合っているかを理解するために、[Spark コネクタの互換性マトリクス](/integrations/apache-spark/spark-native-connector#compatibility-matrix) のドキュメントをご覧ください。 - -## ClickHouse をカタログとして追加する {#add-clickhouse-as-catalog} - -Spark の設定をセッションに追加するには、様々な方法があります: -* セッションと共にロードするカスタム設定ファイル -* Azure Synapse UI を介して設定を追加 -* Synapse ノートブック内で設定を追加 - -[Apache Spark 設定管理ガイド](https://learn.microsoft.com/en-us/azure/synapse-analytics/spark/apache-spark-azure-create-spark-configuration)に従い、[コネクタに必要な Spark 設定](/integrations/apache-spark/spark-native-connector#register-the-catalog-required)を追加してください。 - -例えば、以下の設定でノートブック内の Spark セッションを構成できます: - -```python -%%configure -f -{ - "conf": { - "spark.sql.catalog.clickhouse": "com.clickhouse.spark.ClickHouseCatalog", - "spark.sql.catalog.clickhouse.host": "", - "spark.sql.catalog.clickhouse.protocol": "https", - "spark.sql.catalog.clickhouse.http_port": "", - "spark.sql.catalog.clickhouse.user": "", - "spark.sql.catalog.clickhouse.password": "password", - "spark.sql.catalog.clickhouse.database": "default" - } -} -``` - -最初のセルにこの設定を配置してください: - - - -追加の設定については、[ClickHouse Spark 設定ページ](/integrations/apache-spark/spark-native-connector#configurations)をご覧ください。 - -:::info -ClickHouse Cloud を使用する場合は、[必要な Spark 設定](/integrations/apache-spark/spark-native-connector#clickhouse-cloud-settings)を設定してください。 -::: - -## セットアップの検証 {#setup-verification} - -依存関係と設定が正しく設定されているかを検証するために、セッションの Spark UI を訪れ、「環境」タブに移動してください。 -そこで、ClickHouse に関連する設定を探してください: - - - - -## 追加リソース {#additional-resources} - -- [ClickHouse Spark コネクタのドキュメント](/integrations/apache-spark) -- [Azure Synapse Spark プールの概要](https://learn.microsoft.com/en-us/azure/synapse-analytics/spark/apache-spark-overview) -- [Apache Spark ワークロードのパフォーマンスを最適化する](https://learn.microsoft.com/en-us/azure/synapse-analytics/spark/apache-spark-performance) -- [Synapse での Apache Spark プールのライブラリ管理](https://learn.microsoft.com/en-us/azure/synapse-analytics/spark/apache-spark-manage-pool-packages) -- [Synapse での Apache Spark 設定の管理](https://learn.microsoft.com/en-us/azure/synapse-analytics/spark/apache-spark-azure-create-spark-configuration) diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/azure-synapse/index.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/azure-synapse/index.md.hash deleted file mode 100644 index fb7de325c5d..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/azure-synapse/index.md.hash +++ /dev/null @@ -1 +0,0 @@ -9610d6fdf33e5e35 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/assets/static-ips.json b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/assets/static-ips.json deleted file mode 100644 index 5d196b7a8fe..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/assets/static-ips.json +++ /dev/null @@ -1 +0,0 @@ 
-{"aws":[{"region":"eu-central-1","ips":["18.195.233.217","3.127.86.90","35.157.23.2","18.197.167.47","3.122.25.29","52.28.148.40"]},{"region":"us-east-1","ips":["54.82.38.199","3.90.133.29","52.5.177.8","3.227.227.145","3.216.6.184","54.84.202.92","3.131.130.196","3.23.172.68","3.20.208.150"]},{"region":"us-east-2","ips":["3.131.130.196","3.23.172.68","3.20.208.150","3.132.20.192","18.119.76.110","3.134.185.180"]}]} \ No newline at end of file diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/aws-privatelink.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/aws-privatelink.md deleted file mode 100644 index 4912a236b1d..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/aws-privatelink.md +++ /dev/null @@ -1,181 +0,0 @@ ---- -sidebar_label: 'AWS PrivateLink for ClickPipes' -description: 'ClickPipes とデータソース間の安全な接続を AWS PrivateLink を使用して確立します。' -slug: '/integrations/clickpipes/aws-privatelink' -title: 'AWS PrivateLink for ClickPipes' ---- - -import cp_service from '@site/static/images/integrations/data-ingestion/clickpipes/cp_service.png'; -import cp_step0 from '@site/static/images/integrations/data-ingestion/clickpipes/cp_step0.png'; -import cp_rpe_select from '@site/static/images/integrations/data-ingestion/clickpipes/cp_rpe_select.png'; -import cp_rpe_step0 from '@site/static/images/integrations/data-ingestion/clickpipes/cp_rpe_step0.png'; -import cp_rpe_step1 from '@site/static/images/integrations/data-ingestion/clickpipes/cp_rpe_step1.png'; -import cp_rpe_step2 from '@site/static/images/integrations/data-ingestion/clickpipes/cp_rpe_step2.png'; -import cp_rpe_step3 from '@site/static/images/integrations/data-ingestion/clickpipes/cp_rpe_step3.png'; -import cp_rpe_settings0 from '@site/static/images/integrations/data-ingestion/clickpipes/cp_rpe_settings0.png'; -import cp_rpe_settings1 from '@site/static/images/integrations/data-ingestion/clickpipes/cp_rpe_settings1.png'; -import Image from '@theme/IdealImage'; - - -# AWS PrivateLink for ClickPipes - -AWS PrivateLinkを使用して、VPC、AWSサービス、オンプレミスシステム、およびClickHouse Cloud間の安全な接続を確立し、トラフィックをパブリックインターネットにさらさないことができます。 - -このドキュメントでは、AWS PrivateLink VPCエンドポイントを設定するためのClickPipesのリバースプライベートエンドポイント機能について説明します。 - -## サポートされているAWS PrivateLinkエンドポイントタイプ {#aws-privatelink-endpoint-types} - -ClickPipesのリバースプライベートエンドポイントは、以下のAWS PrivateLinkアプローチのいずれかで構成できます。 - -- [VPCリソース](https://docs.aws.amazon.com/vpc/latest/privatelink/privatelink-access-resources.html) -- [MSK ClickPipe用のMSKマルチVPC接続](https://docs.aws.amazon.com/msk/latest/developerguide/aws-access-mult-vpc.html) -- [VPCエンドポイントサービス](https://docs.aws.amazon.com/vpc/latest/privatelink/privatelink-share-your-services.html) - -それぞれのAWS PrivateLink共有の設定方法については、上記のリンクを参照してください。 - -### VPCリソース {#vpc-resource} - -あなたのVPCリソースは、PrivateLinkを使用してClickPipesにアクセスできます。 -リソース構成は、特定のホストまたはRDSクラスターARNにターゲットを設定できます。 -クロスリージョンはサポートされていません。 - -これは、RDSクラスターからデータを取り込むPostgres CDCに推奨される選択肢です。 - -詳しい情報については、[はじめに](https://docs.aws.amazon.com/vpc/latest/privatelink/resource-configuration.html)ガイドを参照してください。 - -:::info -VPCリソースはClickPipesアカウントと共有する必要があります。リソース共有設定に `072088201116` を許可された主体として追加してください。 -詳細については、リソースを共有するためのAWSガイドを参照してください。 [リソースの共有](https://docs.aws.amazon.com/ram/latest/userguide/working-with-sharing-create.html) -::: - -### MSKマルチVPC接続 {#msk-multi-vpc} - -MSKマルチVPCは、AWS MSKのビルトイン機能で、複数のVPCを単一のMSKクラスターに接続できます。 -プライベートDNSサポートは標準で提供されており、追加の構成は必要ありません。 -クロスリージョンはサポートされていません。 - 
-ClickPipesにとっては、MSK向けの推奨オプションです。 -詳しい情報については、[はじめに](https://docs.aws.amazon.com/msk/latest/developerguide/mvpc-getting-started.html)ガイドを参照してください。 - -:::info -MSKクラスターのポリシーを更新し、MSKクラスターに許可された主体として `072088201116` を追加してください。 -詳細については、クラスター ポリシーをアタッチするためのAWSガイドを参照してください。[クラスター ポリシーのアタッチ](https://docs.aws.amazon.com/msk/latest/developerguide/mvpc-cluster-owner-action-policy.html) -::: - -ClickPipes用の[MSKセットアップガイド](/knowledgebase/aws-privatelink-setup-for-msk-clickpipes)を参照して、接続の設定方法を学んでください。 - -### VPCエンドポイントサービス {#vpc-endpoint-service} - -VPCサービスは、ClickPipesとデータソースを共有するための別のアプローチです。 -データソースの前にNLB(Network Load Balancer)を設定し、NLBを使用するようにVPCエンドポイントサービスを構成する必要があります。 - -VPCエンドポイントサービスは、[プライベートDNS](https://docs.aws.amazon.com/vpc/latest/privatelink/manage-dns-names.html)で構成でき、ClickPipes VPCでアクセス可能です。 - -これは以下の用途に推奨されます: - -- プライベートDNSサポートが必要なオンプレミスのKafkaセットアップ -- [Postgres CDCのクロスリージョン接続](/knowledgebase/aws-privatelink-setup-for-clickpipes) -- MSKクラスターのクロスリージョン接続。サポートが必要な場合は、ClickHouseサポートチームにお問い合わせください。 - -詳しい情報については、[はじめに](https://docs.aws.amazon.com/vpc/latest/privatelink/privatelink-share-your-services.html)ガイドを参照してください。 - -:::info -ClickPipesアカウントID `072088201116` をVPCエンドポイントサービスの許可された主体として追加してください。 -詳細については、パーミッションを管理するためのAWSガイドを参照してください。[パーミッションの管理](https://docs.aws.amazon.com/vpc/latest/privatelink/configure-endpoint-service.html#add-remove-permissions) -::: - -:::info -ClickPipes用の[クロスリージョンアクセス](https://docs.aws.amazon.com/vpc/latest/privatelink/privatelink-share-your-services.html#endpoint-service-cross-region) -が構成可能です。VPCエンドポイントサービスの許可されたリージョンに[あなたのClickPipeリージョン](#aws-privatelink-regions)を追加してください。 -::: - -## リバースプライベートエンドポイントを持つClickPipeの作成 {#creating-clickpipe} - -1. ClickHouse Cloud Service用のSQLコンソールにアクセスします。 - - - -2. 左側のメニューで `Data Sources` ボタンを選択し、「ClickPipeの設定」をクリックします。 - - - -3. データソースとしてKafkaまたはPostgresを選択します。 - - - -4. `Reverse private endpoint` オプションを選択します。 - - - -5. 既存のリバースプライベートエンドポイントを選択するか、新しいものを作成します。 - -:::info -RDSに対してクロスリージョンアクセスが必要な場合は、VPCエンドポイントサービスを作成する必要があります。 -このガイドは、設定の開始点として役立ちます。(/knowledgebase/aws-privatelink-setup-for-clickpipes) - -同じリージョンへのアクセスの場合、VPCリソースの作成が推奨されます。 -::: - - - -6. 選択したエンドポイントタイプの必須パラメータを提供します。 - - - - - VPCリソースの場合、構成共有ARNと構成IDを提供します。 - - MSKマルチVPCの場合、クラスターARNと作成されたエンドポイントで使用される認証方法を提供します。 - - VPCエンドポイントサービスの場合、サービス名を提供します。 - -7. `Create`をクリックし、リバースプライベートエンドポイントが準備できるのを待ちます。 - - 新しいエンドポイントを作成している場合、エンドポイントの設定には時間がかかります。 - エンドポイントが準備でき次第、ページは自動的にリフレッシュされます。 - VPCエンドポイントサービスでは、AWSコンソールで接続要求を受け入れる必要がある場合があります。 - - - -8. エンドポイントが準備できたら、DNS名を使用してデータソースに接続できます。 - - エンドポイントのリストで、利用可能なエンドポイントのDNS名を見ることができます。 - それは、ClickPipesのプロビジョニングされた内部DNS名またはPrivateLinkサービスによって提供されたプライベートDNS名のいずれかです。 - DNS名は完全なネットワークアドレスではありません。 - データソースに応じたポートを追加してください。 - - MSK接続文字列は、AWSコンソールでアクセスできます。 - - DNS名の完全なリストを見るには、クラウドサービス設定でアクセスしてください。 - -## 既存のリバースプライベートエンドポイントの管理 {#managing-existing-endpoints} - -ClickHouse Cloudサービス設定で、既存のリバースプライベートエンドポイントを管理できます。 - -1. サイドバーで `Settings` ボタンを見つけ、クリックします。 - - - -2. 
`ClickPipe reverse private endpoints` セクションで `Reverse private endpoints` をクリックします。 - - - - リバースプライベートエンドポイントの詳細情報がフライアウトに表示されます。 - - ここからエンドポイントを削除できます。これにより、このエンドポイントを使用する全てのClickPipesに影響を与えます。 - -## サポートされているAWSリージョン {#aws-privatelink-regions} - -次のAWSリージョンがAWS PrivateLinkでサポートされています。 - -- `us-east-1` - `us-east-1`リージョンで実行されているClickHouseサービス用 -- `eu-central-1` - EUリージョンで実行されているClickHouseサービス用 -- `us-east-2` - その他のすべての場所で実行されているClickHouseサービス用 - -この制限は、クロスリージョン接続をサポートするため、PrivateLink VPCエンドポイントサービスタイプには適用されません。 - -## 制限事項 {#limitations} - -ClickHouse Cloudで作成されたClickPipes用のAWS PrivateLinkエンドポイントは、ClickHouse Cloudサービスと同じAWSリージョンで作成されることが保証されていません。 - -現在、VPCエンドポイントサービスのみがクロスリージョン接続をサポートしています。 - -プライベートエンドポイントは特定のClickHouseサービスにリンクされており、サービス間で転送することはできません。 -単一のClickHouseサービスに対して複数のClickPipesが同じエンドポイントを再利用することができます。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/aws-privatelink.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/aws-privatelink.md.hash deleted file mode 100644 index 282ec17ab29..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/aws-privatelink.md.hash +++ /dev/null @@ -1 +0,0 @@ -70c753afe0a0660c diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/index.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/index.md deleted file mode 100644 index fa9790fb54b..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/index.md +++ /dev/null @@ -1,97 +0,0 @@ ---- -sidebar_label: 'はじめに' -description: '外部データソースをClickHouse Cloudにシームレスに接続します。' -slug: '/integrations/clickpipes' -title: 'Integrating with ClickHouse Cloud' ---- - -import Kafkasvg from '@site/static/images/integrations/logos/kafka.svg'; -import Confluentsvg from '@site/static/images/integrations/logos/confluent.svg'; -import Msksvg from '@site/static/images/integrations/logos/msk.svg'; -import Azureeventhubssvg from '@site/static/images/integrations/logos/azure_event_hubs.svg'; -import Warpstreamsvg from '@site/static/images/integrations/logos/warpstream.svg'; -import S3svg from '@site/static/images/integrations/logos/amazon_s3_logo.svg'; -import Amazonkinesis from '@site/static/images/integrations/logos/amazon_kinesis_logo.svg'; -import Gcssvg from '@site/static/images/integrations/logos/gcs.svg'; -import DOsvg from '@site/static/images/integrations/logos/digitalocean.svg'; -import ABSsvg from '@site/static/images/integrations/logos/azureblobstorage.svg'; -import Postgressvg from '@site/static/images/integrations/logos/postgresql.svg'; -import Mysqlsvg from '@site/static/images/integrations/logos/mysql.svg'; -import redpanda_logo from '@site/static/images/integrations/logos/logo_redpanda.png'; -import clickpipes_stack from '@site/static/images/integrations/data-ingestion/clickpipes/clickpipes_stack.png'; -import cp_custom_role from '@site/static/images/integrations/data-ingestion/clickpipes/cp_custom_role.png'; -import Image from '@theme/IdealImage'; - - -# ClickHouse Cloudとの統合 - -## はじめに {#introduction} - -[ClickPipes](/integrations/clickpipes) は、さまざまなソースからのデータを簡単にインジェストできる管理された統合プラットフォームです。最も要求の厳しいワークロード向けに設計されたClickPipesの堅牢でスケーラブルなアーキテクチャは、一貫したパフォーマンスと信頼性を確保します。ClickPipesは、長期的なストリーミングニーズや一回限りのデータロードジョブに使用できます。 - - - -## サポートされているデータソース {#supported-data-sources} - -| 名前 | ロゴ | タイプ | ステータス | 説明 | 
-|------------------------|--------------------------------------------------------------------------------------------------|-------------|------------------|------------------------------------------------------------------------------------------------------| -| Apache Kafka | | ストリーミング | 安定 | ClickPipesを設定し、Apache KafkaからClickHouse Cloudへのストリーミングデータをインジェスト開始します。 | -| Confluent Cloud | | ストリーミング | 安定 | ConfluentとClickHouse Cloudの統合による強力な機能を解放します。 | -| Redpanda | | ストリーミング | 安定 | ClickPipesを設定し、RedpandaからClickHouse Cloudへのストリーミングデータをインジェスト開始します。 | -| AWS MSK | | ストリーミング | 安定 | ClickPipesを設定し、AWS MSKからClickHouse Cloudへのストリーミングデータをインジェスト開始します。 | -| Azure Event Hubs | | ストリーミング | 安定 | ClickPipesを設定し、Azure Event HubsからClickHouse Cloudへのストリーミングデータをインジェスト開始します。 | -| WarpStream | | ストリーミング | 安定 | ClickPipesを設定し、WarpStreamからClickHouse Cloudへのストリーミングデータをインジェスト開始します。 | -| Amazon S3 | | オブジェクトストレージ | 安定 | ClickPipesを設定し、オブジェクトストレージから大量のデータをインジェストします。 | -| Google Cloud Storage | | オブジェクトストレージ | 安定 | ClickPipesを設定し、オブジェクトストレージから大量のデータをインジェストします。 | -| DigitalOcean Spaces | | オブジェクトストレージ | 安定 | ClickPipesを設定し、オブジェクトストレージから大量のデータをインジェストします。 | -| Azure Blob Storage | | オブジェクトストレージ | プライベートベータ | ClickPipesを設定し、オブジェクトストレージから大量のデータをインジェストします。 | -| Amazon Kinesis | | ストリーミング | 安定 | ClickPipesを設定し、Amazon KinesisからClickHouse Cloudへのストリーミングデータをインジェスト開始します。 | -| Postgres | | DBMS | パブリックベータ | ClickPipesを設定し、PostgresからClickHouse Cloudへのデータをインジェスト開始します。 | -| MySQL | | DBMS | プライベートベータ | ClickPipesを設定し、MySQLからClickHouse Cloudへのデータをインジェスト開始します。 | - - -より多くのコネクタがClickPipesに追加されます。詳細については、[お問い合せ](https://clickhouse.com/company/contact?loc=clickpipes)ください。 - -## スタティックIPのリスト {#list-of-static-ips} - -以下は、ClickPipesが外部サービスに接続するために使用する静的NAT IP(地域別に分けたもの)です。関連するインスタンスの地域のIPをIP許可リストに追加して、トラフィックを許可してください。 -インスタンスの地域がここにリストされていない場合は、デフォルトの地域にフォールバックします: - -- **eu-central-1** EU地域用 -- **us-east-1** `us-east-1`のインスタンス用 -- **us-east-2** その他すべての地域用 - -| ClickHouse Cloud地域 | IPアドレス | -|-------------------------|-----------------------------------------| -| **eu-central-1** | `18.195.233.217`, `3.127.86.90`, `35.157.23.2`, `18.197.167.47`, `3.122.25.29`, `52.28.148.40` | -| **us-east-2** | `3.131.130.196`, `3.23.172.68`, `3.20.208.150`, `3.132.20.192`, `18.119.76.110`, `3.134.185.180` | -| **us-east-1** | `54.82.38.199`, `3.90.133.29`, `52.5.177.8`, `3.227.227.145`, `3.216.6.184`, `54.84.202.92`, `3.131.130.196`, `3.23.172.68`, `3.20.208.150` | - -## ClickHouse設定の調整 {#adjusting-clickhouse-settings} -ClickHouse Cloudは、ほとんどのユースケースに対して合理的なデフォルトを提供します。ただし、ClickPipesの宛先テーブルのためにいくつかのClickHouse設定を調整する必要がある場合は、ClickPipes用の専用ロールが最も柔軟なソリューションです。 -手順: -1. カスタムロール `CREATE ROLE my_clickpipes_role SETTINGS ...` を作成します。[CREATE ROLE](/sql-reference/statements/create/role.md) 構文の詳細を参照してください。 -2. 
ClickPipesの作成中の「詳細と設定」ステップでカスタムロールをClickPipesユーザーに追加します。 - - - -## エラーレポート {#error-reporting} -ClickPipesは、宛先テーブルの隣に `_clickpipes_error` という接尾辞を持つテーブルを作成します。このテーブルには、ClickPipeの操作(ネットワーク、接続など)からのエラーや、スキーマに準拠していないデータが含まれます。エラー テーブルには [TTL](/engines/table-engines/mergetree-family/mergetree#table_engine-mergetree-ttl) が 7 日間設定されています。 -ClickPipesがデータソースまたは宛先に15分間接続できない場合、ClickPipesインスタンスは停止し、エラーテーブルに適切なメッセージを保存します(ClickHouseインスタンスが利用可能な場合)。 - -## よくある質問 {#faq} -- **ClickPipesとは何ですか?** - - ClickPipesは、ユーザーがClickHouseサービスを外部データソース、特にKafkaに接続しやすくするClickHouse Cloudの機能です。ClickPipesを使用すると、ユーザーは簡単にデータをClickHouseに継続的にロードし、リアルタイム解析のためにそれを利用可能にします。 - -- **ClickPipesはデータ変換をサポートしていますか?** - - はい、ClickPipesはDDL作成を公開することにより、基本的なデータ変換をサポートしています。これにより、ClickHouse Cloudサービスの宛先テーブルにデータがロードされる際に、より高度な変換を適用できます。ClickHouseの[マテリアライズドビュー機能](/guides/developer/cascading-materialized-views)を活用できます。 - -- **ClickPipesの使用には追加コストがかかりますか?** - - ClickPipesは、インジェストされたデータとコンピュートの二つの次元で請求されます。料金の詳細は[このページ](/cloud/manage/jan-2025-faq/pricing-dimensions#clickpipes-pricing-faq)で確認できます。ClickPipesを実行すると、宛先ClickHouse Cloudサービスでの間接的なコンピュートおよびストレージコストが発生する場合もあります。 - -- **Kafka用のClickPipesを使用する際のエラーや失敗を処理する方法はありますか?** - - はい、ClickPipes for Kafkaは、Kafkaからデータを消費する際に失敗した場合、自動的に再試行します。ClickPipesは、エラーと不正なデータを7日間保持する専用のエラーテーブルを有効にすることもサポートしています。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/index.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/index.md.hash deleted file mode 100644 index ee6efd7c3aa..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/index.md.hash +++ /dev/null @@ -1 +0,0 @@ -70a1761dd0f6cf00 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/kafka.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/kafka.md deleted file mode 100644 index 310e98fe5ee..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/kafka.md +++ /dev/null @@ -1,372 +0,0 @@ ---- -sidebar_label: 'ClickPipes for Kafka' -description: 'Seamlessly connect your Kafka data sources to ClickHouse Cloud.' 
-slug: '/integrations/clickpipes/kafka' -sidebar_position: 1 -title: 'Integrating Kafka with ClickHouse Cloud' ---- - -import Kafkasvg from '@site/static/images/integrations/logos/kafka.svg'; -import Confluentsvg from '@site/static/images/integrations/logos/confluent.svg'; -import Msksvg from '@site/static/images/integrations/logos/msk.svg'; -import Azureeventhubssvg from '@site/static/images/integrations/logos/azure_event_hubs.svg'; -import Warpstreamsvg from '@site/static/images/integrations/logos/warpstream.svg'; -import redpanda_logo from '@site/static/images/integrations/logos/logo_redpanda.png'; -import cp_service from '@site/static/images/integrations/data-ingestion/clickpipes/cp_service.png'; -import cp_step0 from '@site/static/images/integrations/data-ingestion/clickpipes/cp_step0.png'; -import cp_step1 from '@site/static/images/integrations/data-ingestion/clickpipes/cp_step1.png'; -import cp_step2 from '@site/static/images/integrations/data-ingestion/clickpipes/cp_step2.png'; -import cp_step3 from '@site/static/images/integrations/data-ingestion/clickpipes/cp_step3.png'; -import cp_step4a from '@site/static/images/integrations/data-ingestion/clickpipes/cp_step4a.png'; -import cp_step4a3 from '@site/static/images/integrations/data-ingestion/clickpipes/cp_step4a3.png'; -import cp_step4b from '@site/static/images/integrations/data-ingestion/clickpipes/cp_step4b.png'; -import cp_step5 from '@site/static/images/integrations/data-ingestion/clickpipes/cp_step5.png'; -import cp_success from '@site/static/images/integrations/data-ingestion/clickpipes/cp_success.png'; -import cp_remove from '@site/static/images/integrations/data-ingestion/clickpipes/cp_remove.png'; -import cp_destination from '@site/static/images/integrations/data-ingestion/clickpipes/cp_destination.png'; -import cp_overview from '@site/static/images/integrations/data-ingestion/clickpipes/cp_overview.png'; -import Image from '@theme/IdealImage'; - - - -# KafkaとClickHouse Cloud の統合 -## 前提条件 {#prerequisite} -[ClickPipesの紹介](./index.md)に目を通しておいてください。 - -## 最初のKafka ClickPipeの作成 {#creating-your-first-kafka-clickpipe} - -1. ClickHouse CloudサービスのSQLコンソールにアクセスします。 - - - -2. 左側のメニューから`データソース`ボタンを選択し、「ClickPipeをセットアップ」をクリックします。 - - - -3. データソースを選択します。 - - - -4. ClickPipeに名前、説明(オプション)、認証情報、その他の接続詳細を提供してフォームを記入します。 - - - -5. スキーマレジストリを設定します。有効なスキーマはAvroストリームには必須で、JSONにはオプションです。このスキーマは、選択されたトピック上で[AvroConfluent](../../../interfaces/formats.md/#data-format-avro-confluent)を解析したり、JSONメッセージを検証したりするために使用されます。 -- 解析できないAvroメッセージや検証に失敗したJSONメッセージはエラーを生成します。 -- スキーマレジストリの「ルート」パス。例えば、Confluent CloudのスキーマレジストリURLは`https://test-kk999.us-east-2.aws.confluent.cloud`のようなパスのないHTTPS URLです。ルートパスのみが指定されている場合、ステップ4で列名とタイプを決定するために使用されるスキーマは、サンプリングされたKafkaメッセージに埋め込まれたIDによって決定されます。 -- 数値スキーマIDによるスキーマドキュメントへのパス`/schemas/ids/[ID]`。スキーマIDを使用した完全なURLは`https://registry.example.com/schemas/ids/1000`です。 -- スキーマドキュメントへのサブジェクト名によるパス`/subjects/[subject_name]`。オプションで、特定のバージョンはURLに`/versions/[version]`を付加することで参照できます(そうでない場合、ClickPipesは最新バージョンを取得します)。スキーマサブジェクトを使用した完全なURLは`https://registry.example.com/subjects/events`または`https://registry/example.com/subjects/events/versions/4`です。 - -すべての場合において、ClickPipesはメッセージに埋め込まれたスキーマIDによって示された場合、レジストリから更新されたり異なるスキーマを自動的に取得します。埋め込まれたスキーマIDなしでメッセージが書き込まれた場合は、すべてのメッセージを解析するために特定のスキーマIDまたはサブジェクトを指定する必要があります。 - -6. トピックを選択すると、UIにそのトピックのサンプルドキュメントが表示されます。 - - - -7. 
次のステップでは、新しいClickHouseテーブルにデータを取り込むか、既存のテーブルを再利用するかを選択できます。画面の指示に従って、テーブル名、スキーマ、設定を変更してください。上部のサンプルテーブルで自分の変更をリアルタイムでプレビューできます。 - - - - 提供されたコントロールを使用して高度な設定をカスタマイズすることもできます。 - - - -8. または、既存のClickHouseテーブルにデータを取り込む決定をすることもできます。その場合、UIはソースからのフィールドを選択した宛先テーブル内のClickHouseフィールドにマッピングすることを許可します。 - - - -9. 最後に、内部ClickPipesユーザーの権限を設定できます。 - - **権限:** ClickPipesは、宛先テーブルにデータを書き込むための専用ユーザーを作成します。この内部ユーザーに対して、カスタムロールまたは事前定義されたロールの一つを選択できます: - - `フルアクセス`: クラスターへのフルアクセスを持つ。Materialized ViewやDictionaryを宛先テーブルと共に使用する場合に便利です。 - - `宛先テーブルのみ`: 宛先テーブルにのみ`INSERT`権限を持つ。 - - - -10. 「セットアップを完了する」をクリックすると、システムがClickPipeを登録し、サマリーテーブルに表示されるようになります。 - - - - - - サマリーテーブルは、ClickHouseのソースまたは宛先テーブルからサンプルデータを表示するためのコントロールを提供します。 - - - - また、ClickPipeを削除し、取り込みジョブの概要を表示するためのコントロールもあります。 - - - -11. **おめでとうございます!** あなたは最初のClickPipeを成功裏にセットアップしました。これがストリーミングClickPipeである場合は、リモートデータソースからリアルタイムでデータを取り込み続けます。 - -## サポートされているデータソース {#supported-data-sources} - -| 名前 | ロゴ | タイプ | ステータス | 説明 | -|------------------------|------|--------|-------------------|------------------------------------------------------------------------------------------------| -| Apache Kafka || ストリーミング | 安定 | ClickPipesを設定し、Apache KafkaからClickHouse Cloudにストリーミングデータを取り込むことができます。 | -| Confluent Cloud || ストリーミング | 安定 | ConfluentとClickHouse Cloudの組み合わせの力を、直接の統合で活用します。 | -| Redpanda || ストリーミング | 安定 | ClickPipesを設定し、RedpandaからClickHouse Cloudにストリーミングデータを取り込むことができます。 | -| AWS MSK || ストリーミング | 安定 | ClickPipesを設定し、AWS MSKからClickHouse Cloudにストリーミングデータを取り込むことができます。 | -| Azure Event Hubs || ストリーミング | 安定 | ClickPipesを設定し、Azure Event HubsからClickHouse Cloudにストリーミングデータを取り込むことができます。 | -| WarpStream || ストリーミング | 安定 | ClickPipesを設定し、WarpStreamからClickHouse Cloudにストリーミングデータを取り込むことができます。 | - -More connectors are will get added to ClickPipes, you can find out more by [contacting us](https://clickhouse.com/company/contact?loc=clickpipes). 
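The two permission levels described in step 9 above map, roughly, onto ordinary ClickHouse grants. The sketch below only illustrates that difference — ClickPipes creates and manages its internal user automatically, and the user and table names shown (`clickpipes_internal_user`, `default.my_destination_table`) are hypothetical placeholders, not names ClickPipes actually generates.

```sql
-- Rough equivalent of the "Destination table only" option:
-- the internal ClickPipes user may only INSERT into the target table.
GRANT INSERT ON default.my_destination_table TO clickpipes_internal_user;

-- Rough equivalent of the "Full access" option, which step 9 recommends
-- when Materialized Views or Dictionaries are used with the destination table.
GRANT ALL ON *.* TO clickpipes_internal_user;
```

You never run these statements yourself; ClickPipes applies the chosen level when the pipe is provisioned.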
- -## サポートされているデータ形式 {#supported-data-formats} - -サポートされている形式は以下の通りです: -- [JSON](../../../interfaces/formats.md/#json) -- [AvroConfluent](../../../interfaces/formats.md/#data-format-avro-confluent) - -### サポートされているデータタイプ {#supported-data-types} - -現在、ClickPipesでサポートされているClickHouseのデータタイプは以下の通りです: - -- 基本的な数値型 - \[U\]Int8/16/32/64およびFloat32/64 -- 大きい整数型 - \[U\]Int128/256 -- 小数型 -- ブール型 -- 文字列 -- 固定文字列 -- 日付、Date32 -- 日時、DateTime64(UTCタイムゾーンのみ) -- Enum8/Enum16 -- UUID -- IPv4 -- IPv6 -- すべてのClickHouse LowCardinality型 -- 上記のタイプ(Nullableを含む)を使用したキーと値のあるMap -- 上記のタイプ(Nullableを含む、1レベル深さのみ)を要素として使用したTupleおよびArray - -### Avro {#avro} -#### サポートされているAvroデータタイプ {#supported-avro-data-types} - -ClickPipesはすべてのAvroプリミティブおよび複合タイプ、`time-millis`、`time-micros`、`local-timestamp-millis`、`local_timestamp-micros`、および`duration`以外のすべてのAvroロジカルタイプをサポートします。Avroの`record`タイプはTupleに変換され、`array`タイプはArrayに、`map`はMap(文字列キーのみ)に変換されます。一般的に、[ここ](/interfaces/formats/Avro#data-types-matching)で示された変換があります。ClickPipesはAvro数値型の正確なタイプマッチングを推奨します。ClickPipesは型変換時のオーバーフローや精度損失をチェックしません。 - -#### Nullable型とAvroユニオン {#nullable-types-and-avro-unions} - -AvroのNullableタイプは、`(T, null)`または`(null, T)`のユニオンスキーマを使用して定義され、ここでTは基本的なAvroタイプです。スキーマ推論中に、そのようなユニオンはClickHouseの「Nullable」カラムにマッピングされます。ClickHouseは `Nullable(Array)`、`Nullable(Map)`、または`Nullable(Tuple)`型をサポートしていないことに注意してください。これらの型のAvro nullユニオンは、非Nullableバージョンにマッピングされます(Avro Record型はClickHouseの名前付けされたTupleにマッピングされます)。これらの型のAvro「null」は次のように挿入されます: -- nullのAvro配列に対して空のArray -- nullのAvro Mapに対して空のMap -- nullのAvro Recordに対してすべてのデフォルト/ゼロ値を持つ名前付きTuple - -ClickPipesは、他のAvroユニオンが含まれるスキーマを現在サポートしていません(これは、ClickHouseの新しいVariantおよびJSONデータタイプが成熟するにつれて変更される可能性があります)。Avroスキーマに「非null」ユニオンが含まれている場合、ClickPipesはAvroスキーマとClickhouseカラムタイプ間のマッピングを計算しようとする際にエラーを生成します。 - -#### Avroスキーマ管理 {#avro-schema-management} - -ClickPipesは、各メッセージ/イベントに埋め込まれたスキーマIDを使用して、設定されたスキーマレジストリからAvroスキーマを動的に取得して適用します。スキーマの更新は自動的に検出され、処理されます。 - -現在、ClickPipesは[Confluent Schema Registry API](https://docs.confluent.io/platform/current/schema-registry/develop/api.html)を使用するスキーマレジストリとのみ互換性があります。Confluent KafkaおよびCloudの他に、Redpanda、AWS MSK、およびUpstashのスキーマレジストリも含まれます。ClickPipesは現在、AWS GlueスキーマレジストリまたはAzureスキーマレジストリとは互換性がありません(近日中に対応予定)。 - -取得したAvroスキーマとClickHouse宛先テーブル間のマッピングには以下のルールが適用されます: -- AvroスキーマがClickHouse宛先マッピングに含まれていないフィールドを含む場合、そのフィールドは無視されます。 -- AvroスキーマがClickHouse宛先マッピングで定義されたフィールドを欠いている場合、ClickHouseカラムは0や空文字列などの「ゼロ」値で埋められます。[DEFAULT](/sql-reference/statements/create/table#default)式は現在ClickPipesの挿入で評価されていないことに注意してください(これはClickHouseサーバーのデフォルト処理の更新を待っている一時的な制限です)。 -- AvroスキーマのフィールドとClickHouseカラムが互換性がない場合、その行/メッセージの挿入は失敗し、失敗はClickPipesエラーテーブルに記録されます。数値型間のようにいくつかの暗黙的な変換がサポートされていますが、すべてではありません(例えば、Avroの`record`フィールドは`Int32`のClickHouseカラムに挿入することはできません)。 - -## Kafka仮想カラム {#kafka-virtual-columns} - -Kafka互換のストリーミングデータソース用に以下の仮想カラムがサポートされています。新しい宛先テーブルを作成する際には、`カラムを追加`ボタンを使用して仮想カラムを追加できます。 - -| 名称 | 説明 | 推奨データタイプ | -|------------------|------------------------------------------------|-----------------------| -| _key | Kafkaメッセージキー | 文字列 | -| _timestamp | Kafkaタイムスタンプ(ミリ秒精度) | DateTime64(3) | -| _partition | Kafkaパーティション | Int32 | -| _offset | Kafkaオフセット | Int64 | -| _topic | Kafkaトピック | 文字列 | -| _header_keys | レコードヘッダ内のキーの並列配列 | Array(文字列) | -| _header_values | レコードヘッダ内のヘッダの並列配列 | Array(文字列) | -| _raw_message | 完全なKafkaメッセージ | 文字列 | - 
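As a minimal sketch of how these virtual columns sit alongside ordinary payload columns in a destination table, consider the DDL below. The table name and the payload columns (`event_id`, `event_time`, `payload`) are made up for this example; the underscore-prefixed columns reuse the names and recommended data types from the table above and would be added via the `Add Column` button during setup.

```sql
CREATE TABLE kafka_events
(
    -- Hypothetical payload columns parsed from the JSON message
    event_id   String,
    event_time DateTime64(3),
    payload    String,
    -- Kafka virtual columns, using the recommended types listed above
    _key       String,
    _timestamp DateTime64(3),
    _partition Int32,
    _offset    Int64,
    _topic     String
)
ENGINE = MergeTree
ORDER BY (_topic, _partition, _offset);
```

Ordering by `(_topic, _partition, _offset)` is only one possible choice; pick a sorting key that matches your query patterns.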
-_raw_messageカラムは、JSONデータに対してのみ推奨されます。JSON文字列のみが必要なユースケース(例:ClickHouseの[`JsonExtract*`](/sql-reference/functions/json-functions#jsonextract-functions)関数を使用して下流のマテリアライズドビューを埋めるなど)では、すべての「非仮想」カラムを削除するとClickPipesのパフォーマンスが向上する可能性があります。 - -## 制限事項 {#limitations} - -- [DEFAULT](/sql-reference/statements/create/table#default)はサポートされていません。 - -## 配信のセマンティクス {#delivery-semantics} -Kafka向けのClickPipesは`少なくとも一度`の配信セマンティクスを提供します(最も一般的に使用されるアプローチの一つとして)。配信セマンティクスについてのフィードバックがある場合は[お問い合わせフォーム](https://clickhouse.com/company/contact?loc=clickpipes)までお知らせください。正確に一度のセマンティクスが必要な場合は、公式の[`clickhouse-kafka-connect`](https://clickhouse.com/blog/real-time-event-streaming-with-kafka-connect-confluent-cloud-clickhouse)シンクを使用することをお勧めします。 - -## 認証 {#authentication} -Apache Kafkaプロトコルデータソースに対して、ClickPipesはTLS暗号化を伴う[SASL/PLAIN](https://docs.confluent.io/platform/current/kafka/authentication_sasl/authentication_sasl_plain.html)認証や`SASL/SCRAM-SHA-256`、`SASL/SCRAM-SHA-512`をサポートします。ストリーミングソース(Redpanda、MSKなど)に応じて、互換性に基づきこれらの認証メカニズムの全てまたは一部が有効になります。認証要件が異なる場合は、ぜひ[フィードバックをお寄せください](https://clickhouse.com/company/contact?loc=clickpipes)。 - -### IAM {#iam} - -:::info -MSK ClickPipe用のIAM認証はベータ機能です。 -::: - -ClickPipesは、以下のAWS MSK認証をサポートしています。 - - - [SASL/SCRAM-SHA-512](https://docs.aws.amazon.com/msk/latest/developerguide/msk-password.html)認証 - - [IAM資格情報またはロールベースのアクセス](https://docs.aws.amazon.com/msk/latest/developerguide/how-to-use-iam-access-control.html)認証 - -MSKブローカーに接続するためにIAM認証を使用する場合、IAMロールは必要な権限を持っている必要があります。 -以下は、MSKのApache Kafka APIに必要なIAMポリシーの例です。 - -```json -{ - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Action": [ - "kafka-cluster:Connect" - ], - "Resource": [ - "arn:aws:kafka:us-west-2:12345678912:cluster/clickpipes-testing-brokers/b194d5ae-5013-4b5b-ad27-3ca9f56299c9-10" - ] - }, - { - "Effect": "Allow", - "Action": [ - "kafka-cluster:DescribeTopic", - "kafka-cluster:ReadData" - ], - "Resource": [ - "arn:aws:kafka:us-west-2:12345678912:topic/clickpipes-testing-brokers/*" - ] - }, - { - "Effect": "Allow", - "Action": [ - "kafka-cluster:AlterGroup", - "kafka-cluster:DescribeGroup" - ], - "Resource": [ - "arn:aws:kafka:us-east-1:12345678912:group/clickpipes-testing-brokers/*" - ] - } - ] -} -``` - -#### 信頼関係の設定 {#configuring-a-trusted-relationship} - -もしIAMロールARNでMSKに認証をする場合、ロールが引き受けられるようにClickHouse Cloudインスタンスとの間に信頼関係を追加する必要があります。 - -:::note -ロールベースのアクセスは、AWSにデプロイされたClickHouse Cloudインスタンスのみ機能します。 -::: - -```json -{ - "Version": "2012-10-17", - "Statement": [ - ... 
- { - "Effect": "Allow", - "Principal": { - "AWS": "arn:aws:iam::12345678912:role/CH-S3-your-clickhouse-cloud-role" - }, - "Action": "sts:AssumeRole" - }, - ] -} -``` - -### カスタム証明書 {#custom-certificates} -Kafka向けのClickPipesは、SASLおよびパブリックSSL/TLS証明書を持つKafkaブローカー用のカスタム証明書のアップロードをサポートしています。ClickPipeセットアップのSSL証明書セクションで証明書をアップロードできます。 -:::note -Kafka用のSASLと共に単一のSSL証明書のアップロードをサポートしていますが、相互TLS(mTLS)によるSSLは現在サポートされていないことに注意してください。 -::: - -## パフォーマンス {#performance} - -### バッチ処理 {#batching} -ClickPipesはClickHouseにバッチでデータを挿入します。データベース内に過剰なパーツが作成されるのを避け、クラスターのパフォーマンス問題を引き起こす可能性があるためです。 - -バッチは、以下のいずれかの基準が満たされたときに挿入されます: -- バッチサイズが最大サイズ(100,000行または20MB)に達した場合 -- バッチが最大の時間(5秒)オープンしていた場合 - -### レイテンシ {#latency} - -レイテンシ(Kafkaメッセージが生成されてからメッセージがClickHouseで使用可能になるまでの時間)は、ブローカーレイテンシ、ネットワークレイテンシ、メッセージサイズ/フォーマットなどの多くの要因に依存します。上記の[バッチ処理](#batching)はレイテンシにも影響を与えます。特定の負荷でのユースケースをテストし、期待されるレイテンシを確認することを常に推奨します。 - -ClickPipesはレイテンシに関して何の保証も提供しません。特定の低レイテンシ要件がある場合は、[お問い合わせください](https://clickhouse.com/company/contact?loc=clickpipes)。 - -### スケーリング {#scaling} - -Kafka向けのClickPipesは水平スケーリングを設計されています。デフォルトでは、1つのコンシューマを持つコンシューマグループを作成します。 -これは、ClickPipe詳細ビューのスケーリングコントロールで変更可能です。 - -ClickPipesは高可用性を提供し、アベイラビリティゾーン分散アーキテクチャを持っています。 -少なくとも二つのコンシューマへスケーリングすることが必要です。 - -起動しているコンシューマの数に関わらず、故障耐性は設計上提供されています。 -コンシューマまたはその基盤インフラストラクチャが失敗した場合、ClickPipeは自動的にコンシューマを再起動し、メッセージの処理を続行します。 - -## F.A.Q {#faq} - -### 一般的な問い合わせ {#general} - -- **Kafka向けのClickPipesはどのように機能しますか?** - - ClickPipesは、指定されたトピックからデータを読み取るためのKafkaコンシューマAPIを実行する専用のアーキテクチャを使用し、データを特定のClickHouse Cloudサービス内のClickHouseテーブルに挿入します。 - -- **ClickPipesとClickHouse Kafkaテーブルエンジンの違いは何ですか?** - - Kafkaテーブルエンジンは、ClickHouseサーバー自体がKafkaに接続し、イベントを引き出して書き込む「プルモデル」を実装するClickHouseのコア機能です。 - - ClickPipesはClickHouseサービスとは独立して動作する別のクラウドサービスで、Kafka(または他のデータソース)に接続し、関連するClickHouse Cloudサービスにイベントをプッシュします。この分離されたアーキテクチャは、優れた運用柔軟性、明確な関心の分離、スケーラブルな取り込み、優雅な失敗管理、拡張性などを可能にします。 - -- **Kafka向けのClickPipesを使用するための要件は何ですか?** - - Kafka向けのClickPipesを使用するには、稼働中のKafkaブローカーとClickPipesが有効化されたClickHouse Cloudサービスが必要です。ClickHouse CloudがKafkaブローカーにアクセスできることも確認する必要があります。このためには、Kafka側でリモート接続を許可し、Kafka設定内で[ClickHouse CloudのエグレスIPアドレス](/manage/security/cloud-endpoints-api)をホワイトリストに追加します。 - -- **Kafka向けのClickPipesはAWS PrivateLinkをサポートしていますか?** - - AWS PrivateLinkはサポートされています。詳細については[お問い合わせください](https://clickhouse.com/company/contact?loc=clickpipes)。 - -- **ClickPipes for Kafkaを使用してKafkaトピックにデータを書き込むことはできますか?** - - いいえ、ClickPipes for KafkaはKafkaトピックからデータを読み取るように設計されており、それらに書き込むことはできません。Kafkaトピックにデータを書き込むには、専用のKafkaプロデューサを使用する必要があります。 - -- **ClickPipesは複数のブローカーをサポートしていますか?** - - はい、ブローカーが同じクォーラムの一部であれば、`,`で区切って一緒に設定できます。 - -### Upstash {#upstash} - -- **ClickPipesはUpstashをサポートしていますか?** - - はい。Upstash Kafka製品は2024年9月11日に廃止期間に入り、6か月間継続します。既存の顧客は、ClickPipesを使用して既存のUpstash Kafkaブローカーを利用することができます。廃止通知前の既存のUpstash Kafka ClickPipesには影響がありません。廃止期間が終了すると、ClickPipeは機能しなくなります。 - -- **ClickPipesはUpstashスキーマレジストリをサポートしていますか?** - - いいえ。ClickPipesはUpstash Kafkaスキーマレジストリとは互換性がありません。 - -- **ClickPipesはUpstash QStashワークフローをサポートしていますか?** - - いいえ。QStashワークフローにKafka互換のインターフェースが導入されない限り、Kafka ClickPipesでは機能しません。 - -### Azure EventHubs {#azure-eventhubs} - -- **Azure Event Hubs ClickPipeはKafkaインターフェースなしで機能しますか?** - - いいえ。ClickPipesはAzure Event HubsにKafkaインターフェースが有効である必要があります。Kafkaプロトコルは、Standard、Premium、およびDedicated SKUの価格帯でのみサポートされています。 - -- **AzureスキーマレジストリはClickPipesと互換性がありますか?** - - いいえ。ClickPipesは現在、Event Hubsスキーマレジストリとは互換性がありません。 - -- **Azure Event Hubsからデータを消費するために私のポリシーにはどのような権限が必要ですか?** - - 
トピックをリストし、イベントを消費するには、ClickPipesに与えられる共有アクセスポリシーには、少なくとも「リッスン」クレームが必要です。 - -- **なぜ私のEvent Hubsがデータを返さないのですか?** - - ClickHouseインスタンスがEvent Hubsデプロイメントと異なるリージョンまたは大陸にある場合、ClickPipesのオンボーディング時にタイムアウトが発生し、Event Hubからデータを消費する際にレイテンシが高くなる可能性があります。ClickHouse CloudデプロイメントとAzure Event Hubsデプロイメントを近いクラウドリージョン内に配置することが、パフォーマンス低下を避けるためのベストプラクティスと見なされます。 - -- **Azure Event Hubsにポート番号を含める必要がありますか?** - - はい。ClickPipesは、Kafkaインターフェースのポート番号を含めることを期待しています。ポート番号は`:9093`です。 - -- **ClickPipes IPはまだAzure Event Hubsに関連していますか?** - - はい。Event Hubsインスタンスへのトラフィックを制限する場合は、[ドキュメント化された静的NAT IP](./index.md#list-of-static-ips)を追加してください。 - -- **接続文字列はEvent Hub用ですか、それともEvent Hubネームスペース用ですか?** - - どちらでも機能しますが、複数のEvent Hubsからサンプルを取得するためにネームスペースレベルで共有アクセスポリシーを使用することをお勧めします。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/kafka.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/kafka.md.hash deleted file mode 100644 index e948b1bc0b3..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/kafka.md.hash +++ /dev/null @@ -1 +0,0 @@ -19b4c0da6155f29f diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/kinesis.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/kinesis.md deleted file mode 100644 index 535e6bbeac5..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/kinesis.md +++ /dev/null @@ -1,157 +0,0 @@ ---- -sidebar_label: 'ClickPipes for Amazon Kinesis' -description: 'Seamlessly connect your Amazon Kinesis data sources to ClickHouse - Cloud.' -slug: '/integrations/clickpipes/kinesis' -title: 'Integrating Amazon Kinesis with ClickHouse Cloud' ---- - -import cp_service from '@site/static/images/integrations/data-ingestion/clickpipes/cp_service.png'; -import cp_step0 from '@site/static/images/integrations/data-ingestion/clickpipes/cp_step0.png'; -import cp_step1 from '@site/static/images/integrations/data-ingestion/clickpipes/cp_step1.png'; -import cp_step2_kinesis from '@site/static/images/integrations/data-ingestion/clickpipes/cp_step2_kinesis.png'; -import cp_step3_kinesis from '@site/static/images/integrations/data-ingestion/clickpipes/cp_step3_kinesis.png'; -import cp_step4a from '@site/static/images/integrations/data-ingestion/clickpipes/cp_step4a.png'; -import cp_step4a3 from '@site/static/images/integrations/data-ingestion/clickpipes/cp_step4a3.png'; -import cp_step4b from '@site/static/images/integrations/data-ingestion/clickpipes/cp_step4b.png'; -import cp_step5 from '@site/static/images/integrations/data-ingestion/clickpipes/cp_step5.png'; -import cp_success from '@site/static/images/integrations/data-ingestion/clickpipes/cp_success.png'; -import cp_remove from '@site/static/images/integrations/data-ingestion/clickpipes/cp_remove.png'; -import cp_destination from '@site/static/images/integrations/data-ingestion/clickpipes/cp_destination.png'; -import cp_overview from '@site/static/images/integrations/data-ingestion/clickpipes/cp_overview.png'; -import Image from '@theme/IdealImage'; - - - -# Amazon KinesisとClickHouse Cloudの統合 -## 前提条件 {#prerequisite} -[ClickPipesの紹介](./index.md)に目を通し、[IAM認証情報](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html)または[IAMロール](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles.html)を設定しました。ClickHouse 
Cloudと連携するロールの設定に関する情報は、[Kinesisロールベースアクセスガイド](./secure-kinesis.md)を参照してください。 - -## 最初のClickPipeを作成する {#creating-your-first-clickpipe} - -1. ClickHouse CloudサービスのSQLコンソールにアクセスします。 - - - -2. 左側のメニューから`Data Sources`ボタンを選択し、「ClickPipeの設定」をクリックします。 - - - -3. データソースを選択します。 - - - -4. ClickPipeの名前、説明(任意)、IAMロールまたは認証情報、及び他の接続の詳細を提供してフォームに記入します。 - - - -5. Kinesisストリームと開始オフセットを選択します。UIは選択したソース(Kafkaトピックなど)からのサンプルドキュメントを表示します。また、ClickPipeのパフォーマンスと安定性を向上させるために、KinesisストリームのEnhanced Fan-outを有効にすることもできます(Enhanced Fan-outの詳細は[こちら](https://aws.amazon.com/blogs/aws/kds-enhanced-fanout)にあります)。 - - - -6. 次のステップでは、新しいClickHouseテーブルにデータを取り込むか、既存のテーブルを再利用するかを選択できます。画面の指示に従ってテーブル名、スキーマ、および設定を変更してください。上部のサンプルテーブルでリアルタイムの変更プレビューを見ることができます。 - - - - また、提供されたコントロールを使用して高度な設定をカスタマイズすることもできます。 - - - -7. あるいは、既存のClickHouseテーブルにデータを取り込むことに決めることもできます。その場合、UIはソースのフィールドを選択した宛先テーブルのClickHouseフィールドにマッピングできるようにします。 - - - -8. 最後に、内部ClickPipesユーザーの権限を設定できます。 - - **権限:** ClickPipesは、宛先テーブルにデータを書き込むための専用ユーザーを作成します。この内部ユーザーのロールをカスタムロールまたは定義されたロールのいずれかから選択できます。 - - `フルアクセス`: クラスターへのフルアクセスを持ちます。これは、宛先テーブルにMaterialized ViewやDictionaryを使用する場合に役立ちます。 - - `宛先テーブルのみ`: 宛先テーブルに対する`INSERT`権限のみを持ちます。 - - - -9. 「セットアップ完了」をクリックすると、システムがClickPipeを登録し、概要テーブルに表示されます。 - - - - - - 概要テーブルは、ClickHouse内のソースまたは宛先テーブルからサンプルデータを表示するコントロールを提供します。 - - - - また、ClickPipeを削除したり、取り込みジョブの概要を表示したりするコントロールも提供します。 - - - -10. **おめでとうございます!** 最初のClickPipeを正常にセットアップしました。これがストリーミングClickPipeの場合、遠隔データソースからリアルタイムでデータを取り込むために継続的に実行されます。そうでない場合は、バッチを取り込み完了します。 - - -## サポートされるデータ形式 {#supported-data-formats} - -サポートされている形式は次のとおりです: -- [JSON](../../../interfaces/formats.md/#json) - -## サポートされるデータ型 {#supported-data-types} - -現在ClickPipesでサポートされているClickHouseのデータ型は次のとおりです: - -- 基本的な数値型 - \[U\]Int8/16/32/64およびFloat32/64 -- 大きな整数型 - \[U\]Int128/256 -- 小数型 -- ブール型 -- 文字列 -- 固定文字列 -- 日付、Date32 -- DateTime、DateTime64(UTCタイムゾーンのみ) -- Enum8/Enum16 -- UUID -- IPv4 -- IPv6 -- すべてのClickHouse LowCardinality型 -- 上記の型(Nullableを含む)を使用したキーと値のあるマップ -- 上記の型(Nullableを含む、一階層の深さのみ)を使用した要素のタプルと配列 - -## Kinesis仮想カラム {#kinesis-virtual-columns} - -Kinesisストリームのためにサポートされている仮想カラムは次のとおりです。新しい宛先テーブルを作成する際には、`Add Column`ボタンを使用して仮想カラムを追加できます。 - -| 名前 | 説明 | 推奨データ型 | -|--------------------|-------------------------------------------------------|-----------------------| -| _key | Kinesisパーティションキー | 文字列 | -| _timestamp | Kinesisおおよその到着タイムスタンプ(ミリ秒精度) | DateTime64(3) | -| _stream | Kinesisストリーム名 | 文字列 | -| _sequence_number | Kinesisシーケンス番号 | 文字列 | -| _raw_message | 完全なKinesisメッセージ | 文字列 | - -_raw_messageフィールドは、完全なKinesis JSONレコードが必要な場合(例:ClickHouseの[`JsonExtract*`](/sql-reference/functions/json-functions#jsonextract-functions)関数を使用して、下流のMaterialized Viewを構築する場合)に使用できます。このようなパイプの場合、すべての「非仮想」カラムを削除することでClickPipesのパフォーマンスが向上する可能性があります。 - -## 制限事項 {#limitations} - -- [DEFAULT](/sql-reference/statements/create/table#default)はサポートされていません。 - -## パフォーマンス {#performance} - -### バッチ処理 {#batching} -ClickPipesは、ClickHouseにデータをバッチで挿入します。これは、データベースに多くのパーツを生成してクラスターのパフォーマンス問題を引き起こさないようにするためです。 - -バッチは、次のいずれかの条件が満たされたときに挿入されます: -- バッチサイズが最大サイズ(100,000行または20MB)に達した -- バッチが最大時間(5秒)オープンのままであった - -### レイテンシ {#latency} - -レイテンシ(Kinesisメッセージがストリームに送信されてからメッセージがClickHouseで利用可能になるまでの時間)は、いくつかの要因(例:Kinesisレイテンシ、ネットワークレイテンシ、メッセージサイズ/形式)の影響を受けます。上記のセクションで説明した[バッチ処理](#batching)もレイテンシに影響を与えます。特定のユースケースをテストして、期待できるレイテンシを理解することをお勧めします。 - -特定の低レイテンシ要件がある場合は、[お問い合わせ](https://clickhouse.com/company/contact?loc=clickpipes)ください。 - -### スケーリング {#scaling} - 
-Kinesis向けのClickPipesは、水平にスケールするように設計されています。デフォルトでは、1つのコンシューマーを持つコンシューマーグループを作成します。これは、ClickPipeの詳細ビューのスケーリングコントロールで変更できます。 - -ClickPipesは、高可用性を提供する可用性ゾーン分散アーキテクチャを備えています。これには、少なくとも2つのコンシューマーが必要です。 - -動作中のコンシューマーの数に関係なく、フォールトトレランスは設計上提供されています。コンシューマーまたはその基盤となるインフラストラクチャが失敗すると、ClickPipeは自動的にコンシューマーを再起動し、メッセージの処理を続行します。 - -## 認証 {#authentication} - -Amazon Kinesisストリームにアクセスするには、[IAM認証情報](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html)または[IAMロール](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles.html)を使用できます。IAMロールの設定方法についての詳細は、ClickHouse Cloudと連携するロールの設定に関する情報を[こちらのガイド](./secure-kinesis.md)を参照してください。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/kinesis.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/kinesis.md.hash deleted file mode 100644 index 42c41876480..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/kinesis.md.hash +++ /dev/null @@ -1 +0,0 @@ -8b120e72781a006c diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/mysql/datatypes.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/mysql/datatypes.md deleted file mode 100644 index da4745c7f37..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/mysql/datatypes.md +++ /dev/null @@ -1,33 +0,0 @@ ---- -title: 'ClickPipes for MySQL: Supported data types' -slug: '/integrations/clickpipes/mysql/datatypes' -description: 'Page describing MySQL ClickPipe datatype mapping from MySQL to ClickHouse' ---- - - - -Here is the supported data-type mapping for the MySQL ClickPipe: - -| MySQL Type | ClickHouse type | Notes | -| -------------------------------------------------------------------------- | ------------------------------------------ | -------------------------------------------------------------------------------------- | -| Enum | LowCardinality(String) | | -| Set | String | | -| Decimal | Decimal | | -| TinyInt | Int8 | 符号なしをサポートしています。 | -| SmallInt | Int16 | 符号なしをサポートしています。 | -| MediumInt, Int | Int32 | 符号なしをサポートしています。 | -| BigInt | Int64 | 符号なしをサポートしています。 | -| Year | Int16 | | -| TinyText, Text, MediumText, LongText | String | | -| TinyBlob, Blob, MediumBlob, LongBlob | String | | -| Char, Varchar | String | | -| Binary, VarBinary | String | | -| TinyInt(1) | Bool | | -| JSON | String | MySQL専用; MariaDBの `json` は `text` のエイリアスで制約が付いています。 | -| Geometry & Geometry Types | String | WKT (Well-Known Text)。WKTは小さな精度損失を被る可能性があります。 | -| Vector | Array(Float32) | MySQL専用; MariaDBはサポートを近日中に追加予定です。 | -| Float | Float32 | 初期ロード中にClickHouseの精度がMySQLと異なる場合があります。テキストプロトコルによるため。 | -| Double | Float64 | 初期ロード中にClickHouseの精度がMySQLと異なる場合があります。テキストプロトコルによるため。 | -| Date | Date32 | | -| Time | DateTime64(6) | 日付部分はUnixエポックです。 | -| Datetime, Timestamp | DateTime64(6) | | diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/mysql/datatypes.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/mysql/datatypes.md.hash deleted file mode 100644 index 7f86e11c7c4..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/mysql/datatypes.md.hash +++ /dev/null @@ -1 +0,0 @@ -4e5f7e6b08fd9969 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/mysql/faq.md 
b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/mysql/faq.md deleted file mode 100644 index a29f26bf558..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/mysql/faq.md +++ /dev/null @@ -1,31 +0,0 @@ ---- -sidebar_label: 'FAQ' -description: 'Frequently asked questions about ClickPipes for MySQL.' -slug: '/integrations/clickpipes/mysql/faq' -sidebar_position: 2 -title: 'ClickPipes for MySQL FAQ' ---- - - - - -# ClickPipes for MySQL FAQ - -### MySQL ClickPipeはMariaDBをサポートしていますか? {#does-the-clickpipe-support-mariadb} -はい、MySQL ClickPipeはMariaDB 10.0以降をサポートしています。その設定はMySQLと非常に似ており、GTIDの動作はデフォルトで有効になっています。 - -### MySQL ClickPipeはPlanetscaleやVitessをサポートしていますか? {#does-the-clickpipe-support-planetscale-vitess} -現在、標準のMySQLのみをサポートしています。PlanetScaleはVitess上に構築されているため、VitessのVStream APIと統合し、VGtids (Vitess Global Transaction IDs) を処理して増分変更を追跡する必要があります。これは、ネイティブMySQLのCDCの動作とは異なります。この機能のサポートを追加するための作業が進められています。 - -### MySQLに接続するときにTLS証明書の検証エラーが表示されるのはなぜですか? {#tls-certificate-validation-error} -`failed to verify certificate: x509: certificate is not valid for any names`のようなエラーが表示された場合、これはMySQLサーバーのSSL/TLS証明書に接続ホスト名(例: EC2インスタンスのDNS名)が有効名のリストに含まれていないときに発生します。ClickPipesはデフォルトでTLSを有効にして、安全な暗号化接続を提供します。 - -この問題を解決するためには、以下の3つのオプションがあります: - -1. 接続設定でホスト名の代わりにIPアドレスを使用し、「TLS Host (optional)」フィールドを空のままにします。この方法は最も簡単ですが、ホスト名の検証をバイパスするため、最も安全な方法ではありません。 - -2. 「TLS Host (optional)」フィールドを、証明書のSubject Alternative Name (SAN)フィールドにある実際のホスト名と一致させるように設定します。これにより、適切な検証が維持されます。 - -3. 接続に使用している実際のホスト名を証明書に含めるようにMySQLサーバーのSSL証明書を更新します。 - -これは特に、クラウド環境にセルフホスティングされているデータベース(またはAWS Private Linkをエンドポイントサービス経由で使用している場合)に接続する際に、パブリックDNS名が証明書に記載のものと異なる場合に一般的な構成上の問題です。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/mysql/faq.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/mysql/faq.md.hash deleted file mode 100644 index e2c81d90942..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/mysql/faq.md.hash +++ /dev/null @@ -1 +0,0 @@ -07133862086c301d diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/mysql/index.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/mysql/index.md deleted file mode 100644 index f2342bb8f67..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/mysql/index.md +++ /dev/null @@ -1,117 +0,0 @@ ---- -sidebar_label: 'ClickPipes for MySQL' -description: 'Describes how to seamlessly connect your MySQL to ClickHouse Cloud.' 
-slug: '/integrations/clickpipes/mysql' -title: 'Ingesting Data from MySQL to ClickHouse (using CDC)' ---- - -import BetaBadge from '@theme/badges/BetaBadge'; -import cp_service from '@site/static/images/integrations/data-ingestion/clickpipes/cp_service.png'; -import cp_step0 from '@site/static/images/integrations/data-ingestion/clickpipes/cp_step0.png'; -import mysql_tile from '@site/static/images/integrations/data-ingestion/clickpipes/mysql/mysql-tile.png' -import mysql_connection_details from '@site/static/images/integrations/data-ingestion/clickpipes/mysql/mysql-connection-details.png' -import ssh_tunnel from '@site/static/images/integrations/data-ingestion/clickpipes/postgres/ssh-tunnel.jpg' -import select_destination_db from '@site/static/images/integrations/data-ingestion/clickpipes/mysql/select-destination-db.png' -import ch_permissions from '@site/static/images/integrations/data-ingestion/clickpipes/postgres/ch-permissions.jpg' -import Image from '@theme/IdealImage'; - - -# MySQLからClickHouseへのCDCを使用したデータの取り込み - - - -:::info -現在、ClickPipesを介してClickHouse CloudにMySQLからデータを取り込むことはプライベートプレビュー中です。 -::: - -ClickPipesを使用して、ソースのMySQLデータベースからClickHouse Cloudにデータを取り込むことができます。ソースのMySQLデータベースは、オンプレミスまたはクラウドにホストされている可能性があります。 - -## 前提条件 {#prerequisites} - -まず、MySQLデータベースが正しく設定されていることを確認してください。ソースのMySQLインスタンスに応じて、次のガイドのいずれかに従ってください。 - -1. [Amazon RDS MySQL](./mysql/source/rds) - -2. [Amazon Aurora MySQL](./mysql/source/aurora) - -3. [Cloud SQL for MySQL](./mysql/source/gcp) - -4. [Amazon RDS MariaDB](./mysql/source/rds_maria) - -ソースのMySQLデータベースが設定されたら、ClickPipeの作成を続けることができます。 - -## ClickPipeを作成する {#creating-your-clickpipe} - -ClickHouse Cloudアカウントにログインしていることを確認してください。まだアカウントをお持ちでない場合は、[こちらからサインアップ](https://cloud.clickhouse.com/)できます。 - -[//]: # ( TODO update image here) -1. ClickHouse Cloudコンソールで、ClickHouse Cloudサービスに移動します。 - - - -2. 左側のメニューから`データソース`ボタンを選択し、「ClickPipeを設定」をクリックします。 - - - -3. `MySQL CDC`タイルを選択します。 - - - -### ソースのMySQLデータベース接続を追加する {#adding-your-source-mysql-database-connection} - -4. 前提条件のステップで構成したソースのMySQLデータベースの接続詳細を入力します。 - - :::info - - 接続詳細を追加する前に、ファイアウォールルールでClickPipesのIPアドレスをホワイトリストに登録していることを確認してください。次のページには、[ClickPipesのIPアドレスのリスト](../index.md#list-of-static-ips)があります。 - 詳細については、[このページの上部](#prerequisites)にリンクされているソースのMySQL設定ガイドを参照してください。 - - ::: - - - -#### (オプション) SSHトンネリングの設定 {#optional-setting-up-ssh-tunneling} - -ソースのMySQLデータベースが公開アクセスできない場合は、SSHトンネリングの詳細を指定できます。 - -1. 「SSHトンネリングを使用する」トグルを有効にします。 -2. SSH接続詳細を入力します。 - - - -3. キーベースの認証を使用する場合は、「キーペアを取り消して生成」をクリックして新しいキーを生成し、生成された公開キーをSSHサーバーの`~/.ssh/authorized_keys`にコピーします。 -4. 「接続を確認」をクリックして接続を確認します。 - -:::note - -ClickPipesがSSHトンネルを確立できるように、SSHバスチオンホストのファイアウォールルールで[ClickPipesのIPアドレス](../clickpipes#list-of-static-ips)をホワイトリストに登録してください。 - -::: - -接続詳細が入力されたら、「次へ」をクリックします。 - -#### 詳細設定の構成 {#advanced-settings} - -必要に応じて詳細設定を構成できます。各設定の簡単な説明は以下の通りです。 - -- **同期間隔**: これはClickPipesがソースデータベースをポーリングして変更を確認する間隔です。コストに敏感なユーザーには、これを高い値(`3600`を超える)に設定することを推奨します。 -- **初回ロードのための並列スレッド**: これは初回スナップショットを取得するために使用される並列作業者の数です。テーブルの数が多い場合に、初回スナップショットを取得するために使用される並列作業者の数を制御するのに役立ちます。この設定はテーブルごとです。 -- **プルバッチサイズ**: 単一バッチで取得する行の数です。これは最善の努力としての設定であり、すべてのケースで適用されるとは限りません。 -- **スナップショットごとのパーティションの行数**: 初回スナップショット中に各パーティションで取得される行の数です。テーブルに多くの行がある場合に、各パーティションで取得される行の数を制御するのに役立ちます。 -- **スナップショットのテーブル数**: 初回スナップショット中に並列で取得されるテーブルの数です。テーブルの数が多い場合に、並列で取得されるテーブルの数を制御するのに役立ちます。 - -### テーブルの構成 {#configuring-the-tables} - -5. ここで、ClickPipeの宛先データベースを選択できます。既存のデータベースを選択するか、新しいデータベースを作成することができます。 - - - -6. 
ソースのMySQLデータベースからレプリケートしたいテーブルを選択できます。テーブルを選択する際に、宛先のClickHouseデータベースでテーブルの名称を変更したり、特定のカラムを除外することも可能です。 - -### 権限を確認してClickPipeを開始する {#review-permissions-and-start-the-clickpipe} - -7. 権限のドロップダウンから「フルアクセス」ロールを選択し、「セットアップを完了」をクリックします。 - - - -最後に、一般的な問題とその解決方法についての詳細は、["ClickPipes for MySQLFAQ"](/integrations/clickpipes/mysql/faq)ページを参照してください。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/mysql/index.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/mysql/index.md.hash deleted file mode 100644 index 4e37eb87f20..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/mysql/index.md.hash +++ /dev/null @@ -1 +0,0 @@ -f283c3bb4c6b952d diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/mysql/source/aurora.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/mysql/source/aurora.md deleted file mode 100644 index e81550107be..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/mysql/source/aurora.md +++ /dev/null @@ -1,134 +0,0 @@ ---- -sidebar_label: 'Amazon Aurora MySQL' -description: 'ClickPipesのソースとしてAmazon Aurora MySQLを設定する手順についての詳細ガイド' -slug: '/integrations/clickpipes/mysql/source/aurora' -title: 'Aurora MySQLのソース設定ガイド' ---- - -import rds_backups from '@site/static/images/integrations/data-ingestion/clickpipes/mysql/source/rds/rds-backups.png'; -import parameter_group_in_blade from '@site/static/images/integrations/data-ingestion/clickpipes/postgres/source/rds/parameter_group_in_blade.png'; -import security_group_in_rds_mysql from '@site/static/images/integrations/data-ingestion/clickpipes/mysql/source/rds/security-group-in-rds-mysql.png'; -import edit_inbound_rules from '@site/static/images/integrations/data-ingestion/clickpipes/postgres/source/rds/edit_inbound_rules.png'; -import aurora_config from '@site/static/images/integrations/data-ingestion/clickpipes/mysql/parameter_group/rds_config.png'; -import binlog_format from '@site/static/images/integrations/data-ingestion/clickpipes/mysql/parameter_group/binlog_format.png'; -import binlog_row_image from '@site/static/images/integrations/data-ingestion/clickpipes/mysql/parameter_group/binlog_row_image.png'; -import binlog_row_metadata from '@site/static/images/integrations/data-ingestion/clickpipes/mysql/parameter_group/binlog_row_metadata.png'; -import edit_button from '@site/static/images/integrations/data-ingestion/clickpipes/mysql/parameter_group/edit_button.png'; -import enable_gtid from '@site/static/images/integrations/data-ingestion/clickpipes/mysql/enable_gtid.png'; -import Image from '@theme/IdealImage'; - - -# Aurora MySQL ソースセットアップガイド - -これは MySQL ClickPipe を介してデータを複製するために、Aurora MySQL インスタンスを設定する手順を示すガイドです。 -
-:::info -MySQL の FAQ も [こちら](/integrations/data-ingestion/clickpipes/mysql/faq.md)でご確認いただくことをお勧めします。FAQ ページは積極的に更新されています。 -::: - -## バイナリログ保持の有効化 {#enable-binlog-retention-aurora} -バイナリログは、MySQL サーバーインスタンスに対するデータ変更に関する情報を含むログファイルのセットであり、複製にはバイナリログファイルが必要です。以下の 2 つのステップを実行する必要があります。 - -### 1. 自動バックアップによるバイナリログの有効化 {#enable-binlog-logging-aurora} -自動バックアップ機能は、MySQL のバイナリログがオンかオフかを決定します。AWS コンソールで設定できます。 - - - -複製のユースケースに応じて、バックアップの保持期間を適切に長い値に設定することをお勧めします。 - -### 2. バイナリログ保持時間 {#binlog-retention-hours-aurora} -以下の手順を実行して、複製用のバイナリログの利用可能性を確保します: - -```text -mysql=> call mysql.rds_set_configuration('binlog retention hours', 24); -``` -この設定が行われていない場合、Amazon RDS は可能な限り早くバイナリログを削除し、バイナリログにギャップが生じます。 - -## パラメータグループでのバイナリログ設定の構成 {#binlog-parameter-group-aurora} - -RDS コンソールで MySQL インスタンスをクリックし、`Configurations` タブに移動すると、パラメータグループを見つけることができます。 - - - -パラメータグループのリンクをクリックすると、そのページに移動します。右上に編集ボタンが表示されます。 - - - -以下の設定を次のように設定する必要があります: - -1. `binlog_format` を `ROW` に設定します。 - - - -2. `binlog_row_metadata` を `FULL` に設定します。 - - - -3. `binlog_row_image` を `FULL` に設定します。 - - - -右上の `Save Changes` をクリックします。変更が反映されるにはインスタンスを再起動する必要がある場合があります。これを確認する方法は、RDS インスタンスの構成タブ内のパラメータグループリンクの横に `Pending reboot` と表示されることです。 -
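-設定が反映されたかどうかは、たとえば次のようなクエリで確認できます(あくまで確認用の一例であり、出力は環境によって異なります)。
-
-```sql
--- 3 つの binlog 関連パラメータの現在値を確認します(期待値: ROW / FULL / FULL)
-SHOW VARIABLES WHERE Variable_name IN
-  ('binlog_format', 'binlog_row_metadata', 'binlog_row_image');
-```
-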
-:::tip -MySQL クラスターがある場合、上記のパラメータは [DB クラスター](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/USER_WorkingWithParamGroups.CreatingCluster.html) のパラメータグループにあり、DB インスタンスグループにはありません。 -::: - -## GTID モードの有効化 {#gtid-mode-aurora} -グローバルトランザクション識別子 (GTID) は、MySQL の各コミットされたトランザクションに割り当てられる一意の ID です。これによりバイナリログの複製が簡素化され、トラブルシューティングが容易になります。 - -MySQL インスタンスが MySQL 5.7、8.0、または 8.4 の場合は、MySQL ClickPipe が GTID 複製を使用できるように GTID モードを有効にすることをお勧めします。 - -MySQL インスタンスの GTID モードを有効にするための手順は次のとおりです: -1. RDS コンソールで MySQL インスタンスをクリックします。 -2. `Configurations` タブをクリックします。 -3. パラメータグループのリンクをクリックします。 -4. 右上の `Edit` ボタンをクリックします。 -5. `enforce_gtid_consistency` を `ON` に設定します。 -6. `gtid-mode` を `ON` に設定します。 -7. 右上の `Save Changes` をクリックします。 -8. 変更が反映されるためにインスタンスを再起動します。 - - - -
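-再起動後、GTID モードが有効になっているかどうかは、たとえば次のクエリで確認できます(確認用の一例です)。
-
-```sql
--- 両方とも ON になっていれば、GTID ベースのレプリケーションを利用できます
-SELECT @@gtid_mode, @@enforce_gtid_consistency;
-```
-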
-:::info -MySQL ClickPipe は GTID モードなしでも複製をサポートしています。ただし、GTID モードを有効にすることでパフォーマンスが向上し、トラブルシューティングが容易になります。 -::: - -## データベースユーザーの構成 {#configure-database-user-aurora} - -管理ユーザーとして Aurora MySQL インスタンスに接続し、以下のコマンドを実行します: - -1. ClickPipes 用の専用ユーザーを作成します: - - ```sql - CREATE USER 'clickpipes_user'@'%' IDENTIFIED BY 'some-password'; - ``` - -2. スキーマ権限を付与します。以下の例では `mysql` データベースの権限を示しています。複製したい各データベースとホストに対してこれらのコマンドを繰り返します: - - ```sql - GRANT SELECT ON `mysql`.* TO 'clickpipes_user'@'host'; - ``` - -3. ユーザーにレプリケーション権限を付与します: - - ```sql - GRANT REPLICATION CLIENT ON *.* TO 'clickpipes_user'@'%'; - GRANT REPLICATION SLAVE ON *.* TO 'clickpipes_user'@'%'; - ``` - -## ネットワークアクセスの構成 {#configure-network-access} - -### IP ベースのアクセス制御 {#ip-based-access-control} - -Aurora インスタンスへのトラフィックを制限したい場合は、[ドキュメント化された静的 NAT IPs](../../index.md#list-of-static-ips)を Aurora セキュリティグループの `Inbound rules` に追加してください。以下のように表示されます: - - - - - -### AWS PrivateLink 経由のプライベートアクセス {#private-access-via-aws-privatelink} - -プライベートネットワークを介して Aurora インスタンスに接続するには、AWS PrivateLink を使用できます。接続の設定については、[ClickPipes 用の AWS PrivateLink セットアップガイド](/knowledgebase/aws-privatelink-setup-for-clickpipes)を参照してください。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/mysql/source/aurora.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/mysql/source/aurora.md.hash deleted file mode 100644 index 250de9c74f2..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/mysql/source/aurora.md.hash +++ /dev/null @@ -1 +0,0 @@ -4235ebf0a27f9b0f diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/mysql/source/gcp.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/mysql/source/gcp.md deleted file mode 100644 index f8c5470248b..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/mysql/source/gcp.md +++ /dev/null @@ -1,85 +0,0 @@ ---- -sidebar_label: 'Cloud SQL For MySQL' -description: 'ClickPipes のソースとして Cloud SQL for MySQL をセットアップする手順ガイド' -slug: '/integrations/clickpipes/mysql/source/gcp' -title: 'Cloud SQL for MySQL ソースセットアップガイド' ---- - -import gcp_pitr from '@site/static/images/integrations/data-ingestion/clickpipes/mysql/source/gcp/gcp-mysql-pitr.png'; -import gcp_mysql_flags from '@site/static/images/integrations/data-ingestion/clickpipes/mysql/source/gcp/gcp-mysql-flags.png'; -import gcp_mysql_ip from '@site/static/images/integrations/data-ingestion/clickpipes/mysql/source/gcp/gcp-mysql-ip.png'; -import gcp_mysql_edit_button from '@site/static/images/integrations/data-ingestion/clickpipes/mysql/source/gcp/gcp-mysql-edit-button.png'; -import gcp_mysql_cert from '@site/static/images/integrations/data-ingestion/clickpipes/mysql/source/gcp/gcp-mysql-cert.png'; -import rootca from '@site/static/images/integrations/data-ingestion/clickpipes/mysql/source/gcp/rootca.png'; -import Image from '@theme/IdealImage'; - - -# Cloud SQL for MySQL ソース設定ガイド - -これは、MySQL ClickPipeを介してデータの複製を行うためにあなたのCloud SQL for MySQLインスタンスを構成する手順を示したガイドです。 - -## バイナリログの保持を有効にする {#enable-binlog-retention-gcp} -バイナリログは、MySQLサーバーインスタンスに対して行われたデータの変更に関する情報を含むログファイルの集合であり、複製にはバイナリログファイルが必要です。 - -### PITRを介してバイナリログを有効にする {#enable-binlog-logging-gcp} -PITR機能は、Google CloudのMySQLに対してバイナリログのオンまたはオフを決定します。これはCloudコンソールで設定でき、Cloud SQLインスタンスを編集して下のセクションまでスクロールします。 - - - -複製のユースケースに応じて、適切に長い値に設定することが推奨されます。 - -まだ設定されていない場合は、Cloud 
SQLを編集してデータベースフラグセクションに次の設定を行ってください: -1. `binlog_expire_logs_seconds`を`86400`(1日)以上の値に設定します。 -2. `binlog_row_metadata`を`FULL`に設定します。 -3. `binlog_row_image`を`FULL`に設定します。 - -これを行うには、インスタンスの概要ページの右上隅にある`Edit`ボタンをクリックします。 - - -その後、`Flags`セクションまでスクロールして、上記のフラグを追加します。 - - - -## データベースユーザーの構成 {#configure-database-user-gcp} - -Cloud SQL MySQLインスタンスにrootユーザーとして接続し、次のコマンドを実行します: - -1. ClickPipes用の専用ユーザーを作成します: - - ```sql - CREATE USER 'clickpipes_user'@'host' IDENTIFIED BY 'some-password'; - ``` - -2. スキーマ権限を付与します。以下の例は`clickpipes`データベースの権限を示しています。複製したい各データベースとホストに対してこれらのコマンドを繰り返します: - - ```sql - GRANT SELECT ON `clickpipes`.* TO 'clickpipes_user'@'host'; - ``` - -3. ユーザーに複製権限を付与します: - - ```sql - GRANT REPLICATION CLIENT ON *.* TO 'clickpipes_user'@'%'; - GRANT REPLICATION SLAVE ON *.* TO 'clickpipes_user'@'%'; - ``` - -## ネットワークアクセスの構成 {#configure-network-access-gcp-mysql} - -Cloud SQLインスタンスへのトラフィックを制限したい場合は、[文書化された静的NAT IP](../../index.md#list-of-static-ips)をCloud SQL MySQLインスタンスの許可されたIPに追加してください。 -これはインスタンスを編集するか、Cloud Consoleのサイドバーの`Connections`タブに移動することで行えます。 - - - -## ルートCA証明書のダウンロードと使用 {#download-root-ca-certificate-gcp-mysql} -Cloud SQLインスタンスに接続するには、ルートCA証明書をダウンロードする必要があります。 - -1. Cloud ConsoleのCloud SQLインスタンスに移動します。 -2. サイドバーの`Connections`をクリックします。 -3. `Security`タブをクリックします。 -4. `Manage server CA certificates`セクションで、下部にある`DOWNLOAD CERTIFICATES`ボタンをクリックします。 - - - -5. ClickPipes UIで、新しいMySQL ClickPipeを作成するときに、ダウンロードした証明書をアップロードします。 - - diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/mysql/source/gcp.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/mysql/source/gcp.md.hash deleted file mode 100644 index 62ce22d48bf..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/mysql/source/gcp.md.hash +++ /dev/null @@ -1 +0,0 @@ -b0854b16a09439e5 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/mysql/source/rds.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/mysql/source/rds.md deleted file mode 100644 index 61b592b47ec..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/mysql/source/rds.md +++ /dev/null @@ -1,138 +0,0 @@ ---- -sidebar_label: 'Amazon RDS MySQL' -description: 'Step-by-step guide on how to set up Amazon RDS MySQL as a source for - ClickPipes' -slug: '/integrations/clickpipes/mysql/source/rds' -title: 'RDS MySQL source setup guide' ---- - -import rds_backups from '@site/static/images/integrations/data-ingestion/clickpipes/mysql/source/rds/rds-backups.png'; -import parameter_group_in_blade from '@site/static/images/integrations/data-ingestion/clickpipes/postgres/source/rds/parameter_group_in_blade.png'; -import security_group_in_rds_mysql from '@site/static/images/integrations/data-ingestion/clickpipes/mysql/source/rds/security-group-in-rds-mysql.png'; -import edit_inbound_rules from '@site/static/images/integrations/data-ingestion/clickpipes/postgres/source/rds/edit_inbound_rules.png'; -import rds_config from '@site/static/images/integrations/data-ingestion/clickpipes/mysql/parameter_group/rds_config.png'; -import binlog_format from '@site/static/images/integrations/data-ingestion/clickpipes/mysql/parameter_group/binlog_format.png'; -import binlog_row_image from '@site/static/images/integrations/data-ingestion/clickpipes/mysql/parameter_group/binlog_row_image.png'; -import binlog_row_metadata from 
'@site/static/images/integrations/data-ingestion/clickpipes/mysql/parameter_group/binlog_row_metadata.png'; -import edit_button from '@site/static/images/integrations/data-ingestion/clickpipes/mysql/parameter_group/edit_button.png'; -import enable_gtid from '@site/static/images/integrations/data-ingestion/clickpipes/mysql/enable_gtid.png'; -import Image from '@theme/IdealImage'; - - -# RDS MySQL ソース設定ガイド - -これは、RDS MySQL インスタンスを MySQL ClickPipe を介してデータを複製するように構成する手順です。 -
-:::info -MySQL FAQ を [こちら](/integrations/data-ingestion/clickpipes/mysql/faq.md) で確認することもお勧めします。FAQ ページはもりを積極的に更新されています。 -::: - -## バイナリログの保持を有効にする {#enable-binlog-retention-rds} -バイナリログは、MySQL サーバーインスタンスに対して行われたデータ変更に関する情報を含むログファイルのセットであり、レプリケーションにはバイナリログファイルが必要です。以下のステップの両方を実行する必要があります。 - -### 1. 自動バックアップを介してバイナリログを有効にする {#enable-binlog-logging-rds} -自動バックアップ機能は、MySQL のバイナリログがオンまたはオフになっているかを決定します。AWS コンソールで設定できます。 - - - -レプリケーションの使用ケースに応じて、バックアップの保持期間を適切に長い値に設定することをお勧めします。 - -### 2. バイナリログの保持時間 {#binlog-retention-hours-rds} -Amazon RDS for MySQL では、変更が含まれるバイナリログファイルが保持される時間を設定する方法が異なります。バイナリログファイルが削除される前に変更が読み取られない場合、レプリケーションは続行できなくなります。バイナリログの保持時間のデフォルト値は NULL であり、これはバイナリログが保持されていないことを意味します。 - -DB インスタンスのバイナリログを保持する時間を指定するには、mysql.rds_set_configuration 関数を使用し、レプリケーションが発生するために十分な長さのバイナリログ保持期間を設定します。`24 時間` が推奨される最小値です。 - -```text -mysql=> call mysql.rds_set_configuration('binlog retention hours', 24); -``` - -## パラメータグループでバイナリログ設定を構成する {#binlog-parameter-group-rds} - -パラメータグループは、RDS コンソールで MySQL インスタンスをクリックし、`Configurations` タブに移動すると見つけることができます。 - - - -パラメータグループのリンクをクリックすると、そのページに移動します。右上に編集ボタンがあります。 - - - -次の設定を次のように設定する必要があります。 - -1. `binlog_format` を `ROW` に設定します。 - - - -2. `binlog_row_metadata` を `FULL` に設定します。 - - - -3. `binlog_row_image` を `FULL` に設定します。 - - - -右上の `Save Changes` をクリックします。変更が有効になるためにはインスタンスを再起動する必要がある場合があります。この場合、RDS インスタンスの構成タブにあるパラメータグループリンクの横に `Pending reboot` という表示が見られます。 - -
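-ここまでのバイナリログ保持時間とパラメータグループの設定が反映されているかどうかは、RDS が提供する `mysql.rds_show_configuration` プロシージャなどで確認できます(確認用の一例であり、出力は環境によって異なります)。
-
-```sql
--- 'binlog retention hours' の現在値を表示します
-CALL mysql.rds_show_configuration;
-
--- binlog_format / binlog_row_metadata / binlog_row_image の現在値を表示します
-SHOW VARIABLES LIKE 'binlog_%';
-```
-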
-:::tip -MySQL クラスターを持っている場合、上記のパラメータは DB クラスター [DB Cluster](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/USER_WorkingWithParamGroups.CreatingCluster.html)のパラメータグループに存在し、DB インスタンスグループではありません。 -::: - -## GTID モードを有効にする {#gtid-mode-rds} -グローバルトランザクション ID (GTID) は、MySQL の各コミットされたトランザクションに割り当てられるユニーク ID です。GTID はバイナリログのレプリケーションを簡素化し、トラブルシューティングをより簡単にします。 - -MySQL インスタンスが MySQL 5.7、8.0 または 8.4 の場合、MySQL ClickPipe が GTID レプリケーションを使用できるように GTID モードを有効にすることをお勧めします。 - -MySQL インスタンスの GTID モードを有効にするには、以下の手順に従ってください。 -1. RDS コンソールで MySQL インスタンスをクリックします。 -2. `Configurations` タブをクリックします。 -3. パラメータグループのリンクをクリックします。 -4. 右上隅の `Edit` ボタンをクリックします。 -5. `enforce_gtid_consistency` を `ON` に設定します。 -6. `gtid-mode` を `ON` に設定します。 -7. 右上隅の `Save Changes` をクリックします。 -8. 変更を有効にするためにインスタンスを再起動します。 - - - -
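-再起動後の状態は、たとえば次のように確認できます(確認用の一例です)。
-
-```sql
--- gtid_mode と enforce_gtid_consistency が ON になっていることを確認します
-SHOW VARIABLES WHERE Variable_name IN ('gtid_mode', 'enforce_gtid_consistency');
-
--- GTID が有効な場合、新しいトランザクションがコミットされると
--- Executed_Gtid_Set 列に GTID が表示されるようになります
-SHOW MASTER STATUS;
-```
-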
-:::tip -MySQL ClickPipe は GTID モードなしでのレプリケーションもサポートしています。ただし、GTID モードを有効にすることは、パフォーマンスの向上とトラブルシューティングの容易さのために推奨されます。 -::: - - -## データベースユーザーを構成する {#configure-database-user-rds} - -管理者ユーザーとして RDS MySQL インスタンスに接続し、以下のコマンドを実行します。 - -1. ClickPipes 用の専用ユーザーを作成します。 - - ```sql - CREATE USER 'clickpipes_user'@'host' IDENTIFIED BY 'some-password'; - ``` - -2. スキーマ権限を付与します。以下の例は `mysql` データベースの権限を示しています。複製したい各データベースおよびホストについて、これらのコマンドを繰り返します。 - - ```sql - GRANT SELECT ON `mysql`.* TO 'clickpipes_user'@'host'; - ``` - -3. ユーザーにレプリケーション権限を付与します。 - - ```sql - GRANT REPLICATION CLIENT ON *.* TO 'clickpipes_user'@'%'; - GRANT REPLICATION SLAVE ON *.* TO 'clickpipes_user'@'%'; - ``` - -## ネットワークアクセスを構成する {#configure-network-access} - -### IP ベースのアクセス制御 {#ip-based-access-control} - -RDS インスタンスへのトラフィックを制限したい場合は、RDS セキュリティグループの `Inbound rules` に [文書化された静的 NAT IPs](../../index.md#list-of-static-ips) を追加してください。 - - - - - -### AWS PrivateLink 経由のプライベートアクセス {#private-access-via-aws-privatelink} - -プライベートネットワークを介して RDS インスタンスに接続するには、AWS PrivateLink を使用できます。接続を設定するには、私たちの [ClickPipes 用の AWS PrivateLink 設定ガイド](/knowledgebase/aws-privatelink-setup-for-clickpipes) を参照してください。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/mysql/source/rds.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/mysql/source/rds.md.hash deleted file mode 100644 index 7fc031b1396..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/mysql/source/rds.md.hash +++ /dev/null @@ -1 +0,0 @@ -d9e776f578a6c85a diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/mysql/source/rds_maria.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/mysql/source/rds_maria.md deleted file mode 100644 index 7e6956a2737..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/mysql/source/rds_maria.md +++ /dev/null @@ -1,116 +0,0 @@ ---- -sidebar_label: 'Amazon RDS MariaDB' -description: 'ClickPipes のソースとして Amazon RDS MariaDB を設定する手順ガイド' -slug: '/integrations/clickpipes/mysql/source/rds_maria' -title: 'RDS MariaDB ソースセットアップガイド' ---- - -import rds_backups from '@site/static/images/integrations/data-ingestion/clickpipes/mysql/source/rds/rds-backups.png'; -import rds_config from '@site/static/images/integrations/data-ingestion/clickpipes/mysql/parameter_group/rds_config.png'; -import edit_button from '@site/static/images/integrations/data-ingestion/clickpipes/mysql/parameter_group/edit_button.png'; -import binlog_format from '@site/static/images/integrations/data-ingestion/clickpipes/mysql/parameter_group/binlog_format.png'; -import binlog_row_image from '@site/static/images/integrations/data-ingestion/clickpipes/mysql/parameter_group/binlog_row_image.png'; -import binlog_row_metadata from '@site/static/images/integrations/data-ingestion/clickpipes/mysql/parameter_group/binlog_row_metadata.png'; -import security_group_in_rds_mysql from '@site/static/images/integrations/data-ingestion/clickpipes/mysql/source/rds/security-group-in-rds-mysql.png'; -import edit_inbound_rules from '@site/static/images/integrations/data-ingestion/clickpipes/postgres/source/rds/edit_inbound_rules.png'; -import Image from '@theme/IdealImage'; - - -# RDS MariaDB ソースセットアップガイド - -これは、MySQL ClickPipe を介してデータを複製するために RDS MariaDB インスタンスを構成する方法のステップバイステップガイドです。 -
-:::info -MySQL FAQ の確認もお勧めします [こちら](/integrations/data-ingestion/clickpipes/mysql/faq.md)。FAQ ページは定期的に更新されています。 -::: - -## バイナリログの保持を有効にする {#enable-binlog-retention-rds} -バイナリログは、MySQL サーバーインスタンスに対して行われたデータの変更に関する情報を含むログファイルのセットです。複製にはバイナリログファイルが必要です。以下の両方のステップを実行する必要があります: - -### 1. 自動バックアップ経由でバイナリログを有効にする {#enable-binlog-logging-rds} - -自動バックアップ機能は、MySQL のバイナリログがオンまたはオフになっているかどうかを決定します。AWS コンソールで設定できます: - - - -複製の使用ケースに応じて、バックアップの保持期間を適切な長さに設定することが推奨されます。 - -### 2. バイナリログ保持時間 {#binlog-retention-hours-rds} -Amazon RDS for MariaDB では、バイナリログの保持期間を設定するための異なる方法があり、これは変更を含むバイナリログファイルが保持される時間のことを指します。バイナリログファイルが削除される前に変更が読み取られない場合、複製は続行できなくなります。バイナリログ保持時間のデフォルト値は NULL で、これはバイナリログが保持されていないことを意味します。 - -DB インスタンスでバイナリログの保持時間を指定するには、mysql.rds_set_configuration 関数を使用し、複製が行われるのに十分なバイナリログ保持期間を指定します。推奨される最小値は「24 時間」です。 - -```text -mysql=> call mysql.rds_set_configuration('binlog retention hours', 24); -``` - -## パラメータグループでのバイナリログ設定の構成 {#binlog-parameter-group-rds} - -パラメータグループは、RDS コンソールで MariaDB インスタンスをクリックし、`Configurations` タブに移動することで見つけることができます。 - - - -パラメータグループリンクをクリックすると、パラメータグループリンクページに移動します。右上に「Edit」ボタンが表示されます: - - - -設定は以下の通りにする必要があります: - -1. `binlog_format` を `ROW` に設定します。 - - - -2. `binlog_row_metadata` を `FULL` に設定します。 - - - -3. `binlog_row_image` を `FULL` に設定します。 - - - -次に、右上の「Save Changes」をクリックします。変更を有効にするにはインスタンスを再起動する必要がある場合があります。RDS インスタンスの Configurations タブのパラメータグループリンクの横に「Pending reboot」と表示されている場合は、インスタンスの再起動が必要であることを示す良いサインです。 - -
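-変更後にインスタンスを再起動したら、各設定値が意図したとおりになっているかを、たとえば次のクエリで確認できます(`binlog_row_metadata` をサポートする MariaDB バージョンを想定した確認用の一例です)。
-
-```sql
--- 期待値: binlog_format = ROW, binlog_row_metadata = FULL, binlog_row_image = FULL
-SELECT @@binlog_format, @@binlog_row_metadata, @@binlog_row_image;
-```
-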
-:::tip -MariaDB クラスターがある場合、上記のパラメータは [DB クラスター](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/USER_WorkingWithParamGroups.CreatingCluster.html) パラメータグループに見つかり、DB インスタンスグループには見つかりません。 -::: - -## GTID モードを有効にする {#gtid-mode-rds} -グローバルトランザクション識別子 (GTID) は、MySQL/MariaDB でコミットされた各トランザクションに割り当てられる一意の ID です。これにより、バイナリログの複製が簡素化され、トラブルシューティングが容易になります。MariaDB では、GTID モードがデフォルトで有効になっているため、使用するためのユーザーアクションは必要ありません。 - -## データベースユーザーの構成 {#configure-database-user-rds} - -管理者ユーザーとして RDS MariaDB インスタンスに接続し、次のコマンドを実行します: - -1. ClickPipes 用の専用ユーザーを作成します: - - ```sql - CREATE USER 'clickpipes_user'@'host' IDENTIFIED BY 'some-password'; - ``` - -2. スキーマ権限を付与します。以下の例は `mysql` データベースの権限を示しています。複製したい各データベースとホストに対してこれらのコマンドを繰り返します: - - ```sql - GRANT SELECT ON `mysql`.* TO 'clickpipes_user'@'host'; - ``` - -3. ユーザーに複製権限を付与します: - - ```sql - GRANT REPLICATION CLIENT ON *.* TO 'clickpipes_user'@'%'; - GRANT REPLICATION SLAVE ON *.* TO 'clickpipes_user'@'%'; - ``` - -## ネットワークアクセスの構成 {#configure-network-access} - -### IP ベースのアクセス制御 {#ip-based-access-control} - -RDS インスタンスへのトラフィックを制限したい場合は、RDS セキュリティグループの `Inbound rules` に [文書化された静的 NAT IP](../../index.md#list-of-static-ips) を追加してください。 - - - - - -### AWS PrivateLink 経由のプライベートアクセス {#private-access-via-aws-privatelink} - -プライベートネットワークを介して RDS インスタンスに接続するには、AWS PrivateLink を使用できます。接続の設定については、[ClickPipes 用の AWS PrivateLink セットアップガイド](/knowledgebase/aws-privatelink-setup-for-clickpipes) を参照してください。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/mysql/source/rds_maria.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/mysql/source/rds_maria.md.hash deleted file mode 100644 index 9f87e3e1d0d..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/mysql/source/rds_maria.md.hash +++ /dev/null @@ -1 +0,0 @@ -9859cd7ba00651e0 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/object-storage.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/object-storage.md deleted file mode 100644 index 3244fd15f8e..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/object-storage.md +++ /dev/null @@ -1,183 +0,0 @@ ---- -sidebar_label: 'ClickPipes for Object Storage' -description: 'Seamlessly connect your object storage to ClickHouse Cloud.' 
-slug: '/integrations/clickpipes/object-storage' -title: 'Integrating Object Storage with ClickHouse Cloud' ---- - -import S3svg from '@site/static/images/integrations/logos/amazon_s3_logo.svg'; -import Gcssvg from '@site/static/images/integrations/logos/gcs.svg'; -import DOsvg from '@site/static/images/integrations/logos/digitalocean.svg'; -import ABSsvg from '@site/static/images/integrations/logos/azureblobstorage.svg'; -import cp_step0 from '@site/static/images/integrations/data-ingestion/clickpipes/cp_step0.png'; -import cp_step1 from '@site/static/images/integrations/data-ingestion/clickpipes/cp_step1.png'; -import cp_step2_object_storage from '@site/static/images/integrations/data-ingestion/clickpipes/cp_step2_object_storage.png'; -import cp_step3_object_storage from '@site/static/images/integrations/data-ingestion/clickpipes/cp_step3_object_storage.png'; -import cp_step4a from '@site/static/images/integrations/data-ingestion/clickpipes/cp_step4a.png'; -import cp_step4a3 from '@site/static/images/integrations/data-ingestion/clickpipes/cp_step4a3.png'; -import cp_step4b from '@site/static/images/integrations/data-ingestion/clickpipes/cp_step4b.png'; -import cp_step5 from '@site/static/images/integrations/data-ingestion/clickpipes/cp_step5.png'; -import cp_success from '@site/static/images/integrations/data-ingestion/clickpipes/cp_success.png'; -import cp_remove from '@site/static/images/integrations/data-ingestion/clickpipes/cp_remove.png'; -import cp_destination from '@site/static/images/integrations/data-ingestion/clickpipes/cp_destination.png'; -import cp_overview from '@site/static/images/integrations/data-ingestion/clickpipes/cp_overview.png'; -import Image from '@theme/IdealImage'; - - - -# Object StorageをClickHouse Cloudと統合する -Object Storage ClickPipesは、Amazon S3、Google Cloud Storage、Azure Blob Storage、DigitalOcean SpacesからClickHouse Cloudにデータを取り込むためのシンプルで堅牢な方法を提供します。一次的および継続的な取り込みの両方がサポートされており、正確な一次のセマンティクスを実現しています。 - -## 前提条件 {#prerequisite} -[ClickPipesのイントロ](./index.md)に目を通していることが必要です。 - -## 最初のClickPipeを作成する {#creating-your-first-clickpipe} - -1. クラウドコンソールで、左側のメニューから`Data Sources`ボタンを選択し、「ClickPipeの設定」をクリックします。 - - - -2. データソースを選択します。 - - - -3. ClickPipeに名前、説明(オプション)、IAMロールまたは資格情報、バケットURLを提供してフォームに記入します。bashのようなワイルドカードを使用して複数のファイルを指定できます。詳細については、[パス内のワイルドカード使用に関するドキュメント](#limitations)を参照してください。 - - - -4. UIに指定されたバケット内のファイルのリストが表示されます。データ形式を選択し(現在はClickHouse形式のサブセットをサポートしています)、継続的な取り込みを有効にするかどうかを選択します。[詳細はこちら](#continuous-ingest)。 - - - -5. 次のステップでは、新しいClickHouseテーブルにデータを取り込むか、既存のテーブルを再利用するかを選択できます。画面の指示に従って、テーブル名、スキーマ、および設定を変更します。変更内容のリアルタイムプレビューをサンプルテーブルの上部に表示します。 - - - - 提供されたコントロールを使用して高度な設定もカスタマイズできます。 - - - -6. 代わりに、既存のClickHouseテーブルにデータを取り込むこともできます。その場合、UIはソースから選択した宛先テーブルのClickHouseフィールドにマッピングするフィールドを指定できます。 - - - -:::info -仮想カラムをフィールドにマッピングすることもできます。[仮想カラム](../../sql-reference/table-functions/s3#virtual-columns)のように、`_path`や`_size`などの。 -::: - -7. 最後に、内部ClickPipesユーザーのための権限を設定できます。 - - **権限:** ClickPipesはデータを宛先テーブルに書き込むための専用ユーザーを作成します。この内部ユーザーの役割をカスタム役割または以下のいずれかの事前定義された役割から選択できます: - - `完全なアクセス`:クラスターへの完全なアクセス権を持ちます。宛先テーブルでMaterialized ViewまたはDictionaryを使用する場合に必要です。 - - `宛先テーブルのみ`:宛先テーブルに対する`INSERT`権限のみを持ちます。 - - - -8. 「セットアップを完了」をクリックすると、システムがClickPipeを登録し、サマリーテーブルに表示されます。 - - - - - - サマリーテーブルには、ClickHouse内のソースまたは宛先テーブルからサンプルデータを表示するためのコントロールが提供されます。 - - - - また、ClickPipeを削除したり、取り込みジョブの概要を表示するためのコントロールもあります。 - - - -9. 
**おめでとうございます!** 最初のClickPipeを設定しました。これはストリーミングClickPipeであれば、リモートデータソースからリアルタイムでデータを継続的に取り込みます。さもなければ、バッチを取り込み、完了します。 - -## サポートされているデータソース {#supported-data-sources} - -| 名前 |ロゴ|タイプ| ステータス | 説明 | -|----------------------|----|----|-----------------|------------------------------------------------------------------------------------------------------| -| Amazon S3 ||オブジェクトストレージ| 安定 | Object Storageからの大量データを取り込むためにClickPipesを構成します。 | -| Google Cloud Storage ||オブジェクトストレージ| 安定 | Object Storageからの大量データを取り込むためにClickPipesを構成します。 | -| DigitalOcean Spaces | | オブジェクトストレージ | 安定 | Object Storageからの大量データを取り込むためにClickPipesを構成します。 | -| Azure Blob Storage | | オブジェクトストレージ | プライベートベータ | Object Storageからの大量データを取り込むためにClickPipesを構成します。 | - -今後、クリックパイプに新しいコネクタが追加される予定です。詳細については[お問合せ](https://clickhouse.com/company/contact?loc=clickpipes)ください。 - -## サポートされているデータフォーマット {#supported-data-formats} - -サポートされているフォーマットは: -- [JSON](/interfaces/formats/JSON) -- [CSV](/interfaces/formats/CSV) -- [Parquet](/interfaces/formats/Parquet) -- [Avro](/interfaces/formats/Avro) - -## 正確な一次セマンティクス {#exactly-once-semantics} - -大規模データセットを取り込む際、さまざまなタイプの障害が発生する可能性があり、部分的な挿入や重複データを生じることがあります。Object Storage ClickPipesは挿入失敗に耐性があり、正確な一次セマンティクスを提供します。これは、一時的な「ステージング」テーブルを使用することで実現されます。データは最初にステージングテーブルに挿入されます。この挿入に問題が発生した場合、ステージングテーブルを切り捨てることができ、挿入をクリーンな状態から再試行できます。挿入が完了し成功したときのみ、ステージングテーブルのパーティションはターゲットテーブルに移動されます。この戦略についての詳細は、[このブログ記事](https://clickhouse.com/blog/supercharge-your-clickhouse-data-loads-part3)を確認してください。 - -### ビューサポート {#view-support} -ターゲットテーブルでのMaterialized Viewもサポートされています。ClickPipesはターゲットテーブルだけでなく、依存するMaterialized Viewのためにもステージングテーブルを作成します。 - -非Materialized Viewについてはステージングテーブルは作成されません。これは、ターゲットテーブルにダウストリームのMaterialized Viewがある場合、これらのMaterialized Viewはターゲットテーブルからのビューを介してデータを選択することを避けるべきであることを意味します。そうでない場合、Materialized Viewにデータが欠落することがあります。 - -## スケーリング {#scaling} - -Object Storage ClickPipesは、[設定された垂直自動スケーリング設定](/manage/scaling#configuring-vertical-auto-scaling)によって決定される最小ClickHouseサービスサイズに基づいてスケールされます。ClickPipeのサイズは、パイプが作成されたときに決定されます。ClickHouseサービス設定のその後の変更は、ClickPipeサイズに影響を与えません。 - -大規模な取り込みジョブのスループットを増加させるために、ClickPipeを作成する前にClickHouseサービスをスケーリングすることをお勧めします。 - -## 制限事項 {#limitations} -- 宛先テーブル、そこにあるMaterialized View(カスケードMaterialized Viewを含む)、またはMaterialized Viewのターゲットテーブルへの変更は、自動的にはパイプに反映されず、エラーが発生する可能性があります。パイプを停止し、必要な修正を行った後、変更を反映させてエラーや重複データを避けるために再起動する必要があります。 -- サポートされているビューのタイプには制限があります。[正確な一次セマンティクス](#exactly-once-semantics)および[ビューサポート](#view-support)のセクションをお読みください。 -- GCPまたはAzureにデプロイされたClickHouse CloudインスタンスのS3 ClickPipesではロール認証が利用できません。これはAWSのClickHouse Cloudインスタンスでのみサポートされています。 -- ClickPipesは、サイズが10GB以下のオブジェクトのみを取り込むことを試みます。ファイルが10GBを超える場合、エラーがClickPipes専用のエラーテーブルに追加されます。 -- S3 / GCS ClickPipesは、[S3テーブル関数](/sql-reference/table-functions/s3)とリストシンタックスを共有しませんし、Azureは[AzureBlobStorageテーブル関数](/sql-reference/table-functions/azureBlobStorage)とも共有しません。 - - `?` — 任意の単一の文字を置き換えます。 - - `*` — 空文字列を含め、任意の数の任意の文字を置き換えます。 - - `**` — 空文字列を含め、任意の数の任意の文字を置き換えます。 - -:::note -これは有効なパス(S3用)です: - -https://datasets-documentation.s3.eu-west-3.amazonaws.com/http/**.ndjson.gz - - -これは無効なパスです。`{N..M}`はClickPipesではサポートされていません。 - -https://datasets-documentation.s3.eu-west-3.amazonaws.com/http/{documents-01,documents-02}.ndjson.gz -::: - -## 継続的取り込み {#continuous-ingest} -ClickPipesは、S3、GCS、Azure Blob Storage、DigitalOcean 
Spacesからの継続的な取り込みをサポートしています。有効にすると、ClickPipesは指定されたパスからデータを継続的に取り込み、30秒ごとに新しいファイルをポーリングします。ただし、新しいファイルは最後に取り込まれたファイルよりも辞書的に大きくなければなりません。つまり、取り込みの順序を定義する方法で名前が付けられている必要があります。たとえば、`file1`、`file2`、`file3`などのファイルは順に取り込まれます。`file0`のように名前が付けられた新しいファイルが追加された場合、ClickPipesはそれを取り込まず、最後に取り込まれたファイルよりも辞書的に大きくないためです。 - -## アーカイブテーブル {#archive-table} -ClickPipesは、宛先テーブルの隣に`s3_clickpipe__archive`という接尾辞のテーブルを作成します。このテーブルは、ClickPipeによって取り込まれたすべてのファイルのリストを含みます。このテーブルは取り込み中のファイルを追跡するために使用され、ファイルが取り込まれたかどうかを確認するために使用できます。アーカイブテーブルには、[TTL](/engines/table-engines/mergetree-family/mergetree#table_engine-mergetree-ttl)が7日間設定されています。 - -:::note -これらのテーブルはClickHouse Cloud SQLコンソールでは表示されません。HTTPSまたはネイティブ接続を使用して外部クライアントから接続して読む必要があります。 -::: - -## 認証 {#authentication} - -### S3 {#s3} -特別な設定なしでパブリックバケットにアクセスでき、保護されたバケットには[IAM資格情報](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html)または[IAMロール](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles.html)を使用できます。 -IAMロールを使用するには、[このガイド](/cloud/security/secure-s3)で指定されているようにIAMロールを作成する必要があります。作成後に新しいIAMロールArnをコピーし、それをClickPipeの設定に「IAM ARNロール」として貼り付けます。 - -### GCS {#gcs} -S3と同様に、設定なしでパブリックバケットにアクセスでき、保護されたバケットにはAWS IAM資格情報の代わりに[HMACキー](https://cloud.google.com/storage/docs/authentication/managing-hmackeys)を使用できます。このキーのセットアップ方法に関するGoogle Cloudの[ガイド](https://cloud.google.com/storage/docs/authentication/hmackeys)を読むことができます。 - -GCSのサービスアカウントは直接サポートされていません。非公開バケットで認証する際にはHMAC(IAM)資格情報を使用する必要があります。 -HMAC資格情報に付属するサービスアカウントの権限は`storage.objects.list`および`storage.objects.get`である必要があります。 - -### DigitalOcean Spaces {#dospaces} -現在、デジタルオーシャンスペースには保護されたバケットのみがサポートされています。バケットとそのファイルにアクセスするためには、「Access Key」と「Secret Key」が必要です。アクセストークンの作成方法については、[このガイド](https://docs.digitalocean.com/products/spaces/how-to/manage-access/)をお読みください。 - -### Azure Blob Storage {#azureblobstorage} -現在、Azure Blob Storageでは保護されたバケットのみがサポートされています。認証は接続文字列によって行われ、アクセスキーと共有キーをサポートします。詳細については、[このガイド](https://learn.microsoft.com/en-us/azure/storage/common/storage-configure-connection-string)をお読みください。 - -## よくある質問 {#faq} - -- **ClickPipesは`gs://`でプレフィックスされたGCSバケットをサポートしていますか?** - -サポートしていません。相互運用性の理由から、`gs://`バケットプレフィックスを`https://storage.googleapis.com/`に置き換えることをお勧めします。 - -- **GCSのパブリックバケットにはどのような権限が必要ですか?** - -`allUsers`には適切な役割の割り当てが必要です。`roles/storage.objectViewer`の役割はバケットレベルで付与される必要があります。この役割により、ClickPipesがバケット内のすべてのオブジェクトをリスト化するために必要な`storage.objects.list`権限が提供されます。この役割には、バケット内の個々のオブジェクトを読み取るまたはダウンロードするために必要な`storage.objects.get`権限も含まれています。詳細情報については、[Google Cloudのアクセス制御](https://cloud.google.com/storage/docs/access-control/iam-roles)を参照してください。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/object-storage.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/object-storage.md.hash deleted file mode 100644 index 5cad283945c..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/object-storage.md.hash +++ /dev/null @@ -1 +0,0 @@ -166256b658263f4b diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/postgres/add_table.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/postgres/add_table.md deleted file mode 100644 index 9cc5a79b08b..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/postgres/add_table.md +++ /dev/null @@ -1,28 +0,0 @@ ---- -title: 'ClickPipe に特定のテーブルを追加する' -description: '特定のテーブルを ClickPipe 
に追加する手順を説明します。'
-sidebar_label: 'テーブルの追加'
-slug: '/integrations/clickpipes/postgres/add_table'
-show_title: false
----
-
-import Image from '@theme/IdealImage';
-import add_table from '@site/static/images/integrations/data-ingestion/clickpipes/postgres/add_table.png'
-
-
-# ClickPipeに特定のテーブルを追加する
-
-パイプに特定のテーブルを追加したい場面があります。トランザクションや分析のワークロードがスケールするにつれて、これはよくある要件になります。
-
-## ClickPipeに特定のテーブルを追加する手順 {#add-tables-steps}
-
-以下の手順で実行できます:
-1. パイプを[一時停止](./pause_and_resume.md)します。
-2. 「テーブル設定を編集」をクリックします。
-3. 対象のテーブルを見つけます - 検索バーで検索できます。
-4. チェックボックスをクリックしてテーブルを選択します。
-
- - -5. 更新をクリックします。 -6. 更新が成功すると、パイプは `Setup`、`Snapshot`、`Running` のステータスをその順番で持ちます。テーブルの初期ロードは **Tables** タブで追跡できます。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/postgres/add_table.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/postgres/add_table.md.hash deleted file mode 100644 index 95b09e85649..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/postgres/add_table.md.hash +++ /dev/null @@ -1 +0,0 @@ -e5dae695fa594f88 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/postgres/deduplication.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/postgres/deduplication.md deleted file mode 100644 index 35cb837eb05..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/postgres/deduplication.md +++ /dev/null @@ -1,210 +0,0 @@ ---- -sidebar_label: '重複除去戦略' -description: '重複と削除された行の処理。' -slug: '/integrations/clickpipes/postgres/deduplication' -title: '重複除去戦略 (CDCを使用)' ---- - -import clickpipes_initial_load from '@site/static/images/integrations/data-ingestion/clickpipes/postgres/postgres-cdc-initial-load.png'; -import Image from '@theme/IdealImage'; - -Updates and deletes replicated from Postgres to ClickHouse result in duplicated rows in ClickHouse due to its data storage structure and the replication process. This page covers why this happens and the strategies to use in ClickHouse to handle duplicates. - -## データはどのようにレプリケートされますか? {#how-does-data-get-replicated} - -### PostgreSQLの論理デコーディング {#PostgreSQL-logical-decoding} - -ClickPipesは、[Postgres Logical Decoding](https://www.pgedge.com/blog/logical-replication-evolution-in-chronological-order-clustering-solution-built-around-logical-replication)を使用して、Postgres内で発生する変更を消費します。Postgresの論理デコーディングプロセスにより、ClickPipesのようなクライアントは、INSERT、UPDATE、およびDELETEの一連の操作として、人間が読みやすい形式で変更を受け取ることができます。 - -### ReplacingMergeTree {#replacingmergetree} - -ClickPipesは、[ReplacingMergeTree](/engines/table-engines/mergetree-family/replacingmergetree)エンジンを使用してPostgresテーブルをClickHouseにマップします。ClickHouseは、追加専用のワークロードで最も良いパフォーマンスを発揮し、頻繁なUPDATEを推奨していません。ここで、ReplacingMergeTreeが特に強力になります。 - -ReplacingMergeTreeでは、更新は、新しいバージョン(`_peerdb_version`)を持つ行の挿入としてモデル化され、削除は新しいバージョンを持ち、`_peerdb_is_deleted`がtrueとしてマークされた挿入としてモデル化されます。ReplacingMergeTreeエンジンは、バックグラウンドでデデュプリケート/マージを行い、特定の主キー(id)の最新バージョンの行を保持し、バージョン付きのINSERTとしてUPDATEとDELETEを効率的に処理します。 - -以下は、ClickPipesによってClickHouseでテーブルを作成するために実行されたCREATE TABLEステートメントの例です。 - -```sql -CREATE TABLE users -( - `id` Int32, - `reputation` String, - `creationdate` DateTime64(6), - `displayname` String, - `lastaccessdate` DateTime64(6), - `aboutme` String, - `views` Int32, - `upvotes` Int32, - `downvotes` Int32, - `websiteurl` String, - `location` String, - `accountid` Int32, - `_peerdb_synced_at` DateTime64(9) DEFAULT now64(), - `_peerdb_is_deleted` Int8, - `_peerdb_version` Int64 -) -ENGINE = ReplacingMergeTree(_peerdb_version) -PRIMARY KEY id -ORDER BY id; -``` - -### 例示的な例 {#illustrative-example} - -以下のイラストは、PostgreSQLとClickHouse間のテーブル`users`の同期の基本的な例を示しています。 - - - -**ステップ1**では、PostgreSQL内の2行の初期スナップショットとClickPipesがそれらの2行をClickHouseに初期ロードしている様子が示されています。観察できるように、両方の行はそのままClickHouseにコピーされます。 - -**ステップ2**では、ユーザーテーブルに対する3つの操作が示されています:新しい行の挿入、既存の行の更新、別の行の削除。 - 
-**ステップ3**では、ClickPipesがINSERT、UPDATE、DELETE操作をClickHouseにバージョン付きの挿入としてレプリケートする様子が示されています。UPDATEはID2の行の新しいバージョンとして現れ、一方でDELETEはID1の新しいバージョンとして現れ、`_is_deleted`を使用してtrueとしてマークされます。このため、ClickHouseにはPostgreSQLに比べて3つの追加行があります。 - -その結果、`SELECT count(*) FROM users;`のようなシンプルなクエリを実行すると、ClickHouseとPostgreSQLで異なる結果が得られることがあります。[ClickHouseマージドキュメント](/merges#replacing-merges)によると、古くなった行のバージョンは最終的にマージプロセス中に破棄されます。しかし、このマージのタイミングは予測できず、ClickHouseのクエリはそれが行われるまで一貫性のない結果を返す可能性があります。 - -ClickHouseとPostgreSQLの両方で同じクエリ結果を保証するにはどうすればよいでしょうか? - -### FINALキーワードを使用してデデュプリケートする {#deduplicate-using-final-keyword} - -ClickHouseクエリでデデュプリケートデータを処理する推奨方法は、[FINAL修飾子](/sql-reference/statements/select/from#final-modifier)を使用することです。これにより、デデュプリケートされた行のみが返されます。 - -これを3つの異なるクエリに適用する方法を見てみましょう。 - -_次のクエリのWHERE句に注意してください。これは削除された行を除外するために使用されます。_ - -- **単純なカウントクエリ**:投稿の数をカウントします。 - -これは、同期が正常かどうかを確認するために実行できる最も簡単なクエリです。両方のクエリは同じカウントを返すべきです。 - -```sql --- PostgreSQL -SELECT count(*) FROM posts; - --- ClickHouse -SELECT count(*) FROM posts FINAL where _peerdb_is_deleted=0; -``` - -- **JOINを使用した単純な集計**:最も多くのビューを獲得した上位10ユーザー。 - -単一のテーブルに対する集計の例です。ここに重複があると、SUM関数の結果に大きな影響を与えるでしょう。 - -```sql --- PostgreSQL -SELECT - sum(p.viewcount) AS viewcount, - p.owneruserid as user_id, - u.displayname as display_name -FROM posts p -LEFT JOIN users u ON u.id = p.owneruserid --- highlight-next-line -WHERE p.owneruserid > 0 -GROUP BY user_id, display_name -ORDER BY viewcount DESC -LIMIT 10; - --- ClickHouse -SELECT - sum(p.viewcount) AS viewcount, - p.owneruserid AS user_id, - u.displayname AS display_name -FROM posts AS p -FINAL -LEFT JOIN users AS u -FINAL ON (u.id = p.owneruserid) AND (u._peerdb_is_deleted = 0) --- highlight-next-line -WHERE (p.owneruserid > 0) AND (p._peerdb_is_deleted = 0) -GROUP BY - user_id, - display_name -ORDER BY viewcount DESC -LIMIT 10 -``` - -#### FINAL設定 {#final-setting} - -クエリ内の各テーブル名にFINAL修飾子を追加する代わりに、[FINAL設定](/operations/settings/settings#final)を使用して、クエリ内のすべてのテーブルに自動的に適用することができます。 - -この設定は、クエリごとまたはセッション全体に適用できます。 - -```sql --- クエリごとのFINAL設定 -SELECT count(*) FROM posts SETTINGS final = 1; - --- セッションに対してFINALを設定 -SET final = 1; -SELECT count(*) FROM posts; -``` - -#### ROWポリシー {#row-policy} - -冗長な`_peerdb_is_deleted = 0`フィルターを隠す簡単な方法は、[ROWポリシー](/operations/access-rights#row-policy-management)を使用することです。以下は、テーブルvotesのすべてのクエリから削除された行を除外するための行ポリシーを作成する例です。 - -```sql --- すべてのユーザーに行ポリシーを適用 -CREATE ROW POLICY cdc_policy ON votes FOR SELECT USING _peerdb_is_deleted = 0 TO ALL; -``` - -> 行ポリシーは、ユーザーとロールのリストに適用されます。この例では、すべてのユーザーとロールに適用されています。これは特定のユーザーやロールのみに調整できます。 - -### PostgreSQLのようにクエリする {#query-like-with-postgres} - -PostgreSQLからClickHouseに分析データセットを移行するには、データ処理とクエリ実行の違いを考慮してアプリケーションクエリを変更する必要があります。 - -このセクションでは、オリジナルのクエリを変更せずにデータをデデュプリケートする技術を探ります。 - -#### ビュー {#views} - -[ビュー](/sql-reference/statements/create/view#normal-view)は、クエリからFINALキーワードを隠すのに最適な方法です。なぜなら、ビューはデータを格納せず、各アクセス時に別のテーブルから単に読み込みを行うからです。 - -以下に、削除された行のフィルターとFINALキーワードを使用して、ClickHouseのデータベース内の各テーブルのビューを作成する例を示します。 - -```sql -CREATE VIEW posts_view AS SELECT * FROM posts FINAL WHERE _peerdb_is_deleted=0; -CREATE VIEW users_view AS SELECT * FROM users FINAL WHERE _peerdb_is_deleted=0; -CREATE VIEW votes_view AS SELECT * FROM votes FINAL WHERE _peerdb_is_deleted=0; -CREATE VIEW comments_view AS SELECT * FROM comments FINAL WHERE _peerdb_is_deleted=0; -``` - -その後、PostgreSQLで使用するのと同じクエリを使ってビューをクエリできます。 - -```sql --- 最も閲覧された投稿 -SELECT - sum(viewcount) AS viewcount, - owneruserid -FROM posts_view -WHERE owneruserid > 0 -GROUP BY owneruserid -ORDER BY viewcount 
DESC -LIMIT 10 -``` - -#### 更新可能なマテリアライズドビュー {#refreshable-material-view} - -別のアプローチとして、[更新可能なマテリアライズドビュー](/materialized-view/refreshable-materialized-view)を使用することができます。これにより、行のデデュプリケーションのためのクエリの実行をスケジュールし、その結果を宛先テーブルに保存できます。各スケジュールされた更新時に、宛先テーブルは最新のクエリ結果に置き換えられます。 - -この方法の主な利点は、FINALキーワードを使用するクエリが更新時に1回だけ実行され、その後の宛先テーブルに対するクエリでFINALを使用する必要がなくなることです。 - -ただし、欠点は、宛先テーブルのデータは最も最近の更新時点のものに過ぎないということです。それでも、多くのユースケースでは、数分から数時間の更新間隔が十分であるかもしれません。 - -```sql --- デデュプリケートされた投稿テーブルの作成 -CREATE TABLE deduplicated_posts AS posts; - --- マテリアライズドビューを作成し、毎時実行されるようにスケジュール -CREATE MATERIALIZED VIEW deduplicated_posts_mv REFRESH EVERY 1 HOUR TO deduplicated_posts AS -SELECT * FROM posts FINAL WHERE _peerdb_is_deleted=0 -``` - -その後、`deduplicated_posts`テーブルを通常通りクエリできます。 - -```sql -SELECT - sum(viewcount) AS viewcount, - owneruserid -FROM deduplicated_posts -WHERE owneruserid > 0 -GROUP BY owneruserid -ORDER BY viewcount DESC -LIMIT 10; -``` diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/postgres/deduplication.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/postgres/deduplication.md.hash deleted file mode 100644 index 0bbe3dbe5ea..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/postgres/deduplication.md.hash +++ /dev/null @@ -1 +0,0 @@ -37c763214365abae diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/postgres/faq.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/postgres/faq.md deleted file mode 100644 index 467005569c3..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/postgres/faq.md +++ /dev/null @@ -1,281 +0,0 @@ ---- -sidebar_label: 'FAQ' -description: 'Frequently asked questions about ClickPipes for Postgres.' -slug: '/integrations/clickpipes/postgres/faq' -sidebar_position: 2 -title: 'ClickPipes for Postgres FAQ' ---- - - - - - -# ClickPipes for Postgres FAQ - -### idlingは私のPostgres CDC ClickPipeにどのように影響しますか? {#how-does-idling-affect-my-postgres-cdc-clickpipe} - -あなたのClickHouse Cloudサービスがアイドル状態であっても、Postgres CDC ClickPipeはデータの同期を続けます。次回の同期間隔でサービスが起動して、受信データを処理します。同期が終了しアイドル期間に達すると、サービスは再びアイドル状態に戻ります。 - -例として、同期間隔が30分に設定され、サービスのアイドル時間が10分に設定されている場合、サービスは30分ごとに起動し、10分間アクティブになり、その後アイドル状態に戻ります。 - -### ClickPipes for PostgresではTOASTカラムはどのように処理されますか? {#how-are-toast-columns-handled-in-clickpipes-for-postgres} - -詳細については、[TOASTカラムの処理](./toast)ページをご覧ください。 - -### ClickPipes for Postgresでは生成されたカラムはどのように処理されますか? {#how-are-generated-columns-handled-in-clickpipes-for-postgres} - -詳細については、[Postgres生成カラム: 注意点とベストプラクティス](./generated_columns)ページをご覧ください。 - -### テーブルはPostgres CDCの一部となるために主キーを持っている必要がありますか? {#do-tables-need-to-have-primary-keys-to-be-part-of-postgres-cdc} - -はい、CDCのためには、テーブルは主キーまたは[REPLICA IDENTITY](https://www.postgresql.org/docs/current/sql-altertable.html#SQL-ALTERTABLE-REPLICA-IDENTITY)を持っている必要があります。REPLICA IDENTITYはFULLに設定するか、ユニークインデックスを使用するように構成することができます。 - -### パーティション化されたテーブルはPostgres CDCの一部としてサポートしていますか? {#do-you-support-partitioned-tables-as-part-of-postgres-cdc} - -はい、主キーまたはREPLICA IDENTITYが定義されている限り、パーティション化されたテーブルは標準でサポートされています。主キーとREPLICA IDENTITYは親テーブルとそのパーティションの両方に存在する必要があります。詳細については[こちら](https://blog.peerdb.io/real-time-change-data-capture-for-postgres-partitioned-tables)をご覧ください。 - -### 公開IPを持たないPostgresデータベースやプライベートネットワークにあるデータベースに接続できますか? 
{#can-i-connect-postgres-databases-that-dont-have-a-public-ip-or-are-in-private-networks} - -はい!ClickPipes for Postgresは、プライベートネットワーク内のデータベースに接続するための2つの方法を提供しています: - -1. **SSHトンネリング** - - ほとんどのユースケースでうまく機能します - - セットアップ手順については[こちら](/integrations/clickpipes/postgres#adding-your-source-postgres-database-connection)を参照してください - - すべてのリージョンで機能します - -2. **AWS PrivateLink** - - 次の3つのAWSリージョンで利用可能です: - - us-east-1 - - us-east-2 - - eu-central-1 - - 詳細なセットアップ手順については、[PrivateLinkドキュメント](/knowledgebase/aws-privatelink-setup-for-clickpipes)をご覧ください - - PrivateLinkが利用できないリージョンでは、SSHトンネリングを使用してください - -### UPDATEおよびDELETEはどのように処理しますか? {#how-do-you-handle-updates-and-deletes} - -ClickPipes for Postgresは、PostgresからのINSERTおよびUPDATEをClickHouse内の異なるバージョンを持つ新しい行としてキャプチャします(`_peerdb_`バージョンカラムを使用)。ReplacingMergeTreeテーブルエンジンは、順序キー(ORDER BYカラム)に基づいて定期的に重複除去をバックグラウンドで実行し、最新の`_peerdb_`バージョンを持つ行のみを保持します。 - -PostgresからのDELETEは削除されたことを示す新しい行として伝播します(`_peerdb_is_deleted`カラムを使用)。重複除去プロセスは非同期で行われるため、一時的に重複が見られることがあります。これを解決するには、クエリ層で重複除去を処理する必要があります。 - -詳細については以下を参照してください: - -* [ReplacingMergeTreeテーブルエンジンのベストプラクティス](https://docs.peerdb.io/bestpractices/clickhouse_datamodeling#replacingmergetree-table-engine) -* [PostgresからClickHouseへのCDC内部ブログ](https://clickhouse.com/blog/postgres-to-clickhouse-data-modeling-tips) - -### スキーマの変更をサポートしていますか? {#do-you-support-schema-changes} - -詳細については、[ClickPipes for Postgres: スキーマ変更の伝播サポート](./schema-changes)ページをご覧ください。 - -### ClickPipes for Postgres CDCのコストはどのようになりますか? {#what-are-the-costs-for-clickpipes-for-postgres-cdc} - -プレビュー中はClickPipesは無料です。GA以降の価格はまだ未定です。価格は合理的で、外部ETLツールと比べて非常に競争力のあるものであることを目指しています。 - -### レプリケーションスロットのサイズが増加したり減少しない場合、問題は何ですか? {#my-replication-slot-size-is-growing-or-not-decreasing-what-might-be-the-issue} - -Postgresのレプリケーションスロットのサイズが増加し続けている場合、または減少しない場合、それは通常、**WAL (Write-Ahead Log)レコードがCDCパイプラインまたはレプリケーションプロセスによって十分に早く消費されていないことを意味します**。以下は最も一般的な原因とその対処法です。 - -1. **データベースのアクティビティの急激なスパイク** - - 大規模なバッチ更新、大量の挿入、または重要なスキーマ変更などは、短時間で大量のWALデータを生成する可能性があります。 - - レプリケーションスロットは、これらのWALレコードが消費されるまで保持し、サイズが一時的に増加します。 - -2. **長時間実行されるトランザクション** - - オープントランザクションにより、Postgresはトランザクションが開始された時点以降に生成されたすべてのWALセグメントを保持する必要があるため、スロットサイズが大幅に増加する可能性があります。 - - `statement_timeout`および`idle_in_transaction_session_timeout`を合理的な値に設定して、トランザクションが無期限にオープンのままにならないようにします。このクエリを使用して、異常に長いトランザクションを特定できます: - ```sql - SELECT - pid, - state, - age(now(), xact_start) AS transaction_duration, - query AS current_query - FROM - pg_stat_activity - WHERE - xact_start IS NOT NULL - ORDER BY - age(now(), xact_start) DESC; - ``` - -3. **メンテナンスまたはユーティリティ操作 (例: `pg_repack`)** - - `pg_repack`などのツールは、テーブル全体を書き直すことができ、短時間で大量のWALデータを生成します。 - - これらの操作は、トラフィックが少ない時間帯にスケジュールするか、実行中にWAL使用量を注意深く監視します。 - -4. **VACUUMおよびVACUUM ANALYZE** - - データベースの健康に必要ですが、特に大きなテーブルをスキャンする場合は、追加のWALトラフィックを生成する可能性があります。 - - autovacuumの調整パラメータを利用するか、オフピーク時に手動のVACUUM操作をスケジュールすることを検討します。 - -5. **レプリケーションコンシューマがスロットを積極的に読み取っていない** - - CDCパイプライン(例: ClickPipes)または他のレプリケーションコンシューマが停止、休止、またはクラッシュすると、WALデータがスロットに蓄積されます。 - - パイプラインが継続的に実行されていることを確認し、接続や認証エラーのログをチェックします。 - -このトピックに関する詳細な分析は、ブログ記事[Postgres Logical Decodingの回避策](https://blog.peerdb.io/overcoming-pitfalls-of-postgres-logical-decoding#heading-beware-of-replication-slot-growth-how-to-monitor-it)を参照してください。 - -### Postgresのデータ型はClickHouseにどのようにマッピングされますか? 
{#how-are-postgres-data-types-mapped-to-clickhouse} - -ClickPipes for Postgresは、ClickHouse側でPostgresデータ型をできるだけネイティブにマッピングすることを目指しています。この文書では、各データ型とそのマッピングの包括的なリストを提供します:[データ型マトリックス](https://docs.peerdb.io/datatypes/datatype-matrix)。 - -### PostgresからClickHouseにデータを複製する際に独自のデータ型マッピングを定義できますか? {#can-i-define-my-own-data-type-mapping-while-replicating-data-from-postgres-to-clickhouse} - -現在、パイプの一部としてカスタムデータ型マッピングを定義することはサポートしていません。ただし、ClickPipesで使用されるデフォルトのデータ型マッピングは非常にネイティブです。Postgresのほとんどのカラムタイプは、ClickHouseのネイティブな同等物にできるだけ近く複製されます。たとえば、Postgresの整数配列タイプはClickHouseの整数配列タイプとして複製されます。 - -### JSONおよびJSONBカラムはPostgresからどのように複製されますか? {#how-are-json-and-jsonb-columns-replicated-from-postgres} - -JSONおよびJSONBカラムは、ClickHouseではString型として複製されます。ClickHouseはネイティブな[JSON型](/sql-reference/data-types/newjson)をサポートしているため、必要に応じてClickPipesテーブルの上にマテリアライズドビューを作成して変換を行うことができます。また、Stringカラムに対して[JSON関数](/sql-reference/functions/json-functions)を直接使用することもできます。JSONおよびJSONBカラムを直接ClickHouseのJSON型に複製する機能に取り組んでいます。この機能は数ヶ月内に利用可能になる予定です。 - -### ミラーが一時停止しているとき、挿入はどうなりますか? {#what-happens-to-inserts-when-a-mirror-is-paused} - -ミラーを一時停止すると、メッセージはソースPostgresのレプリケーションスロットにキューイングされ、バッファリングされて失われることはありません。ただし、ミラーを一時停止して再開すると接続が再確立され、ソースに応じてしばらく時間がかかることがあります。 - -このプロセス中、同期(PostgresからデータをプルしてClickHouseの生テーブルにストリーミングする操作)と正規化(生テーブルからターゲットテーブルへの操作)が中止されます。ただし、耐久性を持って再開するために必要な状態を保持します。 - -- 同期については、中途半端にキャンセルされた場合、Postgresのconfirmed_flush_lsnは進んでいないため、次回の同期は中止されたものと同じ位置から開始され、データの一貫性が確保されます。 -- 正規化については、ReplacingMergeTreeの挿入順序が重複除去を処理します。 - -要するに、同期および正規化プロセスは一時停止中に終了しますが、データの損失や不一致なしに再開できるため、安全です。 - -### ClickPipeの作成は自動化できるか、APIまたはCLIを使用できますか? {#can-clickpipe-creation-be-automated-or-done-via-api-or-cli} - -Postgres ClickPipeは、[OpenAPI](https://clickhouse.com/docs/cloud/manage/openapi)エンドポイントを介して作成および管理することもできます。この機能はベータ版であり、APIリファレンスは[こちら](https://clickhouse.com/docs/cloud/manage/api/swagger#tag/beta)にあります。Postgres ClickPipesを作成するためのTerraformサポートにも積極的に取り組んでいます。 - -### 初期ロードを高速化するにはどうすればよいですか? {#how-do-i-speed-up-my-initial-load} - -すでに実行中の初期ロードを加速することはできません。ただし、特定の設定を調整することで、今後の初期ロードを最適化できます。デフォルトでは、設定は4つの並列スレッドと、パーティションごとのスナップショット行数が100,000に設定されています。これらは高度な設定であり、ほとんどのユースケースには十分です。 - -Postgresバージョン13以下では、CTID範囲スキャンが遅く、これらの設定がより重要になります。その場合、パフォーマンスを向上させるために次のプロセスを検討してください: - -1. **既存のパイプを削除する**:新しい設定を適用するために必要です。 -2. **ClickHouseの宛先テーブルを削除する**:以前のパイプによって作成されたテーブルが削除されていることを確認します。 -3. **最適化された設定で新しいパイプを作成する**:一般的には、パーティションごとのスナップショット行数を100万から1000万の範囲に増やします。これは特定の要件とPostgresインスタンスが処理できる負荷に応じて行います。 - -これらの調整により、特に古いPostgresバージョンの初期ロードのパフォーマンスが大幅に向上します。Postgres 14以降を使用している場合、これらの設定の影響は少なくなります。 - -### レプリケーションを設定する際に公開物の範囲をどのように設定すべきですか? {#how-should-i-scope-my-publications-when-setting-up-replication} - -ClickPipesに公開物を管理させることができます(追加の権限が必要)し、自分で作成することもできます。ClickPipesが管理する公開物では、パイプを編集する際にテーブルの追加や削除を自動的に処理します。自己管理する場合は、レプリケーションが必要なテーブルのみを含むように公開物の範囲を注意深く設定してください。不要なテーブルを含めると、PostgresのWALデコードが遅くなります。 - -公開物にテーブルを含める場合は、そのテーブルに主キーまたは`REPLICA IDENTITY FULL`があることを確認してください。主キーのないテーブルを持っている場合、すべてのテーブルの公開物を作成すると、それらのテーブルに対するDELETEおよびUPDATE操作が失敗します。 - -データベース内の主キーのないテーブルを特定するには、このクエリを使用できます: -```sql -SELECT table_schema, table_name -FROM information_schema.tables -WHERE - (table_catalog, table_schema, table_name) NOT IN ( - SELECT table_catalog, table_schema, table_name - FROM information_schema.table_constraints - WHERE constraint_type = 'PRIMARY KEY') AND - table_schema NOT IN ('information_schema', 'pg_catalog', 'pgq', 'londiste'); -``` - -主キーのないテーブルを扱う際の選択肢は2つあります: - -1. 
**ClickPipesから主キーのないテーブルを除外する**: - 主キーを持つテーブルだけで公開物を作成します: - ```sql - CREATE PUBLICATION clickpipes_publication FOR TABLE table_with_primary_key1, table_with_primary_key2, ...; - ``` - -2. **ClickPipesに主キーのないテーブルを含める**: - 主キーのないテーブルを含めたい場合は、そのレプリカアイデンティティを`FULL`に変更する必要があります。これにより、UPDATEおよびDELETE操作が正しく機能します: - ```sql - ALTER TABLE table_without_primary_key1 REPLICA IDENTITY FULL; - ALTER TABLE table_without_primary_key2 REPLICA IDENTITY FULL; - CREATE PUBLICATION clickpipes_publication FOR TABLE <...>, <...>; - ``` - -:::tip -ClickPipesが管理するのではなく手動で公開物を作成する場合、`FOR ALL TABLES`という公開物の作成はお勧めしません。これにより、ClickPipesに対するPostgresからのトラフィックが増加し(パイプに含まれていない他のテーブルの変更を送信)全体的な効率が低下します。 - -手動で作成した公開物の場合は、パイプに追加する前に公開物にテーブルを追加してください。 -::: - -## 推奨される`max_slot_wal_keep_size`設定 {#recommended-max_slot_wal_keep_size-settings} - -- **最低限**:[`max_slot_wal_keep_size`](https://www.postgresql.org/docs/devel/runtime-config-replication.html#GUC-MAX-SLOT-WAL-KEEP-SIZE)を設定して、少なくとも**2日分の**WALデータを保持します。 -- **大規模データベース(高トランザクション量)**:1日あたりのピークWAL生成の少なくとも**2~3倍**を保持します。 -- **ストレージが制約されている環境**:ディスクの枯渇を避けつつ、レプリケーションの安定性を確保するために、慎重に調整します。 - -### 正しい値の計算方法 {#how-to-calculate-the-right-value} - -適切な設定を決定するために、WAL生成レートを測定します。 - -#### PostgreSQL 10以上の場合: {#for-postgresql-10} - -```sql -SELECT pg_wal_lsn_diff(pg_current_wal_insert_lsn(), '0/0') / 1024 / 1024 AS wal_generated_mb; -``` - -#### PostgreSQL 9.6以下の場合: {#for-postgresql-96-and-below} - -```sql -SELECT pg_xlog_location_diff(pg_current_xlog_insert_location(), '0/0') / 1024 / 1024 AS wal_generated_mb; -``` - -* 上記のクエリを1日の異なる時間に実行し、特にトランザクションが多い時間帯に実行します。 -* 24時間あたりに生成されるWALの量を計算します。 -* その数値を2または3倍して十分な保持を確保します。 -* `max_slot_wal_keep_size`をMBまたはGBで設定します。 - -#### 例: {#example} - -データベースが1日の間に100GBのWALを生成する場合、設定します: - -```sql -max_slot_wal_keep_size = 200GB -``` - -### レプリケーションスロットが無効化されています。どうすればよいですか? {#my-replication-slot-is-invalidated-what-should-i-do} - -ClickPipeを回復する唯一の方法は、設定ページでリスイートをトリガーすることです。 - -レプリケーションスロットの無効化の最も一般的な原因は、PostgreSQLデータベースの`max_slot_wal_keep_size`設定が低すぎることです(例:数GB)。この値を増やすことをお勧めします。[こちらのセクション](/integrations/clickpipes/postgres/faq#recommended-max_slot_wal_keep_size-settings)で`max_slot_wal_keep_size`の調整を参照してください。理想的には、200GB以上に設定して、レプリケーションスロットの無効化を防ぎます。 - -まれに、`max_slot_wal_keep_size`が設定されていない場合でもこの問題が発生することがあります。これはPostgreSQLの複雑でまれなバグによるものかもしれませんが、原因は不明のままです。 - -## ClickHouseがデータを取り込んでいる間にOut Of Memory(OOM)が発生しています。助けてくれますか? {#i-am-seeing-out-of-memory-ooms-on-clickhouse-while-my-clickpipe-is-ingesting-data-can-you-help} - -ClickHouseでのOOMの一般的な理由の1つは、サービスがサイズ不足であることです。これは、現在のサービス設定には、取り込み負荷を効果的に処理するための十分なリソース(例:メモリやCPU)がないことを意味します。ClickPipeデータ取り込みの要求に応じて、サービスのスケールアップを強くお勧めします。 - -また、下流のマテリアライズドビューに最適化されていない結合が存在することも観察されています: - -- JOINの一般的な最適化手法として、右側のテーブルが非常に大きい場合の`LEFT JOIN`があります。この場合、クエリを`RIGHT JOIN`に書き換え、大きなテーブルを左側に移動します。これにより、クエリプランナーがよりメモリ効率的に処理できます。 - -- JOINの別の最適化手法は、テーブルを`サブクエリ`または`CTE`を介して明示的にフィルタリングし、その後これらのサブクエリ間でJOINを行うことです。これにより、プランナーは行を効率的にフィルタリングおよびJOINを実行するためのヒントを得ることができます。 - -## 初期ロード中に`invalid snapshot identifier`が表示されます。どうすればよいですか? {#i-am-seeing-an-invalid-snapshot-identifier-during-the-initial-load-what-should-i-do} - -`invalid snapshot identifier`エラーは、ClickPipesとPostgresデータベース間の接続が断たれた場合に発生します。これは、ゲートウェイタイムアウト、データベースの再起動、またはその他の一時的な問題で発生する可能性があります。 - -初期ロードが進行中の間、Postgresデータベースでのアップグレードや再起動などの中断する操作を行わず、データベースへのネットワーク接続が安定していることを確認することをお勧めします。 - -この問題を解決するには、ClickPipes UIからリスイートをトリガーできます。これにより、初期ロードプロセスが最初から再開されます。 - -## Postgresで公開物を削除した場合はどうなりますか? 
{#what-happens-if-i-drop-a-publication-in-postgres} - -Postgresで公開物を削除すると、ClickPipeの接続が切断されます。公開物はClickPipeがソースから変更を取り込むために必要です。この場合、通常は公開物がもはや存在しないことを示すエラーアラートが表示されます。 - -公開物を削除した後にClickPipeを回復するには: - -1. Postgresで同じ名前と必要なテーブルを持つ新しい公開物を作成します。 -2. ClickPipeの設定タブで「テーブルをリスイート」ボタンをクリックします。 - -このリスイートは、再作成された公開物がPostgres内で異なるオブジェクト識別子(OID)を持つために必要です。同じ名前を持っていても、このプロセスは宛先テーブルを更新し、接続を復元します。 - -別の新しいパイプを作成することも可能です。 - -パーティション化されたテーブルを扱う場合は、適切な設定で公開物を作成していることを確認してください: - -```sql -CREATE PUBLICATION clickpipes_publication -FOR TABLE <...>, <...> -WITH (publish_via_partition_root = true); -``` - -## `Unexpected Datatype`エラーや`Cannot parse type XX ...`が表示される場合は? {#what-if-i-am-seeing-unexpected-datatype-errors} - -このエラーは通常、ソースのPostgresデータベースに取り込み中にマッピングできないデータ型が存在する場合に発生します。より具体的な問題については、以下の可能性を参照してください。 - -### `Cannot parse type Decimal(XX, YY), expected non-empty binary data with size equal to or less than ...` {#cannot-parse-type-decimal-expected-non-empty-binary-data-with-size-equal-to-or-less-than} - -Postgresの`NUMERIC`は非常に高い精度(小数点前131072桁、後16383桁まで)を持っており、ClickHouseのDecimal型は最大で(76桁、39スケール)です。システムは通常、そのサイズがそこまで大きくならないと仮定し、CDCフェーズ中に多くの行が来る可能性があるため、楽観的なキャストを行います。 - -現在の回避策は、ClickHouseでNUMERIC型を文字列にマッピングすることです。これを有効にするには、サポートチームにチケットを提出してください。これにより、あなたのClickPipesで有効化されます。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/postgres/faq.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/postgres/faq.md.hash deleted file mode 100644 index 24bcd1bc310..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/postgres/faq.md.hash +++ /dev/null @@ -1 +0,0 @@ -30b2cd3bbc44cd01 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/postgres/index.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/postgres/index.md deleted file mode 100644 index 90e3948e88f..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/postgres/index.md +++ /dev/null @@ -1,158 +0,0 @@ ---- -sidebar_label: 'Ingesting Data from Postgres to ClickHouse' -description: 'Seamlessly connect your Postgres to ClickHouse Cloud.' 
-slug: '/integrations/clickpipes/postgres' -title: 'Ingesting Data from Postgres to ClickHouse (using CDC)' ---- - -import BetaBadge from '@theme/badges/BetaBadge'; -import cp_service from '@site/static/images/integrations/data-ingestion/clickpipes/cp_service.png'; -import cp_step0 from '@site/static/images/integrations/data-ingestion/clickpipes/cp_step0.png'; -import postgres_tile from '@site/static/images/integrations/data-ingestion/clickpipes/postgres/postgres-tile.png' -import postgres_connection_details from '@site/static/images/integrations/data-ingestion/clickpipes/postgres/postgres-connection-details.jpg' -import ssh_tunnel from '@site/static/images/integrations/data-ingestion/clickpipes/postgres/ssh-tunnel.jpg' -import select_replication_slot from '@site/static/images/integrations/data-ingestion/clickpipes/postgres/select-replication-slot.jpg' -import select_destination_db from '@site/static/images/integrations/data-ingestion/clickpipes/postgres/select-destination-db.jpg' -import ch_permissions from '@site/static/images/integrations/data-ingestion/clickpipes/postgres/ch-permissions.jpg' -import Image from '@theme/IdealImage'; - - -# PostgresからClickHouseへのデータ取り込み(CDCを使用) - - - -:::info -現在、ClickPipesを使用してPostgresからClickHouse Cloudへのデータ取り込みはパブリックベータ版にあります。 -::: - - -ClickPipesを使用して、ソースのPostgresデータベースからClickHouse Cloudにデータを取り込むことができます。ソースのPostgresデータベースは、オンプレミスまたはクラウドにホストされていることができます(Amazon RDS、Google Cloud SQL、Azure Database for Postgres、Supabaseなどを含む)。 - - -## 前提条件 {#prerequisites} - -始めるには、まずPostgresデータベースが正しく設定されていることを確認する必要があります。ソースのPostgresインスタンスに応じて、以下のガイドのいずれかに従ってください: - -1. [Amazon RDS Postgres](./postgres/source/rds) - -2. [Amazon Aurora Postgres](./postgres/source/aurora) - -3. [Supabase Postgres](./postgres/source/supabase) - -4. [Google Cloud SQL Postgres](./postgres/source/google-cloudsql) - -5. [Azure Flexible Server for Postgres](./postgres/source/azure-flexible-server-postgres) - -6. [Neon Postgres](./postgres/source/neon-postgres) - -7. [Crunchy Bridge Postgres](./postgres/source/crunchy-postgres) - -8. [Generic Postgres Source](./postgres/source/generic)(他のPostgresプロバイダーを使用しているか、セルフホストのインスタンスを使用している場合) - -9. [TimescaleDB](./postgres/source/timescale)(マネージドサービスまたはセルフホストのインスタンスでTimescaleDB拡張機能を使用している場合) - - -:::warning - -PgBouncer、RDS Proxy、Supabase PoolerなどのPostgresプロキシは、CDCベースのレプリケーションに対応していません。ClickPipesのセットアップにはそれらを使用しないようにし、実際のPostgresデータベースの接続情報を追加してください。 - -::: - -ソースのPostgresデータベースが設定されたら、ClickPipeの作成を続けることができます。 - -## ClickPipeの作成 {#creating-your-clickpipe} - -ClickHouse Cloudアカウントにログインしていることを確認してください。まだアカウントをお持ちでない場合は、[こちら](https://cloud.clickhouse.com/)からサインアップできます。 - -[//]: # ( TODO update image here) -1. ClickHouse Cloudコンソールで、ClickHouse Cloudサービスに移動します。 - - - -2. 左側のメニューで「データソース」ボタンを選択し、「ClickPipeを設定」をクリックします。 - - - -3. 「Postgres CDC」タイルを選択します。 - - - -### ソースのPostgresデータベース接続の追加 {#adding-your-source-postgres-database-connection} - -4. 
前提条件ステップで構成したソースのPostgresデータベースの接続詳細を入力します。 - - :::info - - 接続詳細を追加する前に、クリックパイプのIPアドレスをファイアウォールルールにホワイトリストに追加していることを確認してください。ClickPipesのIPアドレスのリストは[こちら](../index.md#list-of-static-ips)で確認できます。 - さらなる情報については、[このページの先頭にリンクされているソースPostgres設定ガイドを参照してください](#prerequisites)。 - - ::: - - - -#### (オプショナル) AWSプライベートリンクの設定 {#optional-setting-up-aws-private-link} - -AWSにホストされているソースPostgresデータベースに接続するには、AWSプライベートリンクを使用できます。データ転送をプライベートに保ちたい場合に便利です。 -接続を設定するための[セットアップガイドをこちらで確認](../integrations/clickpipes/aws-privatelink)できます。 - -#### (オプショナル) SSHトンネリングの設定 {#optional-setting-up-ssh-tunneling} - -ソースのPostgresデータベースが公開されていない場合、SSHトンネリングの詳細を指定することができます。 - - -1. 「SSHトンネリングを使用する」トグルを有効にします。 -2. SSH接続詳細を入力します。 - - - -3. キーベースの認証を使用するには、「キーのペアを取り消して生成」をクリックして新しいキーのペアを生成し、生成された公開キーをSSHサーバーの`~/.ssh/authorized_keys`にコピーします。 -4. 「接続を確認」をクリックして接続を確認します。 - -:::note - -SSHバスティオンホストのファイアウォールルールに[ClickPipes IPアドレス](../clickpipes#list-of-static-ips)をホワイトリストに追加し、ClickPipesがSSHトンネルを確立できるようにしてください。 - -::: - -接続詳細の入力が完了したら、「次へ」をクリックします。 - -### レプリケーション設定の構成 {#configuring-the-replication-settings} - -5. 前提条件ステップで作成したレプリケーションスロットをドロップダウンリストから選択してください。 - - - -#### 高度な設定 {#advanced-settings} - -必要に応じて、高度な設定を構成できます。各設定の簡単な説明を以下に示します: - -- **同期間隔**:これは、ClickPipesがソースデータベースを変更のためにポーリングする間隔です。これは、コストに敏感なユーザーにとって重要で、高い値(`3600`以上)に設定することをお勧めします。 -- **初期ロードのための並列スレッド数**:これは、初期スナップショットを取得するために使用される並列ワーカーの数です。多数のテーブルがある場合、初期スナップショットを取得するために使用される並列ワーカーの数を制御したい場合に便利です。この設定はテーブルごとに適用されます。 -- **プルバッチサイズ**:単一バッチで取得する行の数です。これは最善を尽くす設定であり、すべてのケースで遵守されるわけではありません。 -- **パーティションごとのスナップショットの行数**:これは、初期スナップショット中に各パーティションで取得される行の数です。テーブルに多くの行がある場合、各パーティションで取得される行の数を制御したい場合に便利です。 -- **並列でのスナップショットテーブル数**:これは、初期スナップショット中に並列で取得されるテーブルの数です。多数のテーブルがある場合、並列で取得するテーブルの数を制御したい場合に便利です。 - - -### テーブルの構成 {#configuring-the-tables} - -6. ここで、ClickPipeの宛先データベースを選択できます。既存のデータベースを選択するか、新しいデータベースを作成できます。 - - - -7. ソースのPostgresデータベースからレプリケートしたいテーブルを選択できます。テーブルを選択する際、宛先のClickHouseデータベース内でテーブルの名前を変更したり、特定のカラムを除外したりすることもできます。 - - :::warning - ClickHouseでのOrdering KeyをPostgresの主キーと異なるように定義している場合は、[考慮事項](/integrations/clickpipes/postgres/ordering_keys)をすべてお読みください! - ::: - -### 権限を確認し、ClickPipeを開始 {#review-permissions-and-start-the-clickpipe} - -8. 権限のドロップダウンから「フルアクセス」ロールを選択し、「セットアップを完了」をクリックします。 - - - -## 次は何ですか? 
{#whats-next} - -PostgresからClickHouseにデータを移動した後の次の明白な質問は、ClickHouseでデータをクエリし、モデル化して最大限に活用する方法です。PostgreSQLからClickHouseへの移行方法に関する段階的アプローチについては、[移行ガイド](/migrations/postgresql/overview)を参照してください。移行ガイドに加えて、[重複排除戦略(CDC使用)](/integrations/clickpipes/postgres/deduplication)や[Ordering Keys](/integrations/clickpipes/postgres/ordering_keys)に関するページを確認して、重複を処理し、CDCを使用する際にOrdering Keysをカスタマイズする方法を理解してください。 - -最後に、一般的な問題とその解決方法に関する詳細は、["ClickPipes for Postgres FAQ"](/integrations/clickpipes/postgres/faq)ページを参照してください。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/postgres/index.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/postgres/index.md.hash deleted file mode 100644 index 94cc5585724..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/postgres/index.md.hash +++ /dev/null @@ -1 +0,0 @@ -3f4353dcfdbaba3c diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/postgres/maintenance.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/postgres/maintenance.md deleted file mode 100644 index d5477c565a7..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/postgres/maintenance.md +++ /dev/null @@ -1,18 +0,0 @@ ---- -sidebar_label: 'メンテナンスウィンドウ' -description: 'Postgres用のClickPipesのメンテナンスウィンドウ。' -slug: '/integrations/clickpipes/postgres/maintenance' -title: 'Postgres用ClickPipesのメンテナンスウィンドウ' ---- - - - - -# Postgres 用 ClickPipes のメンテナンスウィンドウ - -Postgres 用 ClickPipes のメンテナンスウィンドウが以下の日程で予定されています: -- **日付:** 2025年4月17日 -- **時間:** UTC 07:00 AM - 08:00 AM - -この時間帯に、あなたの Postgres Pipes は短期間ダウンタイムが発生します。 -メンテナンスウィンドウが終了した後、ClickPipes は再び利用可能になり、通常の操作に戻ります。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/postgres/maintenance.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/postgres/maintenance.md.hash deleted file mode 100644 index 7df5f499d0b..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/postgres/maintenance.md.hash +++ /dev/null @@ -1 +0,0 @@ -66d31508b0d50eec diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/postgres/ordering_keys.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/postgres/ordering_keys.md deleted file mode 100644 index b5b067978fe..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/postgres/ordering_keys.md +++ /dev/null @@ -1,56 +0,0 @@ ---- -sidebar_label: 'オーダリングキー' -description: 'カスタムオーダリングキーの定義方法。' -slug: '/integrations/clickpipes/postgres/ordering_keys' -title: 'オーダリングキー' ---- - - - -Ordering Keys (a.k.a. 
sorting keys) は、ClickHouseのテーブルのデータがディスク上でどのようにソートされ、インデックスされるかを定義します。Postgresからレプリケーションを行う場合、ClickPipesはPostgresの主キーをClickHouse内の対応するテーブルのOrdering Keyとして設定します。ほとんどの場合、Postgresの主キーは十分なOrdering Keyとして機能します。ClickHouseはすでに高速なスキャン用に最適化されているため、カスタムOrdering Keyはしばしば必要ありません。 - -[移行ガイド](/migrations/postgresql/data-modeling-techniques)で説明されているように、大規模なユースケースでは、ClickHouseのOrdering KeyにはPostgresの主キーに加えて追加のカラムを含めて、クエリを最適化することをお勧めします。 - -デフォルトでCDCを使用している場合、Postgresの主キーとは異なるOrdering Keyを選択すると、ClickHouseでデータの重複除去の問題が発生する可能性があります。これは、ClickHouseのOrdering Keyが二重の役割を果たすために発生します。つまり、データのインデックスとソートを制御すると同時に、重複除去のキーとして機能します。この問題に対処する最も簡単な方法は、更新可能なMaterialized Viewを定義することです。 - -## 更新可能なMaterialized Viewの使用 {#use-refreshable-materialized-views} - -カスタムOrdering Key(ORDER BY)を定義する簡単な方法は、[更新可能なMaterialized View](/materialized-view/refreshable-materialized-view)(MVs)を使用することです。これにより、希望するOrdering Keyを持つテーブル全体を定期的に(例:5分または10分ごとに)コピーできます。 - -以下は、カスタムORDER BYおよび必要な重複除去を備えた更新可能なMVの例です: - -```sql -CREATE MATERIALIZED VIEW posts_final -REFRESH EVERY 10 second ENGINE = ReplacingMergeTree(_peerdb_version) -ORDER BY (owneruserid,id) -- 異なるOrdering Keyですが、Postgresの主キーにサフィックスが付いています -AS -SELECT * FROM posts FINAL -WHERE _peerdb_is_deleted = 0; -- これが重複除去を行います -``` - -## 更新可能なMaterialized ViewなしのカスタムOrdering Key {#custom-ordering-keys-without-refreshable-materialized-views} - -データのスケールのために更新可能なMaterialized Viewが機能しない場合、より大きなテーブルでカスタムOrdering Keyを定義し、重複除去に関連する問題を克服するためのいくつかの推奨事項を以下に示します。 - -### 特定の行で変更されないOrdering Keyカラムを選択する {#choose-ordering-key-columns-that-dont-change-for-a-given-row} - -ClickHouseのOrdering KeyにPostgresの主キー以外の追加のカラムを含める場合は、各行で変更されないカラムを選択することをお勧めします。これにより、ReplacingMergeTreeでデータの整合性や重複除去の問題を防ぐのに役立ちます。 - -例えば、マルチテナントSaaSアプリケーションでは、(`tenant_id`, `id`)をOrdering Keyとして使用するのが良い選択です。これらのカラムは各行を一意に識別し、`tenant_id`は他のカラムが変更された場合でも`id`に対して一定です。idによる重複除去が(tenant_id, id)による重複除去と一致するため、tenant_idが変更された場合に発生する可能性のあるデータの[重複除去の問題](https://docs.peerdb.io/mirror/ordering-key-different)を回避するのに役立ちます。 - -### PostgresテーブルのレプリカアイデンティティをカスタムOrdering Keyに設定する {#set-replica-identity-on-postgres-tables-to-custom-ordering-key} - -PostgresのCDCが期待通りに機能するためには、テーブルの`REPLICA IDENTITY`をOrdering Keyカラムを含むように変更することが重要です。これはDELETE操作を正確に処理するために必要です。 - -`REPLICA IDENTITY`にOrdering Keyカラムが含まれない場合、PostgresのCDCは主キー以外のカラムの値をキャプチャしません - これはPostgresの論理デコーディングの制限です。Postgresの主キー以外のすべてのOrdering Keyカラムはnullになります。これにより、重複除去に影響を及ぼし、行の以前のバージョンが最新の削除されたバージョン(`_peerdb_is_deleted`が1に設定されている)と重複除去されない可能性があります。 - -`owneruserid`と`id`を用いた上記の例では、主キーがすでに`owneruserid`を含まない場合、(`owneruserid`, `id`)の上に`UNIQUE INDEX`を持ち、それをテーブルの`REPLICA IDENTITY`として設定する必要があります。これにより、PostgresのCDCは正確なレプリケーションと重複除去に必要なカラムの値をキャプチャします。 - -以下は、eventsテーブルでこれを行う方法の例です。変更されたOrdering Keyを持つすべてのテーブルに適用することを確認してください。 - -```sql --- (owneruserid, id)の上にUNIQUE INDEXを作成する -CREATE UNIQUE INDEX posts_unique_owneruserid_idx ON posts(owneruserid, id); --- このインデックスを使用してREPLICA IDENTITYを設定する -ALTER TABLE posts REPLICA IDENTITY USING INDEX posts_unique_owneruserid_idx; -``` diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/postgres/ordering_keys.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/postgres/ordering_keys.md.hash deleted file mode 100644 index 3847e8377bd..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/postgres/ordering_keys.md.hash +++ /dev/null @@ -1 +0,0 @@ -4ae631ab10830f50 diff --git 
a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/postgres/pause_and_resume.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/postgres/pause_and_resume.md deleted file mode 100644 index f6f595cc5ac..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/postgres/pause_and_resume.md +++ /dev/null @@ -1,51 +0,0 @@ ---- -title: 'Postgres ClickPipeの一時停止と再開' -description: 'Postgres ClickPipeの一時停止と再開' -sidebar_label: 'テーブルの一時停止' -slug: '/integrations/clickpipes/postgres/pause_and_resume' ---- - -import Image from '@theme/IdealImage'; -import pause_button from '@site/static/images/integrations/data-ingestion/clickpipes/postgres/pause_button.png' -import pause_dialog from '@site/static/images/integrations/data-ingestion/clickpipes/postgres/pause_dialog.png' -import pause_status from '@site/static/images/integrations/data-ingestion/clickpipes/postgres/pause_status.png' -import resume_button from '@site/static/images/integrations/data-ingestion/clickpipes/postgres/resume_button.png' -import resume_dialog from '@site/static/images/integrations/data-ingestion/clickpipes/postgres/resume_dialog.png' - -There are scenarios where it would be useful to pause a Postgres ClickPipe. For example, you may want to run some analytics on existing data in a static state. Or, you might be performing upgrades on Postgres. Here is how you can pause and resume a Postgres ClickPipe. - -## Steps to pause a Postgres ClickPipe {#pause-clickpipe-steps} - -1. データソースタブで、停止したいPostgres ClickPipeをクリックします。 -2. **設定**タブに移動します。 -3. **一時停止**ボタンをクリックします。 -
- - - -4. 確認のためのダイアログボックスが表示されます。再度、一時停止をクリックします。 -
- - -5. **メトリクス**タブに移動します。 -6. 約5秒後(ページを更新すると)、パイプの状態が**一時停止**と表示されるはずです。 -
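-
-While a Postgres ClickPipe is paused, incoming changes keep accumulating in the source replication slot (see the FAQ on pausing for details), so it can be worth watching how much WAL the slot retains before you resume. The query below is only an illustrative check built on standard PostgreSQL catalog views (PostgreSQL 10 and later); run it on the source database and look for the replication slot you selected when creating the ClickPipe.
-
-```sql
---- Illustrative only: approximate WAL retained by each replication slot while the pipe is paused
-SELECT
-    slot_name,
-    active,
-    pg_size_pretty(pg_wal_lsn_diff(pg_current_wal_lsn(), restart_lsn)) AS retained_wal
-FROM pg_replication_slots;
-```
-
-If the retained WAL keeps growing toward your `max_slot_wal_keep_size` limit, consider resuming the pipe sooner or raising that limit as described in the FAQ's `max_slot_wal_keep_size` recommendations.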
- - - -## Steps to resume a Postgres ClickPipe {#resume-clickpipe-steps} -1. データソースタブで、再開したいPostgres ClickPipeをクリックします。ミラーの状態は最初は**一時停止**です。 -2. **設定**タブに移動します。 -3. **再開**ボタンをクリックします。 -
- - - -4. 確認のためのダイアログボックスが表示されます。再度、再開をクリックします。 -
- - - -5. **メトリクス**タブに移動します。 -6. 約5秒後(ページを更新すると)、パイプの状態が**実行中**と表示されるはずです。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/postgres/pause_and_resume.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/postgres/pause_and_resume.md.hash deleted file mode 100644 index e464824b278..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/postgres/pause_and_resume.md.hash +++ /dev/null @@ -1 +0,0 @@ -f9c01f68d2225b43 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/postgres/postgres_generated_columns.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/postgres/postgres_generated_columns.md deleted file mode 100644 index 64cb379f72c..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/postgres/postgres_generated_columns.md +++ /dev/null @@ -1,32 +0,0 @@ ---- -title: 'Postgres Generated Columns: Gotchas and Best Practices' -slug: '/integrations/clickpipes/postgres/generated_columns' -description: 'Page describing important considerations to keep in mind when using - PostgreSQL generated columns in tables that are being replicated' ---- - - - -PostgreSQLの生成カラムを含むテーブルをレプリケーションする際には、いくつかの重要な考慮事項があります。これらの注意点は、レプリケーションプロセスや、宛先システムにおけるデータの整合性に影響を及ぼす可能性があります。 - -## 生成カラムの問題 {#the-problem-with-generated-columns} - -1. **`pgoutput`を介して公開されない:** 生成カラムは、`pgoutput`論理レプリケーションプラグインを通じて公開されません。これは、PostgreSQLから別のシステムにデータをレプリケートする際に、生成カラムの値がレプリケーションストリームに含まれないことを意味します。 - -2. **主キーに関する問題:** 生成カラムが主キーの一部である場合、宛先での重複排除に問題を引き起こす可能性があります。生成カラムの値はレプリケートされないため、宛先システムは行を適切に識別し、重複を排除するために必要な情報を持っていません。 - -## ベストプラクティス {#best-practices} - -これらの制限を回避するために、次のベストプラクティスを考慮してください。 - -1. **宛先で生成カラムを再作成:** レプリケーションプロセスに生成カラムの処理を任せるのではなく、dbt(data build tool)や他のデータ変換メカニズムを使用して、宛先でこれらのカラムを再作成することをお勧めします。 - -2. **主キーに生成カラムを使用しない:** レプリケートされるテーブルを設計する際には、生成カラムを主キーの一部として含めない方が良いです。 - -## 今後のUIの改善 {#upcoming-improvements-to-ui} - -今後のバージョンでは、ユーザーを支援するためにUIを追加する予定です。 - -1. **生成カラムを持つテーブルの特定:** UIには、生成カラムを含むテーブルを特定する機能が追加されます。これにより、ユーザーはどのテーブルがこの問題の影響を受けているかを理解しやすくなります。 - -2. 
**ドキュメントとベストプラクティス:** UIには、レプリケートテーブルで生成カラムを使用するためのベストプラクティスが含まれ、一般的な落とし穴を避けるためのガイダンスが提供されます。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/postgres/postgres_generated_columns.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/postgres/postgres_generated_columns.md.hash deleted file mode 100644 index b420bdddd0c..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/postgres/postgres_generated_columns.md.hash +++ /dev/null @@ -1 +0,0 @@ -1d0d36849e35b676 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/postgres/remove_table.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/postgres/remove_table.md deleted file mode 100644 index e4993786457..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/postgres/remove_table.md +++ /dev/null @@ -1,26 +0,0 @@ ---- -title: 'ClickPipe から特定のテーブルを削除する' -description: 'ClickPipe から特定のテーブルを削除する' -sidebar_label: 'テーブルの削除' -slug: '/integrations/clickpipes/postgres/removing_tables' ---- - -import Image from '@theme/IdealImage'; -import remove_table from '@site/static/images/integrations/data-ingestion/clickpipes/postgres/remove_table.png' - -In some cases, it makes sense to exclude specific tables from a Postgres ClickPipe - for example, if a table isn't needed for your analytics workload, skipping it can reduce storage and replication costs in ClickHouse. - -## Steps to remove specific tables {#remove-tables-steps} - -The first step is to remove the table from the pipe. This can be done by the following steps: - -1. [Pause](./pause_and_resume.md) the pipe. -2. Click on Edit Table Settings. -3. Locate your table - this can be done by searching it in the search bar. -4. Deselect the table by clicking on the selected checkbox. -
- - - -5. Click update. -6. Upon successful update, in the **Metrics** tab the status will be **Running**. This table will no longer be replicated by this ClickPipe. diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/postgres/remove_table.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/postgres/remove_table.md.hash deleted file mode 100644 index 7e0502ca08c..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/postgres/remove_table.md.hash +++ /dev/null @@ -1 +0,0 @@ -009654c974b7a066 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/postgres/schema-changes.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/postgres/schema-changes.md deleted file mode 100644 index 6e1a72e04ba..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/postgres/schema-changes.md +++ /dev/null @@ -1,16 +0,0 @@ ---- -title: 'Schema Changes Propagation Support' -slug: '/integrations/clickpipes/postgres/schema-changes' -description: 'Page describing schema change types detectable by ClickPipes in the - source tables' ---- - - - -ClickPipes for Postgresは、ソーステーブルのスキーマ変更を検出できます。また、これらの変更の一部を対応するデスティネーションテーブルにも伝播することができます。それぞれのスキーマ変更の扱いは、以下に文書化されています。 - -| スキーマ変更タイプ | 振る舞い | -| ----------------------------------------------------------------------------------- | ------------------------------------- | -| 新しいカラムの追加 (`ALTER TABLE ADD COLUMN ...`) | 自動的に伝播され、変更後のすべての行はすべてのカラムが埋められます | -| デフォルト値を持つ新しいカラムの追加 (`ALTER TABLE ADD COLUMN ... DEFAULT ...`) | 自動的に伝播され、変更後のすべての行はすべてのカラムが埋められますが、既存の行はテーブル全体を再読み込みしない限りDEFAULT値を表示しません | -| 既存のカラムの削除 (`ALTER TABLE DROP COLUMN ...`) | 検出されますが、伝播はされません。変更後のすべての行は削除されたカラムに対してNULLを持ちます | diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/postgres/schema-changes.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/postgres/schema-changes.md.hash deleted file mode 100644 index 64ddda690a9..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/postgres/schema-changes.md.hash +++ /dev/null @@ -1 +0,0 @@ -b77bf7abf4960e56 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/postgres/source/aurora.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/postgres/source/aurora.md deleted file mode 100644 index af81b149ab9..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/postgres/source/aurora.md +++ /dev/null @@ -1,135 +0,0 @@ ---- -sidebar_label: 'Amazon Aurora Postgres' -description: 'Set up Amazon Aurora Postgres as a source for ClickPipes' -slug: '/integrations/clickpipes/postgres/source/aurora' -title: 'Aurora Postgres Source Setup Guide' ---- - -import parameter_group_in_blade from '@site/static/images/integrations/data-ingestion/clickpipes/postgres/source/rds/parameter_group_in_blade.png'; -import change_rds_logical_replication from '@site/static/images/integrations/data-ingestion/clickpipes/postgres/source/rds/change_rds_logical_replication.png'; -import change_wal_sender_timeout from '@site/static/images/integrations/data-ingestion/clickpipes/postgres/source/rds/change_wal_sender_timeout.png'; -import modify_parameter_group from 
'@site/static/images/integrations/data-ingestion/clickpipes/postgres/source/rds/modify_parameter_group.png'; -import reboot_rds from '@site/static/images/integrations/data-ingestion/clickpipes/postgres/source/rds/reboot_rds.png'; -import security_group_in_rds_postgres from '@site/static/images/integrations/data-ingestion/clickpipes/postgres/source/rds/security_group_in_rds_postgres.png'; -import edit_inbound_rules from '@site/static/images/integrations/data-ingestion/clickpipes/postgres/source/rds/edit_inbound_rules.png'; -import Image from '@theme/IdealImage'; - - - -# Aurora Postgres Source Setup Guide - -## Supported Postgres versions {#supported-postgres-versions} - -ClickPipesは、Aurora PostgreSQL-Compatible Edition バージョン12以降をサポートしています。 - -## Enable Logical Replication {#enable-logical-replication} - -Auroraインスタンスに以下の設定が既に構成されている場合、このセクションをスキップできます: -- `rds.logical_replication = 1` -- `wal_sender_timeout = 0` - -これらの設定は、以前に別のデータレプリケーションツールを使用していた場合、通常は事前に構成されています。 - -```text -postgres=> SHOW rds.logical_replication ; - rds.logical_replication -------------------------- - on -(1 row) - -postgres=> SHOW wal_sender_timeout ; - wal_sender_timeout --------------------- - 0 -(1 row) -``` - -まだ構成されていない場合は、次の手順に従ってください: - -1. 必要な設定を持つAurora PostgreSQLバージョン用の新しいパラメーターグループを作成します: - - `rds.logical_replication`を1に設定 - - `wal_sender_timeout`を0に設定 - - - - - - - -2. 新しいパラメーターグループをAurora PostgreSQLクラスターに適用します - - - -3. 変更を適用するためにAuroraクラスターを再起動します - - - -## Configure Database User {#configure-database-user} - -管理者ユーザーとしてAurora PostgreSQLのライターインスタンスに接続し、以下のコマンドを実行します: - -1. ClickPipes用の専用ユーザーを作成します: - - ```sql - CREATE USER clickpipes_user PASSWORD 'some-password'; - ``` - -2. スキーマの権限を付与します。以下の例は`public`スキーマの権限を示しています。レプリケートしたい各スキーマについてこのコマンドを繰り返します: - - ```sql - GRANT USAGE ON SCHEMA "public" TO clickpipes_user; - GRANT SELECT ON ALL TABLES IN SCHEMA "public" TO clickpipes_user; - ALTER DEFAULT PRIVILEGES IN SCHEMA "public" GRANT SELECT ON TABLES TO clickpipes_user; - ``` - -3. レプリケーション権限を付与します: - - ```sql - GRANT rds_replication TO clickpipes_user; - ``` - -4. レプリケーションのためのパブリケーションを作成します: - - ```sql - CREATE PUBLICATION clickpipes_publication FOR ALL TABLES; - ``` - - -## Configure Network Access {#configure-network-access} - -### IP-based Access Control {#ip-based-access-control} - -Auroraクラスターへのトラフィックを制限したい場合は、[文書化された静的NAT IP](../../index.md#list-of-static-ips)をAuroraセキュリティグループの`Inbound rules`に追加してください。 - - - - - -### Private Access via AWS PrivateLink {#private-access-via-aws-privatelink} - -プライベートネットワークを通じてAuroraクラスターに接続するには、AWS PrivateLinkを使用できます。接続の設定については、[ClickPipes用のAWS PrivateLink設定ガイド](/knowledgebase/aws-privatelink-setup-for-clickpipes)を参照してください。 - -### Aurora-Specific Considerations {#aurora-specific-considerations} - -ClickPipesをAurora PostgreSQLで設定する際に考慮すべき点は以下の通りです: - -1. **接続エンドポイント**:常にAuroraクラスターのライターエンドポイントに接続してください。論理レプリケーションには、レプリケーションスロットを作成するための書き込みアクセスが必要で、プライマリインスタンスに接続する必要があります。 - -2. **フェイルオーバー処理**:フェイルオーバーが発生した場合、Auroraは自動的にリーダーを新しいライターに昇格させます。ClickPipesは切断を検出し、ライターエンドポイントへの再接続を試みます。このエンドポイントは新しいプライマリインスタンスを指すことになります。 - -3. **グローバルデータベース**:Aurora Global Databaseを使用している場合、プライマリリージョンのライターエンドポイントに接続する必要があります。クロスリージョンレプリケーションは、すでにリージョン間のデータ移動を処理します。 - -4. **ストレージの考慮事項**:Auroraのストレージ層はクラスター内のすべてのインスタンスで共有されており、標準RDSに比べて論理レプリケーションのパフォーマンスが向上する可能性があります。 - -### Dealing with Dynamic Cluster Endpoints {#dealing-with-dynamic-cluster-endpoints} - -Auroraは、適切なインスタンスに自動的にルーティングされる安定したエンドポイントを提供しますが、一貫した接続性を確保するための追加のアプローチは以下の通りです: - -1. 
高可用性のセットアップの場合、Auroraライターエンドポイントを使用するようにアプリケーションを構成してください。これにより、現在のプライマリインスタンスを自動的に指します。 - -2. クロスリージョンレプリケーションを使用している場合は、各リージョンに対して別々のClickPipesを設定してレイテンシを減少させ、耐障害性を向上させることを検討してください。 - -## What's next? {#whats-next} - -これで、[ClickPipeを作成](../index.md)し、Aurora PostgreSQLクラスターからClickHouse Cloudにデータを取り込むことができるようになります。 -Aurora PostgreSQLクラスターの設定時に使用した接続詳細をメモしておくことを忘れないでください。ClickPipeの作成プロセスでそれらが必要になります。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/postgres/source/aurora.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/postgres/source/aurora.md.hash deleted file mode 100644 index 749c1bc55e7..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/postgres/source/aurora.md.hash +++ /dev/null @@ -1 +0,0 @@ -72968c73dc8d0f25 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/postgres/source/azure-flexible-server-postgres.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/postgres/source/azure-flexible-server-postgres.md deleted file mode 100644 index 298506691b8..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/postgres/source/azure-flexible-server-postgres.md +++ /dev/null @@ -1,83 +0,0 @@ ---- -sidebar_label: 'Azure Flexible Server for Postgres' -description: 'Set up Azure Flexible Server for Postgres as a source for ClickPipes' -slug: '/integrations/clickpipes/postgres/source/azure-flexible-server-postgres' -title: 'Azure Flexible Server for Postgres Source Setup Guide' ---- - -import server_parameters from '@site/static/images/integrations/data-ingestion/clickpipes/postgres/source/azure-flexible-server-postgres/server_parameters.png'; -import wal_level from '@site/static/images/integrations/data-ingestion/clickpipes/postgres/source/azure-flexible-server-postgres/wal_level.png'; -import restart from '@site/static/images/integrations/data-ingestion/clickpipes/postgres/source/azure-flexible-server-postgres/restart.png'; -import firewall from '@site/static/images/integrations/data-ingestion/clickpipes/postgres/source/azure-flexible-server-postgres/firewall.png'; -import Image from '@theme/IdealImage'; - - -# Azure Flexible Server for Postgres ソースセットアップガイド - -ClickPipesはPostgresバージョン12以降をサポートしています。 - -## 論理レプリケーションの有効化 {#enable-logical-replication} - -`wal_level`が`logical`に設定されている場合、以下の手順を実行する必要はありません。この設定は、別のデータレプリケーションツールから移行する場合、ほとんどの場合、事前に構成されています。 - -1. **サーバーパラメータ**セクションをクリックします。 - - - -2. `wal_level`を`logical`に編集します。 - - - -3. この変更にはサーバーの再起動が必要です。要求された場合は再起動してください。 - - - -## ClickPipesユーザーの作成と権限付与 {#creating-clickpipes-user-and-granting-permissions} - -管理ユーザーを通じてAzure Flexible Server Postgresに接続し、以下のコマンドを実行します。 - -1. ClickPipes専用のPostgresユーザーを作成します。 - - ```sql - CREATE USER clickpipes_user PASSWORD 'some-password'; - ``` - -2. テーブルをレプリケートするスキーマに対する読み取り専用アクセスを`clickpipes_user`に付与します。以下の例は`public`スキーマの権限設定を示しています。複数のスキーマにアクセスを付与したい場合は、それぞれのスキーマについてこれらの3つのコマンドを実行できます。 - - ```sql - GRANT USAGE ON SCHEMA "public" TO clickpipes_user; - GRANT SELECT ON ALL TABLES IN SCHEMA "public" TO clickpipes_user; - ALTER DEFAULT PRIVILEGES IN SCHEMA "public" GRANT SELECT ON TABLES TO clickpipes_user; - ``` - -3. このユーザーにレプリケーションアクセスを付与します: - - ```sql - ALTER ROLE clickpipes_user REPLICATION; - ``` - -4. 
将来MIRROR(レプリケーション)を作成するために使用する公開出版物を作成します。 - - ```sql - CREATE PUBLICATION clickpipes_publication FOR ALL TABLES; - ``` - -5. `clickpipes_user`の`wal_sender_timeout`を0に設定します。 - - ```sql - ALTER ROLE clickpipes_user SET wal_sender_timeout to 0; - ``` - - -## ClickPipesのIPをファイアウォールに追加する {#add-clickpipes-ips-to-firewall} - -以下の手順に従って、[ClickPipesのIP](../../index.md#list-of-static-ips)をネットワークに追加してください。 - -1. **ネットワーキング**タブに移動し、[ClickPipesのIP](../../index.md#list-of-static-ips)をAzure Flexible Server PostgresのファイアウォールまたはSSHトンネリングを使用している場合はJump Server/Bastionに追加します。 - - - - -## 次は何ですか? {#whats-next} - -これで[ClickPipeを作成](../index.md)し、PostgresインスタンスからClickHouse Cloudへデータを取り込むことができます。Postgresインスタンスをセットアップした際に使用した接続情報を忘れずにメモしておいてください。ClickPipe作成プロセス中にそれらが必要になります。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/postgres/source/azure-flexible-server-postgres.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/postgres/source/azure-flexible-server-postgres.md.hash deleted file mode 100644 index 1a294c208e3..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/postgres/source/azure-flexible-server-postgres.md.hash +++ /dev/null @@ -1 +0,0 @@ -6f47a0d65e470e76 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/postgres/source/crunchy-postgres.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/postgres/source/crunchy-postgres.md deleted file mode 100644 index e9e40012381..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/postgres/source/crunchy-postgres.md +++ /dev/null @@ -1,67 +0,0 @@ ---- -sidebar_label: 'Crunchy Bridge Postgres' -description: 'Set up Crunchy Bridge Postgres as a source for ClickPipes' -slug: '/integrations/clickpipes/postgres/source/crunchy-postgres' -title: 'Crunchy Bridge Postgres Source Setup Guide' ---- - -import firewall_rules_crunchy_bridge from '@site/static/images/integrations/data-ingestion/clickpipes/postgres/source/setup/crunchy-postgres/firewall_rules_crunchy_bridge.png' -import add_firewall_rules_crunchy_bridge from '@site/static/images/integrations/data-ingestion/clickpipes/postgres/source/setup/crunchy-postgres/add_firewall_rules_crunchy_bridge.png' -import Image from '@theme/IdealImage'; - - -# Crunchy Bridge Postgres ソースセットアップガイド - -ClickPipes は Postgres バージョン 12 以降をサポートしています。 - -## 論理レプリケーションを有効にする {#enable-logical-replication} - -Crunchy Bridge には、[デフォルトで](https://docs.crunchybridge.com/how-to/logical-replication) 論理レプリケーションが有効になっています。以下の設定が正しく構成されていることを確認してください。そうでない場合は、適宜調整してください。 - -```sql -SHOW wal_level; -- logical であるべき -SHOW max_wal_senders; -- 10 であるべき -SHOW max_replication_slots; -- 10 であるべき -``` - -## ClickPipes ユーザーの作成と権限の付与 {#creating-clickpipes-user-and-granting-permissions} - -`postgres` ユーザーを通じて Crunchy Bridge Postgres に接続し、以下のコマンドを実行してください。 - -1. ClickPipes 専用の Postgres ユーザーを作成します。 - - ```sql - CREATE USER clickpipes_user PASSWORD 'some-password'; - ``` - -2. テーブルをレプリケートするスキーマへの読み取り専用アクセスを `clickpipes_user` に付与します。以下の例は `public` スキーマへの権限付与を示しています。複数のスキーマにアクセス権を付与したい場合は、各スキーマに対してこれらの 3 つのコマンドを実行できます。 - - ```sql - GRANT USAGE ON SCHEMA "public" TO clickpipes_user; - GRANT SELECT ON ALL TABLES IN SCHEMA "public" TO clickpipes_user; - ALTER DEFAULT PRIVILEGES IN SCHEMA "public" GRANT SELECT ON TABLES TO clickpipes_user; - ``` - -3. 
このユーザーにレプリケーションアクセスを付与します。 - - ```sql - ALTER ROLE clickpipes_user REPLICATION; - ``` - -4. 今後使用する MIRROR (レプリケーション) を作成するための公開を作成します。 - - ```sql - CREATE PUBLICATION clickpipes_publication FOR ALL TABLES; - ``` - -## ClickPipes IP の安全リスト {#safe-list-clickpipes-ips} - -Crunchy Bridge のファイアウォールルールに ClickPipes IP を安全リストに追加します。[ClickPipes IPs](../../index.md#list-of-static-ips)。 - - - - - -## 次は何ですか? {#whats-next} - -これで、[ClickPipe を作成](../index.md)し、Postgres インスタンスから ClickHouse Cloud にデータを取り込む準備が整いました。Postgres インスタンスの設定中に使用した接続詳細をメモしておくことを忘れないでください。ClickPipe 作成プロセス中に必要になります。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/postgres/source/crunchy-postgres.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/postgres/source/crunchy-postgres.md.hash deleted file mode 100644 index b1c66ce426d..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/postgres/source/crunchy-postgres.md.hash +++ /dev/null @@ -1 +0,0 @@ -7fb168e0b6250ea8 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/postgres/source/generic.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/postgres/source/generic.md deleted file mode 100644 index b363614a1b6..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/postgres/source/generic.md +++ /dev/null @@ -1,122 +0,0 @@ ---- -sidebar_label: 'Generic Postgres' -description: 'Set up any Postgres instance as a source for ClickPipes' -slug: '/integrations/clickpipes/postgres/source/generic' -title: 'Generic Postgres Source Setup Guide' ---- - - - - - -# 一般的な Postgres ソースセットアップガイド - -:::info - -サポートされているプロバイダのいずれかを使用している場合は、サイドバーのそのプロバイダの特定のガイドを参照してください。 - -::: - - -ClickPipes は Postgres バージョン 12 以降をサポートしています。 - -## 論理レプリケーションの有効化 {#enable-logical-replication} - -1. Postgres インスタンスでレプリケーションを有効にするために、以下の設定が行われていることを確認する必要があります: - - ```sql - wal_level = logical - ``` - 同じことを確認するには、次の SQL コマンドを実行できます: - ```sql - SHOW wal_level; - ``` - - 出力は `logical` である必要があります。そうでない場合は、次を実行します: - ```sql - ALTER SYSTEM SET wal_level = logical; - ``` - -2. 加えて、Postgres インスタンスで設定することが推奨されている以下の設定があります: - ```sql - max_wal_senders > 1 - max_replication_slots >= 4 - ``` - 同じことを確認するには、次の SQL コマンドを実行できます: - ```sql - SHOW max_wal_senders; - SHOW max_replication_slots; - ``` - - 値が推奨値と一致しない場合は、次の SQL コマンドを実行して設定できます: - ```sql - ALTER SYSTEM SET max_wal_senders = 10; - ALTER SYSTEM SET max_replication_slots = 10; - ``` -3. 
上記の設定を変更した場合は、変更が適用されるために Postgres インスタンスを再起動する必要があります。 - - -## 権限とパブリケーションを持つユーザーの作成 {#creating-a-user-with-permissions-and-publication} - -CDC に適した必要な権限を持つ ClickPipes 用の新しいユーザーを作成し、レプリケーションに使用するパブリケーションも作成しましょう。 - -これを行うには、Postgres インスタンスに接続し、次の SQL コマンドを実行できます: -```sql - CREATE USER clickpipes_user PASSWORD 'clickpipes_password'; - GRANT USAGE ON SCHEMA "public" TO clickpipes_user; - GRANT SELECT ON ALL TABLES IN SCHEMA "public" TO clickpipes_user; - ALTER DEFAULT PRIVILEGES IN SCHEMA "public" GRANT SELECT ON TABLES TO clickpipes_user; - --- ユーザーにレプリケーション権限を付与 - ALTER USER clickpipes_user REPLICATION; - --- パブリケーションを作成します。パイプを作成するときに使用します - CREATE PUBLICATION clickpipes_publication FOR ALL TABLES; -``` -:::note - -`clickpipes_user` と `clickpipes_password` は希望するユーザー名とパスワードに置き換えてください。 - -::: - - -## ClickPipes ユーザーのための pg_hba.conf での接続の有効化 {#enabling-connections-in-pg_hbaconf-to-the-clickpipes-user} - -セルフサービスの場合は、ClickPipes IP アドレスから ClickPipes ユーザーへの接続を許可する必要があります。以下の手順に従ってください。マネージドサービスを使用している場合は、プロバイダのドキュメントに従って同じことを行えます。 - -1. `pg_hba.conf` ファイルで ClickPipes ユーザーへの接続を許可するために必要な変更を行います。`pg_hba.conf` ファイルの例のエントリは次のようになります: - ```response - host all clickpipes_user 0.0.0.0/0 scram-sha-256 - ``` - -2. 変更が適用されるように PostgreSQL インスタンスを再読み込みします: - ```sql - SELECT pg_reload_conf(); - ``` - - -## `max_slot_wal_keep_size` の増加 {#increase-max_slot_wal_keep_size} - -これは、大きなトランザクション/コミットによってレプリケーションスロットが削除されないようにするための推奨設定変更です。 - -`max_slot_wal_keep_size` パラメータを PostgreSQL インスタンスのより高い値(少なくとも 100GB または `102400`)に増やすことができます。`postgresql.conf` ファイルを更新して行います。 - -```sql -max_slot_wal_keep_size = 102400 -``` - -変更が適用されるように Postgres インスタンスを再読み込みできます: -```sql -SELECT pg_reload_conf(); -``` - -:::note - -この値のより良い推奨が必要な場合は、ClickPipes チームに連絡してください。 - -::: - -## 次は何をしますか? {#whats-next} - -これで、[ClickPipe を作成](../index.md)し、Postgres インスタンスから ClickHouse Cloud へのデータの取り込みを開始できます。 -Postgres インスタンスを設定したときに使用した接続の詳細をメモしておくことを忘れないでください。ClickPipe 作成プロセス中に必要になります。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/postgres/source/generic.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/postgres/source/generic.md.hash deleted file mode 100644 index 6e997aa9aba..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/postgres/source/generic.md.hash +++ /dev/null @@ -1 +0,0 @@ -3362718631b231ea diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/postgres/source/google-cloudsql.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/postgres/source/google-cloudsql.md deleted file mode 100644 index a00b6050ec3..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/postgres/source/google-cloudsql.md +++ /dev/null @@ -1,107 +0,0 @@ ---- -sidebar_label: 'Google Cloud SQL' -description: 'Set up Google Cloud SQL Postgres instance as a source for ClickPipes' -slug: '/integrations/clickpipes/postgres/source/google-cloudsql' -title: 'Google Cloud SQL Postgres Source Setup Guide' ---- - -import edit_button from '@site/static/images/integrations/data-ingestion/clickpipes/postgres/source/google-cloudsql/edit.png'; -import cloudsql_logical_decoding1 from '@site/static/images/integrations/data-ingestion/clickpipes/postgres/source/google-cloudsql/cloudsql_logical_decoding1.png'; -import cloudsql_logical_decoding2 from 
'@site/static/images/integrations/data-ingestion/clickpipes/postgres/source/google-cloudsql/cloudsql_logical_decoding2.png'; -import cloudsql_logical_decoding3 from '@site/static/images/integrations/data-ingestion/clickpipes/postgres/source/google-cloudsql/cloudsql_logical_decoding3.png'; -import connections from '@site/static/images/integrations/data-ingestion/clickpipes/postgres/source/google-cloudsql/connections.png'; -import connections_networking from '@site/static/images/integrations/data-ingestion/clickpipes/postgres/source/google-cloudsql/connections_networking.png'; -import firewall1 from '@site/static/images/integrations/data-ingestion/clickpipes/postgres/source/google-cloudsql/firewall1.png'; -import firewall2 from '@site/static/images/integrations/data-ingestion/clickpipes/postgres/source/google-cloudsql/firewall2.png'; -import Image from '@theme/IdealImage'; - - -# Google Cloud SQL Postgres ソースセットアップガイド - -:::info - -サポートされているプロバイダーのいずれかを使用している場合は、サイドバーのそのプロバイダーに特化したガイドを参照してください。 - -::: - - -## サポートされている Postgres バージョン {#supported-postgres-versions} - -Postgres 12 以降のすべて - -## 論理レプリケーションを有効にする {#enable-logical-replication} - -`cloudsql.logical_decoding` がオンで `wal_sender_timeout` が 0 の場合は、以下の手順を実行する必要はありません。これらの設定は、別のデータレプリケーションツールから移行する場合は、前もって設定されていることが多いです。 - -1. 概要ページで **Edit** ボタンをクリックします。 - - - -2. フラグに移動し、`cloudsql.logical_decoding` をオンにし、`wal_sender_timeout` を 0 に変更します。これらの変更は、Postgres サーバーの再起動が必要です。 - - - - - - -## ClickPipes ユーザーを作成し、権限を付与する {#creating-clickpipes-user-and-granting-permissions} - -管理ユーザーを通じて Cloud SQL Postgres に接続し、以下のコマンドを実行します。 - -1. ClickPipes 専用の Postgres ユーザーを作成します。 - - ```sql - CREATE USER clickpipes_user PASSWORD 'some-password'; - ``` - -2. テーブルを複製しているスキーマへの読み取り専用アクセスを `clickpipes_user` に提供します。以下の例は `public` スキーマの権限を設定する方法を示しています。複数のスキーマにアクセスを付与したい場合は、それぞれのスキーマについてこの3つのコマンドを実行できます。 - - ```sql - GRANT USAGE ON SCHEMA "public" TO clickpipes_user; - GRANT SELECT ON ALL TABLES IN SCHEMA "public" TO clickpipes_user; - ALTER DEFAULT PRIVILEGES IN SCHEMA "public" GRANT SELECT ON TABLES TO clickpipes_user; - ``` - -3. このユーザーにレプリケーションアクセスを付与します: - - ```sql - ALTER ROLE clickpipes_user REPLICATION; - ``` - -4. 今後 MIRROR(レプリケーション)を作成するために使用する公開物を作成します。 - - ```sql - CREATE PUBLICATION clickpipes_publication FOR ALL TABLES; - ``` - -[//]: # (TODO Add SSH Tunneling) - - -## ClickPipes IP をファイアウォールに追加する {#add-clickpipes-ips-to-firewall} - -以下の手順に従って、ClickPipes IP をネットワークに追加してください。 - -:::note - -SSH トンネリングを使用している場合は、[ClickPipes IP](../../index.md#list-of-static-ips)をジャンプサーバー/バスティオンのファイアウォールルールに追加する必要があります。 - -::: - -1. **Connections** セクションに移動します。 - - - -2. ネットワーキングのサブセクションに移動します。 - - - -3. [ClickPipes のパブリック IP](../../index.md#list-of-static-ips)を追加します。 - - - - - -## 次に何をしますか? 
{#whats-next} - -これで、[ClickPipe を作成](../index.md)し、Postgres インスタンスから ClickHouse Cloud にデータをインジェストすることができます。 -Postgres インスタンスの設定時に使用した接続詳細をメモしておいてください。ClickPipe 作成プロセス中に必要になります。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/postgres/source/google-cloudsql.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/postgres/source/google-cloudsql.md.hash deleted file mode 100644 index 5f631c85faf..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/postgres/source/google-cloudsql.md.hash +++ /dev/null @@ -1 +0,0 @@ -5de14e657c542865 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/postgres/source/neon-postgres.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/postgres/source/neon-postgres.md deleted file mode 100644 index 4b9ec887313..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/postgres/source/neon-postgres.md +++ /dev/null @@ -1,77 +0,0 @@ ---- -sidebar_label: 'Neon Postgres' -description: 'Set up Neon Postgres instance as a source for ClickPipes' -slug: '/integrations/clickpipes/postgres/source/neon-postgres' -title: 'Neon Postgres Source Setup Guide' ---- - -import neon_commands from '@site/static/images/integrations/data-ingestion/clickpipes/postgres/source/setup/neon-postgres/neon-commands.png' -import neon_enable_replication from '@site/static/images/integrations/data-ingestion/clickpipes/postgres/source/setup/neon-postgres/neon-enable-replication.png' -import neon_enabled_replication from '@site/static/images/integrations/data-ingestion/clickpipes/postgres/source/setup/neon-postgres/neon-enabled-replication.png' -import neon_ip_allow from '@site/static/images/integrations/data-ingestion/clickpipes/postgres/source/setup/neon-postgres/neon-ip-allow.png' -import neon_conn_details from '@site/static/images/integrations/data-ingestion/clickpipes/postgres/source/setup/neon-postgres/neon-conn-details.png' -import Image from '@theme/IdealImage'; - - -# Neon Postgres ソースセットアップガイド - -これは、ClickPipesでのレプリケーションに使用できるNeon Postgresをセットアップする方法に関するガイドです。 -このセットアップを行うには、[Neonコンソール](https://console.neon.tech/app/projects)にサインインしていることを確認してください。 - -## 権限のあるユーザーの作成 {#creating-a-user-with-permissions} - -CDCに適した必要な権限を持つClickPipes用の新しいユーザーを作成し、レプリケーションに使用する公開物を作成しましょう。 - -そのためには、**SQLエディタ**タブに移動します。 -ここで、次のSQLコマンドを実行できます: - -```sql - CREATE USER clickpipes_user PASSWORD 'clickpipes_password'; - GRANT USAGE ON SCHEMA "public" TO clickpipes_user; - GRANT SELECT ON ALL TABLES IN SCHEMA "public" TO clickpipes_user; - ALTER DEFAULT PRIVILEGES IN SCHEMA "public" GRANT SELECT ON TABLES TO clickpipes_user; - --- USERにレプリケーション権限を付与 - ALTER USER clickpipes_user REPLICATION; - --- 公開物を作成します。ミラーを作成する際にこれを使用します - CREATE PUBLICATION clickpipes_publication FOR ALL TABLES; -``` - - - -**実行**をクリックして、公開物とユーザーが準備できるようにします。 - -## 論理レプリケーションの有効化 {#enable-logical-replication} -Neonでは、UIを介して論理レプリケーションを有効にできます。これは、ClickPipesのCDCがデータをレプリケートするために必要です。 -**設定**タブに移動し、次に**論理レプリケーション**セクションに進みます。 - - - -**有効化**をクリックして設定完了です。有効にすると、以下の成功メッセージが表示されるはずです。 - - - -以下の設定があなたのNeon Postgresインスタンスで確認できるか見てみましょう: -```sql -SHOW wal_level; -- これはlogicalであるべき -SHOW max_wal_senders; -- これは10であるべき -SHOW max_replication_slots; -- これは10であるべき -``` - -## IPホワイトリスト (Neonエンタープライズプラン向け) {#ip-whitelisting-for-neon-enterprise-plan} 
-Neonエンタープライズプランをお持ちの場合、[ClickPipesのIP](../../index.md#list-of-static-ips)をホワイトリストに追加して、ClickPipesからあなたのNeon Postgresインスタンスへのレプリケーションを許可できます。 -これを行うには、**設定**タブをクリックし、**IP許可**セクションに進みます。 - - - -## 接続詳細のコピー {#copy-connection-details} -ユーザーと公開物が準備完了で、レプリケーションが有効になったので、新しいClickPipeを作成するための接続詳細をコピーできます。 -**ダッシュボード**に移動し、接続文字列が表示されるテキストボックスで、ビューを**パラメータのみ**に変更します。次のステップでこれらのパラメータが必要になります。 - - - -## 次は何ですか? {#whats-next} - -これで、[ClickPipeを作成](../index.md)し、PostgresインスタンスからClickHouse Cloudにデータを取り込むことができます。 -Postgresインスタンスの設定中に使用した接続詳細をメモしておいてください。ClickPipe作成プロセス中に必要になります。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/postgres/source/neon-postgres.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/postgres/source/neon-postgres.md.hash deleted file mode 100644 index e5386c32f6b..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/postgres/source/neon-postgres.md.hash +++ /dev/null @@ -1 +0,0 @@ -291a2838e900db3b diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/postgres/source/rds.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/postgres/source/rds.md deleted file mode 100644 index c92c7d5aa86..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/postgres/source/rds.md +++ /dev/null @@ -1,121 +0,0 @@ ---- -sidebar_label: 'Amazon RDS Postgres' -description: 'ClickPipes 用の Amazon RDS Postgres ソースの設定' -slug: '/integrations/clickpipes/postgres/source/rds' -title: 'RDS Postgres Source Setup Guide' ---- - -import parameter_group_in_blade from '@site/static/images/integrations/data-ingestion/clickpipes/postgres/source/rds/parameter_group_in_blade.png'; -import change_rds_logical_replication from '@site/static/images/integrations/data-ingestion/clickpipes/postgres/source/rds/change_rds_logical_replication.png'; -import change_wal_sender_timeout from '@site/static/images/integrations/data-ingestion/clickpipes/postgres/source/rds/change_wal_sender_timeout.png'; -import modify_parameter_group from '@site/static/images/integrations/data-ingestion/clickpipes/postgres/source/rds/modify_parameter_group.png'; -import reboot_rds from '@site/static/images/integrations/data-ingestion/clickpipes/postgres/source/rds/reboot_rds.png'; -import security_group_in_rds_postgres from '@site/static/images/integrations/data-ingestion/clickpipes/postgres/source/rds/security_group_in_rds_postgres.png'; -import edit_inbound_rules from '@site/static/images/integrations/data-ingestion/clickpipes/postgres/source/rds/edit_inbound_rules.png'; -import Image from '@theme/IdealImage'; - - -# RDS Postgres ソースセットアップガイド - -## サポートされているPostgresバージョン {#supported-postgres-versions} - -ClickPipesはPostgresバージョン12以降をサポートしています。 - -## 論理レプリケーションの有効化 {#enable-logical-replication} - -以下の設定がすでに構成されている場合は、このセクションをスキップできます: -- `rds.logical_replication = 1` -- `wal_sender_timeout = 0` - -これらの設定は、以前に別のデータレプリケーションツールを使用していた場合、通常は事前に構成されています。 - -```text -postgres=> SHOW rds.logical_replication ; - rds.logical_replication -------------------------- - on -(1 row) - -postgres=> SHOW wal_sender_timeout ; - wal_sender_timeout --------------------- - 0 -(1 row) -``` - -構成されていない場合は、以下の手順に従ってください: - -1. 必要な設定を持つPostgresバージョンの新しいパラメータグループを作成します: - - `rds.logical_replication`を1に設定 - - `wal_sender_timeout`を0に設定 - - - - - - - -2. 新しいパラメータグループをRDS Postgresデータベースに適用します。 - - - -3. 
変更を適用するためにRDSインスタンスを再起動します。 - - - -## データベースユーザーの設定 {#configure-database-user} - -管理者ユーザーとしてRDS Postgresインスタンスに接続し、以下のコマンドを実行します: - -1. ClickPipes用の専用ユーザーを作成します: - - ```sql - CREATE USER clickpipes_user PASSWORD 'some-password'; - ``` - -2. スキーマの権限を付与します。以下の例は`public`スキーマの権限を示しています。レプリケートしたい各スキーマに対してこれらのコマンドを繰り返します: - - ```sql - GRANT USAGE ON SCHEMA "public" TO clickpipes_user; - GRANT SELECT ON ALL TABLES IN SCHEMA "public" TO clickpipes_user; - ALTER DEFAULT PRIVILEGES IN SCHEMA "public" GRANT SELECT ON TABLES TO clickpipes_user; - ``` - -3. レプリケーション権限を付与します: - - ```sql - GRANT rds_replication TO clickpipes_user; - ``` - -4. レプリケーション用の公開物を作成します: - - ```sql - CREATE PUBLICATION clickpipes_publication FOR ALL TABLES; - ``` - -## ネットワークアクセスの設定 {#configure-network-access} - -### IPベースのアクセス制御 {#ip-based-access-control} - -RDSインスタンスへのトラフィックを制限したい場合は、[文書化された静的NAT IP](../../index.md#list-of-static-ips)をRDSセキュリティグループの`Inbound rules`に追加してください。 - - - - - -### AWS PrivateLinkによるプライベートアクセス {#private-access-via-aws-privatelink} - -プライベートネットワークを通じてRDSインスタンスに接続するには、AWS PrivateLinkを使用できます。接続の設定については、[ClickPipes用のAWS PrivateLinkセットアップガイド](/knowledgebase/aws-privatelink-setup-for-clickpipes)を参照してください。 - -### RDS Proxyの回避策 {#workarounds-for-rds-proxy} -RDS Proxyは論理レプリケーション接続をサポートしていません。RDSで動的IPアドレスを使用していて、DNS名やラムダを使用できない場合、いくつかの代替手段があります: - -1. cronジョブを使用して定期的にRDSエンドポイントのIPを解決し、変更があればNLBを更新します。 -2. RDSイベント通知をEventBridge/SNSとともに使用:AWS RDSイベント通知を使用して自動的に更新をトリガーします。 -3. 安定したEC2:ポーリングサービスまたはIPベースのプロキシとして機能するEC2インスタンスをデプロイします。 -4. TerraformやCloudFormationなどのツールを使用してIPアドレス管理を自動化します。 - -## 次は何ですか? {#whats-next} - -これで、[ClickPipeを作成](../index.md)し、PostgresインスタンスからClickHouse Cloudにデータを取り込む準備が整いました。 -Postgresインスタンスの設定時に使用した接続詳細をメモしておくことを忘れないでください。ClickPipeの作成プロセス中に必要になります。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/postgres/source/rds.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/postgres/source/rds.md.hash deleted file mode 100644 index ac3a3256a6e..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/postgres/source/rds.md.hash +++ /dev/null @@ -1 +0,0 @@ -2e99c1b72f9b936b diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/postgres/source/supabase.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/postgres/source/supabase.md deleted file mode 100644 index 547f18ef196..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/postgres/source/supabase.md +++ /dev/null @@ -1,88 +0,0 @@ ---- -sidebar_label: 'Supabase Postgres' -description: 'Set up Supabase instance as a source for ClickPipes' -slug: '/integrations/clickpipes/postgres/source/supabase' -title: 'Supabase Source Setup Guide' ---- - -import supabase_commands from '@site/static/images/integrations/data-ingestion/clickpipes/postgres/source/setup/supabase/supabase-commands.jpg' -import supabase_connection_details from '@site/static/images/integrations/data-ingestion/clickpipes/postgres/source/setup/supabase/supabase-connection-details.jpg' -import Image from '@theme/IdealImage'; - - -# Supabase ソース設定ガイド - -これは、ClickPipes で使用するために Supabase Postgres を設定する方法のガイドです。 - -:::note - -ClickPipes は、IPv6 経由でネイティブに Supabase をサポートしており、シームレスなレプリケーションを実現します。 - -::: - - -## 権限とレプリケーションスロットを持つユーザーの作成 {#creating-a-user-with-permissions-and-replication-slot} - -CDC に適した必要な権限を持つ新しいユーザーを 
ClickPipes 用に作成し、レプリケーションに使用するパブリケーションを作成しましょう。 - -これには、Supabase プロジェクトの **SQL エディタ** に移動します。 -ここで、以下の SQL コマンドを実行できます: -```sql - CREATE USER clickpipes_user PASSWORD 'clickpipes_password'; - GRANT USAGE ON SCHEMA "public" TO clickpipes_user; - GRANT SELECT ON ALL TABLES IN SCHEMA "public" TO clickpipes_user; - ALTER DEFAULT PRIVILEGES IN SCHEMA "public" GRANT SELECT ON TABLES TO clickpipes_user; - --- ユーザーにレプリケーションの権限を与える - ALTER USER clickpipes_user REPLICATION; - --- パブリケーションを作成します。これをミラー作成時に使用します - CREATE PUBLICATION clickpipes_publication FOR ALL TABLES; -``` - - - - -**Run** をクリックして、パブリケーションとユーザーを準備します。 - -:::note - -`clickpipes_user` と `clickpipes_password` を希望のユーザー名とパスワードに置き換えることを忘れないでください。 - -また、ClickPipes でミラーを作成する際に同じパブリケーション名を使用することを覚えておいてください。 - -::: - - -## `max_slot_wal_keep_size` の増加 {#increase-max_slot_wal_keep_size} - - -:::warning - -このステップでは、Supabase データベースが再起動し、短いダウンタイムが発生する可能性があります。 - -Supabase データベースの `max_slot_wal_keep_size` パラメータをより高い値(少なくとも 100GB または `102400`)に増加させるには、[Supabase Docs](https://supabase.com/docs/guides/database/custom-postgres-config#cli-supported-parameters)に従ってください。 - -この値のより良い推奨事項については、ClickPipes チームにお問い合わせください。 - -::: - -## Supabase 用の接続詳細 {#connection-details-to-use-for-supabase} - -Supabase プロジェクトの `プロジェクト設定` -> `データベース`(`設定` の下)に移動します。 - -**重要**: このページで `接続プーラーを表示` を無効にし、`接続パラメータ` セクションに移動して、パラメータをメモまたはコピーします。 - - - -:::info - -接続プーラーは CDC ベースのレプリケーションではサポートされていないため、無効にする必要があります。 - -::: - - -## 次は何をすべきか? {#whats-next} - -これで、[ClickPipe を作成](../index.md)し、Postgres インスタンスから ClickHouse Cloud にデータを取り込むことができます。 -ClickPipe 作成プロセス中に必要になるため、Postgres インスタンスを設定する際に使用した接続詳細をメモしておくことを忘れないでください。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/postgres/source/supabase.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/postgres/source/supabase.md.hash deleted file mode 100644 index c7590e874d5..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/postgres/source/supabase.md.hash +++ /dev/null @@ -1 +0,0 @@ -b9112f4063157960 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/postgres/source/timescale.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/postgres/source/timescale.md deleted file mode 100644 index 479cdd7b582..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/postgres/source/timescale.md +++ /dev/null @@ -1,105 +0,0 @@ ---- -sidebar_label: 'Timescale' -description: 'Set up Postgres with the TimescaleDB extension as a source for ClickPipes' -slug: '/integrations/clickpipes/postgres/source/timescale' -title: 'Postgres with TimescaleDB source setup guide' -keywords: -- 'TimescaleDB' ---- - -import BetaBadge from '@theme/badges/BetaBadge'; - - -# Postgres with TimescaleDB Source Setup Guide - - - -## 背景 {#background} - -[TimescaleDB](https://github.com/timescale/timescaledb) は、Timescale Inc によって開発されたオープンソースの Postgres 拡張機能で、Postgres から離れることなく分析クエリのパフォーマンスを向上させることを目的としています。これは、「ハイパーテーブル」を作成することによって実現され、これらは拡張によって管理され、「チャンク」への自動パーティショニングをサポートします。ハイパーテーブルは、透過的な圧縮とハイブリッドの行列ストレージ(「ハイパコア」として知られる)もサポートしていますが、これらの機能は専有ライセンスを持つバージョンの拡張が必要です。 - -Timescale Inc は、TimescaleDB のために二つの管理サービスを提供しています: -- `Managed Service for Timescale` -- `Timescale Cloud`。 - -TimescaleDB 拡張機能を利用できる管理サービスを提供しているサードパーティのベンダーもありますが、ライセンスの関係でこれらのベンダーはオープンソースバージョンの拡張のみをサポートしています。 - -Timescale 
ハイパーテーブルは、いくつかの点で通常の Postgres テーブルとは異なる動作をします。これがレプリケーションのプロセスにいくつかの複雑さをもたらすため、Timescale ハイパーテーブルをレプリケートする能力は **最善の努力** として検討されるべきです。 - -## サポートされている Postgres バージョン {#supported-postgres-versions} - -ClickPipes は、Postgres バージョン 12 以降をサポートしています。 - -## 論理レプリケーションを有効にする {#enable-logical-replication} - -手順は、TimescaleDB がデプロイされている Postgres インスタンスによって異なります。 - -- 管理サービスを使用している場合は、サイドバーにリストされているプロバイダーのガイドに従ってください。 -- 自分で TimescaleDB をデプロイしている場合は、一般的なガイドに従ってください。 - -他の管理サービスについては、論理レプリケーションを有効にするためにプロバイダーにサポートチケットを提出してください。 - -:::info -Timescale Cloud は、CDC モードの Postgres パイプに必要な論理レプリケーションを有効にすることをサポートしていません。その結果、Timescale Cloud のユーザーは Postgres ClickPipe でデータの一度きりのロード(`Initial Load Only`)しか行えません。 -::: - -## 構成 {#configuration} - -Timescale ハイパーテーブルは、そこに挿入されたデータを保存しません。代わりに、データは `_timescaledb_internal` スキーマ内の複数の対応する「チャンク」テーブルに保存されます。ハイパーテーブル上でクエリを実行することは問題ではありません。しかし、論理レプリケーション中は、ハイパーテーブルの変更を検出する代わりにチャンクテーブルで変更を検出します。Postgres ClickPipe には、チャンクテーブルの変更を親のハイパーテーブルに自動的にマッピングするロジックがありますが、追加の手順が必要です。 - -:::info -データの一度きりのロード(`Initial Load Only`)だけを行いたい場合は、ステップ2以降をスキップしてください。 -::: - -1. パイプ用の Postgres ユーザーを作成し、レプリケートしたいテーブルに対して `SELECT` 権限を付与します。 - -```sql - CREATE USER clickpipes_user PASSWORD 'clickpipes_password'; - GRANT USAGE ON SCHEMA "public" TO clickpipes_user; - -- 必要に応じて、これらの GRANT をスキーマ全体ではなく個別のテーブルだけに制限できます - -- ただし、ClickPipe に新しいテーブルを追加する際には、それらもユーザーに追加する必要があります。 - GRANT SELECT ON ALL TABLES IN SCHEMA "public" TO clickpipes_user; - ALTER DEFAULT PRIVILEGES IN SCHEMA "public" GRANT SELECT ON TABLES TO clickpipes_user; -``` - -:::note -`clickpipes_user` と `clickpipes_password` を希望のユーザー名とパスワードに置き換えてください。 -::: - -2. Postgres スーパーユーザー/管理ユーザーとして、レプリケートしたいテーブルとハイパーテーブルを持つソースインスタンスにパブリケーションを作成し、**`_timescaledb_internal` スキーマ全体も含める必要があります**。ClickPipe を作成する際には、このパブリケーションを選択する必要があります。 - -```sql --- ClickPipe に新しいテーブルを追加する際には、それらも手動でパブリケーションに追加する必要があります。 - CREATE PUBLICATION clickpipes_publication FOR TABLE <...>, TABLES IN SCHEMA _timescaledb_internal; -``` - -:::tip -`FOR ALL TABLES` のパブリケーションを作成することをお勧めしません。これにより、Postgres から ClickPipes へのトラフィックが増加し(パイプに含まれない他のテーブルの変更を送信するため)、全体の効率が低下します。 -::: - -:::info -一部の管理サービスでは、管理ユーザーにスキーマ全体のパブリケーションを作成するための必要な権限を付与していない場合があります。この場合は、プロバイダーにサポートチケットを提出してください。あるいは、このステップと次のステップをスキップし、一度きりのデータロードを行うこともできます。 -::: - -3. 
前に作成したユーザーにレプリケーション権限を付与します。 - -```sql --- ユーザーにレプリケーション権限を付与します - ALTER USER clickpipes_user REPLICATION; -``` - -これらの手順を終えたら、[ClickPipe を作成する](../index.md)ことができるはずです。 - -## トラブルシューティング {#troubleshooting} - -テーブルの初回ロードがエラーで失敗することがあります: - -```sql -ERROR: transparent decompression only supports tableoid system column (SQLSTATE 42P10) -``` - -これらのテーブルでは、[圧縮](https://docs.timescale.com/api/latest/compression/decompress_chunk)や[ハイパコアカラムストア](https://docs.timescale.com/api/latest/hypercore/convert_to_rowstore)を無効にする必要があるかもしれません。 - -## ネットワークアクセスを構成する {#configure-network-access} - -Timescale インスタンスへのトラフィックを制限したい場合は、[文書化された静的 NAT IP](../../index.md#list-of-static-ips)を許可リストに追加してください。これを行うための手順はプロバイダーによって異なるため、サイドバーにリストされている場合は一覧をご覧になるか、チケットを提出してください。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/postgres/source/timescale.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/postgres/source/timescale.md.hash deleted file mode 100644 index 37ddccb7f50..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/postgres/source/timescale.md.hash +++ /dev/null @@ -1 +0,0 @@ -d3e2dc57289d5633 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/postgres/table_resync.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/postgres/table_resync.md deleted file mode 100644 index 2e600ff5086..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/postgres/table_resync.md +++ /dev/null @@ -1,27 +0,0 @@ ---- -title: '特定のテーブルの再同期' -description: 'Postgres ClickPipe内の特定のテーブルを再同期します' -slug: '/integrations/clickpipes/postgres/table_resync' -sidebar_label: 'テーブル再同期' ---- - - - - -# 特定のテーブルの再同期 {#resync-tables} - -パイプの特定のテーブルを再同期することが有用なシナリオがあります。いくつかのサンプルユースケースとしては、Postgresでの大規模なスキーマ変更や、ClickHouseでのデータの再モデル化が考えられます。 - -ボタンのクリックで個別のテーブルを再同期する作業は進行中ですが、このガイドでは、Postgres ClickPipeでこれを実現するための手順を共有します。 - -### 1. パイプからテーブルを削除する {#removing-table} - -これは、[テーブル削除ガイド](./removing_tables)に従って行うことができます。 - -### 2. ClickHouseでテーブルをトランケートまたは削除する {#truncate-drop-table} - -このステップは、次のステップでこのテーブルを再追加する際にデータの重複を避けるためのものです。これを行うには、ClickHouse Cloudの**SQL Console**タブに移動し、クエリを実行します。PeerDBはデフォルトでReplacingMergeTreeテーブルを作成するため、テーブルが一時的な重複が無害なほど小さい場合は、このステップをスキップできます。 - -### 3. 
再度ClickPipeにテーブルを追加する {#add-table-again} - -これは、[テーブル追加ガイド](./add_table)に従って行うことができます。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/postgres/table_resync.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/postgres/table_resync.md.hash deleted file mode 100644 index 8706c30b037..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/postgres/table_resync.md.hash +++ /dev/null @@ -1 +0,0 @@ -1fcb2a2ef7b56839 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/postgres/toast.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/postgres/toast.md deleted file mode 100644 index 0e01fadc36c..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/postgres/toast.md +++ /dev/null @@ -1,65 +0,0 @@ ---- -title: 'TOASTカラムの処理' -description: 'PostgreSQLからClickHouseへデータをレプリケートする際にTOASTカラムの処理方法を学びます。' -slug: '/integrations/clickpipes/postgres/toast' ---- - - - -When replicating data from PostgreSQL to ClickHouse, it's important to understand the limitations and special considerations for TOAST (The Oversized-Attribute Storage Technique) columns. This guide will help you identify and properly handle TOAST columns in your replication process. - -## What are TOAST columns in PostgreSQL? {#what-are-toast-columns-in-postgresql} - -TOAST (The Oversized-Attribute Storage Technique)は、PostgreSQLにおける大きなフィールド値を処理するためのメカニズムです。行が最大行サイズ(通常は2KBですが、PostgreSQLのバージョンと正確な設定に応じて異なる場合があります)を超えると、PostgreSQLは自動的に大きなフィールド値を別のTOASTテーブルに移動し、主テーブルにはポインタのみを保存します。 - -重要なのは、Change Data Capture(CDC)中に、変更されていないTOASTカラムはレプリケーションストリームに含まれないことです。これにより、適切に処理されないと不完全なデータレプリケーションが発生する可能性があります。 - -初回のロード(スナップショット)中は、TOASTカラムを含むすべてのカラム値が、そのサイズに関係なく正しくレプリケートされます。本ガイドで説明する制限は、初回のロード後の継続的なCDCプロセスに主に影響を及ぼします。 - -TOASTおよびその実装に関する詳細は、こちらで読むことができます: https://www.postgresql.org/docs/current/storage-toast.html - -## Identifying TOAST columns in a table {#identifying-toast-columns-in-a-table} - -テーブルにTOASTカラムがあるかどうかを識別するには、次のSQLクエリを使用できます。 - -```sql -SELECT a.attname, pg_catalog.format_type(a.atttypid, a.atttypmod) as data_type -FROM pg_attribute a -JOIN pg_class c ON a.attrelid = c.oid -WHERE c.relname = 'your_table_name' - AND a.attlen = -1 - AND a.attstorage != 'p' - AND a.attnum > 0; -``` - -このクエリは、TOASTされる可能性のあるカラムの名前とデータタイプを返します。ただし、このクエリは、データタイプとストレージ属性に基づいてTOASTストレージの対象となるカラムのみを識別することに注意することが重要です。これらのカラムが実際にTOASTされたデータを含むかどうかを判断するには、これらのカラムの値がサイズを超えているかどうかを考慮する必要があります。データの実際のTOASTは、これらのカラムに格納されている具体的な内容によります。 - -## Ensuring proper handling of TOAST columns {#ensuring-proper-handling-of-toast-columns} - -レプリケーション中にTOASTカラムが正しく処理されることを保証するために、テーブルの`REPLICA IDENTITY`を`FULL`に設定する必要があります。これにより、PostgreSQLはUPDATEおよびDELETE操作のためにWALに古い行全体を含めるようになりますので、すべてのカラム値(TOASTカラムを含む)がレプリケーションに利用可能になります。 - -次のSQLコマンドを使用して、`REPLICA IDENTITY`を`FULL`に設定できます。 - -```sql -ALTER TABLE your_table_name REPLICA IDENTITY FULL; -``` - -`REPLICA IDENTITY FULL`を設定する際のパフォーマンス考慮については、[このブログ記事](https://xata.io/blog/replica-identity-full-performance)を参照してください。 - -## Replication behavior when REPLICA IDENTITY FULL is not set {#replication-behavior-when-replica-identity-full-is-not-set} - -`REPLICA IDENTITY FULL`が設定されていないTOASTカラムを持つテーブルの場合、ClickHouseへのレプリケーション中に次のような問題が発生する可能性があります。 - -1. INSERT操作の場合、すべてのカラム(TOASTカラムを含む)が正しくレプリケートされます。 - -2. 
UPDATE操作の場合: - - TOASTカラムが変更されていない場合、その値はClickHouseでNULLまたは空として表示されます。 - - TOASTカラムが変更された場合、正しくレプリケートされます。 - -3. DELETE操作の場合、TOASTカラムの値はClickHouseでNULLまたは空として表示されます。 - -これらの動作は、PostgreSQLのソースとClickHouseのデスティネーション間でデータの不整合を引き起こす可能性があります。したがって、TOASTカラムを持つテーブルに対して`REPLICA IDENTITY FULL`を設定することが、正確で完全なデータレプリケーションを保障するために重要です。 - -## Conclusion {#conclusion} - -TOASTカラムを適切に処理することは、PostgreSQLからClickHouseへのレプリケーション時にデータの整合性を維持するために不可欠です。TOASTカラムを識別し、適切な`REPLICA IDENTITY`を設定することで、データが正確かつ完全にレプリケートされることを確認できます。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/postgres/toast.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/postgres/toast.md.hash deleted file mode 100644 index a3216cb8635..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/postgres/toast.md.hash +++ /dev/null @@ -1 +0,0 @@ -484eb80b9e8480ca diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/secure-kinesis.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/secure-kinesis.md deleted file mode 100644 index 21050e14615..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/secure-kinesis.md +++ /dev/null @@ -1,110 +0,0 @@ ---- -slug: '/integrations/clickpipes/secure-kinesis' -sidebar_label: 'Kinesis Role-Based Access' -title: 'Kinesis Role-Based Access' -description: 'This article demonstrates how ClickPipes customers can leverage role-based - access to authenticate with Amazon Kinesis and access their data streams securely.' ---- - -import secure_kinesis from '@site/static/images/integrations/data-ingestion/clickpipes/securekinesis.jpg'; -import secures3_arn from '@site/static/images/cloud/security/secures3_arn.png'; -import Image from '@theme/IdealImage'; - -この文書では、ClickPipes の顧客が役割ベースのアクセスを利用して Amazon Kinesis に認証し、安全にデータストリームにアクセスできる方法を示します。 - -## はじめに {#introduction} - -安全な Kinesis アクセスの設定に dive する前に、そのメカニズムを理解することが重要です。ここでは、ClickPipes が顧客の AWS アカウント内で役割を引き受けて Amazon Kinesis ストリームにアクセスする方法の概要を示します。 - - - -このアプローチを使用することで、顧客はそれぞれのストリームのアクセスポリシーを個別に変更することなく(引き受けた役割の IAM ポリシーで)単一の場所で Kinesis データストリームへのすべてのアクセスを管理できます。 - -## セットアップ {#setup} - -### ClickHouse サービス IAM ロール Arn の取得 {#obtaining-the-clickhouse-service-iam-role-arn} - -1 - ClickHouse クラウドアカウントにログインします。 - -2 - 統合を作成したい ClickHouse サービスを選択します。 - -3 - **設定** タブを選択します。 - -4 - ページの下部にある **ネットワークセキュリティ情報** セクションにスクロールします。 - -5 - 以下のようにサービスに属する **サービスロール ID (IAM)** の値をコピーします。 - - - -### IAM の役割を引き受ける設定 {#setting-up-iam-assume-role} - -#### IAM ロールを手動で作成します。 {#manually-create-iam-role} - -1 - IAM ロールを作成および管理する権限を持つ IAM ユーザーを使用して、ウェブブラウザで AWS アカウントにログインします。 - -2 - IAM サービスコンソールに移動します。 - -3 - 次の IAM およびトラストポリシーで新しい IAM ロールを作成します。これが機能するには IAM ロールの名前は **必ず `ClickHouseAccessRole-` で始まる必要があります**。 - -トラストポリシー(ここで `{ClickHouse_IAM_ARN}` をあなたの ClickHouse インスタンスに属する IAM ロール ARN に置き換えてください): - -```json -{ - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Principal": { - "AWS": "{ClickHouse_IAM_ARN}" - }, - "Action": "sts:AssumeRole" - } - ] -} -``` - -IAM ポリシー(ここで `{STREAM_NAME}` をあなたの Kinesis ストリーム名に置き換えてください): - -```json -{ - "Version": "2012-10-17", - "Statement": [ - { - "Action": [ - "kinesis:DescribeStream", - "kinesis:GetShardIterator", - "kinesis:GetRecords", - "kinesis:ListShards", - "kinesis:SubscribeToShard", - "kinesis:DescribeStreamConsumer", - "kinesis:RegisterStreamConsumer", - 
"kinesis:DeregisterStreamConsumer", - "kinesis:ListStreamConsumers" - ], - "Resource": [ - "arn:aws:kinesis:region:account-id:stream/{STREAM_NAME}" - ], - "Effect": "Allow" - }, - { - "Action": [ - "kinesis:SubscribeToShard", - "kinesis:DescribeStreamConsumer" - ], - "Resource": [ - "arn:aws:kinesis:region:account-id:stream/{STREAM_NAME}/*" - ], - "Effect": "Allow" - }, - { - "Action": [ - "kinesis:ListStreams" - ], - "Resource": "*", - "Effect": "Allow" - } - ] -} -``` - -4 - 作成後に新しい **IAM ロール ARN** をコピーします。これが Kinesis ストリームにアクセスするために必要です。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/secure-kinesis.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/secure-kinesis.md.hash deleted file mode 100644 index 60dbf49171c..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/clickpipes/secure-kinesis.md.hash +++ /dev/null @@ -1 +0,0 @@ -30709c60779cbe0c diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/_category_.yml b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/_category_.yml deleted file mode 100644 index f8179149dd5..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/_category_.yml +++ /dev/null @@ -1,7 +0,0 @@ -position: 5 -label: 'Data Formats' -collapsible: true -collapsed: true -link: - type: doc - id: intro diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/arrow-avro-orc.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/arrow-avro-orc.md deleted file mode 100644 index 48eb681a232..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/arrow-avro-orc.md +++ /dev/null @@ -1,170 +0,0 @@ ---- -sidebar_label: 'Avro、Arrow および ORC' -sidebar_position: 5 -slug: '/integrations/data-formats/arrow-avro-orc' -title: 'ClickHouse で Avro、Arrow、および ORC データを操作する' -description: 'Avro、Arrow、および ORC データの ClickHouse での操作方法を説明するページ' ---- - - - - -# ClickHouseにおけるAvro、Arrow、およびORCデータの操作 - -Apacheは、人気のある [Avro](https://avro.apache.org/)、 [Arrow](https://arrow.apache.org/)、および [Orc](https://orc.apache.org/) を含む分析環境で積極的に使用される複数のデータ形式をリリースしました。ClickHouseは、これらの形式を使用してデータのインポートとエクスポートをサポートしています。 - -## Avro形式でのインポートとエクスポート {#importing-and-exporting-in-avro-format} - -ClickHouseは、Hadoopシステムで広く使用されている [Apache Avro](https://avro.apache.org/) データファイルの読み書きをサポートしています。 - -[avroファイル](assets/data.avro)からインポートするには、`INSERT`ステートメントで [Avro](/interfaces/formats.md/#data-format-avro) 形式を使用します: - -```sql -INSERT INTO sometable -FROM INFILE 'data.avro' -FORMAT Avro -``` - -[ファイル()](/sql-reference/functions/files.md/#file) 関数を使用することで、実際にデータをインポートする前にAvroファイルを探索することもできます: - -```sql -SELECT path, hits -FROM file('data.avro', Avro) -ORDER BY hits DESC -LIMIT 5; -``` -```response -┌─path────────────┬──hits─┐ -│ Amy_Poehler │ 62732 │ -│ Adam_Goldberg │ 42338 │ -│ Aaron_Spelling │ 25128 │ -│ Absence_seizure │ 18152 │ -│ Ammon_Bundy │ 11890 │ -└─────────────────┴───────┘ -``` - -Avroファイルにエクスポートするには: - -```sql -SELECT * FROM sometable -INTO OUTFILE 'export.avro' -FORMAT Avro; -``` - -### AvroとClickHouseデータ型 {#avro-and-clickhouse-data-types} - -Avroファイルのインポートまたはエクスポート時には [データ型マッチング](/interfaces/formats/Avro#data-types-matching) を考慮してください。Avroファイルからデータを読み込む際には明示的な型キャストを使用して変換してください: - -```sql -SELECT - date, - 
toDate(date) -FROM file('data.avro', Avro) -LIMIT 3; -``` -```response -┌──date─┬─toDate(date)─┐ -│ 16556 │ 2015-05-01 │ -│ 16556 │ 2015-05-01 │ -│ 16556 │ 2015-05-01 │ -└───────┴──────────────┘ -``` - -### Kafka内のAvroメッセージ {#avro-messages-in-kafka} - -KafkaメッセージがAvro形式を使用する場合、ClickHouseは [AvroConfluent](/interfaces/formats.md/#data-format-avro-confluent) 形式と [Kafka](/engines/table-engines/integrations/kafka.md) エンジンを使用してそのようなストリームを読み取ることができます: - -```sql -CREATE TABLE some_topic_stream -( - field1 UInt32, - field2 String -) -ENGINE = Kafka() SETTINGS -kafka_broker_list = 'localhost', -kafka_topic_list = 'some_topic', -kafka_group_name = 'some_group', -kafka_format = 'AvroConfluent'; -``` - -## Arrow形式での作業 {#working-with-arrow-format} - -もう一つの列指向形式は [Apache Arrow](https://arrow.apache.org/) で、ClickHouseではインポートおよびエクスポートをサポートしています。[Arrowファイル](assets/data.arrow)からデータをインポートするには、[Arrow](/interfaces/formats.md/#data-format-arrow) 形式を使用します: - -```sql -INSERT INTO sometable -FROM INFILE 'data.arrow' -FORMAT Arrow -``` - -Arrowファイルへのエクスポートも同様に機能します: - -```sql -SELECT * FROM sometable -INTO OUTFILE 'export.arrow' -FORMAT Arrow -``` - -また、[データ型マッチング](/interfaces/formats/Arrow#data-types-matching) を確認して、手動で変換する必要があるかどうかを確認してください。 - -### Arrowデータのストリーミング {#arrow-data-streaming} - -[ArrowStream](/interfaces/formats.md/#data-format-arrow-stream) 形式を使用してArrowストリーミング(メモリ内プロセッシングに使用される)で作業することができます。ClickHouseはArrowストリームの読み書きが可能です。 - -ClickHouseがどのようにArrowデータをストリーミングできるかを示すために、以下のpythonスクリプトに出力をパイプします(これはArrowストリーミング形式の入力ストリームを読み取り、結果をPandasテーブルとして出力します): - -```python -import sys, pyarrow as pa - -with pa.ipc.open_stream(sys.stdin.buffer) as reader: - print(reader.read_pandas()) -``` - -次に、ClickHouseからデータをストリーミングし、その出力をスクリプトにパイプします: - -```bash -clickhouse-client -q "SELECT path, hits FROM some_data LIMIT 3 FORMAT ArrowStream" | python3 arrow.py -``` -```response - path hits -0 b'Akiba_Hebrew_Academy' 241 -1 b'Aegithina_tiphia' 34 -2 b'1971-72_Utah_Stars_season' 1 -``` - -ClickHouseも同じArrowStream形式を使用してArrowストリームを読み取ることができます: - -```sql -arrow-stream | clickhouse-client -q "INSERT INTO sometable FORMAT ArrowStream" -``` - -`arrow-stream`をArrowストリーミングデータの可能なソースとして使用しました。 - -## ORCデータのインポートとエクスポート {#importing-and-exporting-orc-data} - -[Apache ORC](https://orc.apache.org/) 形式は、通常はHadoop向けに使用される列指向ストレージ形式です。ClickHouseは、[ORC形式](/interfaces/formats.md/#data-format-orc)を使用して [Orcデータ](assets/data.orc)のインポートとエクスポートをサポートしています: - -```sql -SELECT * -FROM sometable -INTO OUTFILE 'data.orc' -FORMAT ORC; - -INSERT INTO sometable -FROM INFILE 'data.orc' -FORMAT ORC; -``` - -また、エクスポートとインポートを調整するために、[データ型マッチング](/interfaces/formats/ORC)と[追加設定](/interfaces/formats/Parquet#format-settings)を確認してください。 - -## さらなる情報 {#further-reading} - -ClickHouseは、さまざまなシナリオやプラットフォームをカバーするために、テキストとバイナリの多くの形式をサポートしています。以下の記事で、さらに多くの形式とそれらとの作業方法を探検してください: - -- [CSVとTSV形式](csv-tsv.md) -- [JSON形式](/integrations/data-ingestion/data-formats/json/intro.md) -- [正規表現とテンプレート](templates-regex.md) -- [ネイティブおよびバイナリ形式](binary.md) -- [SQL形式](sql.md) - -また、[clickhouse-local](https://clickhouse.com/blog/extracting-converting-querying-local-files-with-sql-clickhouse-local)を確認してください。これは、ClickHouseサーバーを必要とせずにローカル/リモートファイルで作業するためのポータブルなフル機能ツールです。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/arrow-avro-orc.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/arrow-avro-orc.md.hash deleted file mode 100644 index 17dca17bf07..00000000000 --- 
a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/arrow-avro-orc.md.hash +++ /dev/null @@ -1 +0,0 @@ -4d927a358f11b24e diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/assets/arrays.json b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/assets/arrays.json deleted file mode 100644 index 8f17589ef18..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/assets/arrays.json +++ /dev/null @@ -1,3 +0,0 @@ -["Akiba_Hebrew_Academy", "2017-08-01", 241], -["Aegithina_tiphia", "2018-02-01", 34], -["1971-72_Utah_Stars_season", "2016-10-01", 1] diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/assets/capnp.bin b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/assets/capnp.bin deleted file mode 100644 index b9e4254d24591447e921bc0eba53bf315a0b3158..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 55432 zcmaLAe~cVSmfzRgr0iYp?e1xI&v!fewL5z|dppOv>+0r@Zl0}U_Aj!VKewC1*`c6I zXH{iaWmk1&PgWIssv+2L8?bz|WyzHF4+R)F0+y_~6%3Dl*|h#x(PtZ;kzwB$HtYco zFh-Dp8w0j5e+cmSZ20qyjL3S~h#b8Fup`UdzVQcr!23(U^xm{4xJ6ykA?JoBJhxe~DNSqd``r$4MOR9VY`1 z_b>Wy!T%tdoBPxJ7X0VLsNYK_MbfyL^he1-W388s$4NIIcH^7bFf9>nIlr?$H+R?L zKjyo+a(OAfos6@5m<-~J3kxNR`?Tep2k#~il319HX#2e{g zkdDXk?zl)Yk7LvS3#1?F<=SCCDUx9p?_>{C{Z_E`*e3li^GxZhy`z2$gjUiyAK&QZ z2kCUkLmzDW{dMyBG0%nf5i#l%S&|Ind%ZO8=qh^{2*u|252)uKfTui9h|pCsnCvILc&(RbWsBN+{k7Fu`JMycT)DW^xOgGn zz10{c#eNcx(qxpEjcW5zdw$dBqkfLELiFBF<6Vf52LucM2h=O{^FHq`G%m%RNq$&l zqXP$g3jS}<&%ebp)x)Jf>J8Fqyn8q)2AS_;o4)84Q8toKCq?Rz_^|nGM00cVJQJSEb)j*TOykjEn)K6R!~=qb zSO5Pto(aB1Tpdi(_ohjjje6XmjIT|KNqmijAimwCm$l+fI!fDxV;90KgQeTIXwT3tpYb~C7Fl~TP%}cgML!*9 z@Ft4#Fl&c~wfHK>e?kXQ1jpZ~&m{;dAGeahAikLJ^!7MSDxRQpY`MipR0c(rJL(Sd(I|<>$#9zRTfx$m+AoZgk9l`_sp!D#hr`iy z@DSdR#9PBX2)ziE2Iia+A*lS~bAQJDJvaB9OkMdkBpLBTnqZhQcO=73{3i6WmJD+k z;2;^~2Q|VeJHXaM^mf&!d%}0L$9N&dn`wTS6dj(_+uzQsk9fD4FhAzZt)uht9oo+! 
z_z#F^x!UWP7$rqMj5jC!NsoE6L^$1Ep}wCAemI;Ak|G{t-9)qgNTkPH4%K?> zdO1aZ-vdwiJtfj!7veW}<2#2?SILQZS_VsBYUi^&7TyzL6t5ySFnn+2lM(Y@5(Xdd z1gD_>QGUWv%*(Fay{l|TH zg9-oZ`XDGB%|G98f6vW5nup%H>99R@{W$KW@!cY4`p#V6GH`EC+JltQ{8pMAGNno2 zxyhtHjH3gDgm&UMnef(wh^@ z*7qs!*AwQ|q%Vm}YZiXZMxbry-=v+DuiEKzBJH#!DR4T5pR7-&kPpHMYm32bO=9WQ z@=eieG(~*$y=u$%cj$lRq4-2_!*SMNTqUhRHafN5vHhs^%Yw&~{Dl16lpHn$D?Z4_ zNMcjk+(}bRJVo{{z&_Y|sy$V1<^LH`@?4|25U&=KVVZ%MGCvny5c2r8{F+Z5c>G7g zUy9!>Mu~*4{Tw0Nqqr|yew*(VSN_Gzjm2e{fY>4|rJ9wUL|GGrEImm6rh8raPxy|8 zor2MDt9>I)!%EJ=OD_0D58vW@jc&=?`FK2;71h>H^O)L8?eiYd$%m!<0A1fHvTj%v z)W@0lYZ%vloGmq&l5XasI2v}5!x^T7?2yIZ==3AQ4>q6Q18)sN5T5!oI+(ItI5U3K!rGLk#FFJ_^gKTmTcbVRY<8)G!yF`x);xnSJW%qYE&qgf> zJjvX9OUEitZJpgJG}38f76I|~tjmg`-OF=Romzb~zo;&XqNCNMKZ)BtgqGP9YU{7{ z*bh9s`ektuS?t!voBUY?TRy@6m%LH>pAg|E7faz}W7tJ-@1*g)oEfE3q5xy_Q@@6F z>@n}wdK0Y}S);7xN4`?F-88;LB+Bm!F}iju+PT3W1Y52@zdjp3pYqN#3FTU3`dX!|3YI_olmtXS zf-A=l>#BPYa@I*YZo0Uaw@^HpC7P0O!ItlYbb{RC=z5SG!(D3YBul3n?}4wDbUHc6 zhEVDr^IOHi%X}=J`1l`@k;?m&7`e`2kTYL&C^e!@iNvR^r^feHo(WHUJled=G9hWF z{WLw)(iH)=T>ru^kb}+gy*BQDhjJ?o#rKJBzWQ-C+=rKT<6LrgR6YdE1xr7o%U|KC^8J{IT-apL!FvZypIkCC|UBXTAcFC z_M_x6$vH~@Jz}-w(sI7Ja%rV;S>)k@#d{Sz$)Un?bW4-lt)kKpnlPQJ1aJfXW<<+GWWcyt;@N!EU-LW`?ly624KA4-eqp)PUmE{{ZxJ=W=W(>L zWHi4vea)M9JiNy1x>o9k$>6}`VBtl7U-R&qhgR2i;}#qdj&+doTj4Z(sJEk^9T;eV zBv*pI0IzvPg%{2V5&i~S6uVxZun>0ecKxAo`12*o+&)OnmF5akc|Y}9c~%|=tasKK z;u3@_DaYy_YuolOF7F4P8?UjX81?Dz6|(pC0FGN43K|AFD;uVbP{F!vw}Sh>>XAQto^V z373Rp@x^EUtj8Chh!FFbTahxvCRlc$+2~D2S=;opEw|`m#p8=!*C&iCB&jh|pfn{Fa6&@uqwCcDJsZv2gQ-sZStr_9~%Gvm4|oqw0a+vfK;2TR0K@0>1#ZM zd0FXW7fI1G<-Np#x(^Gl@%}9oLn_Y>F&cKJs2r*`7lLg!@s+>IGr_Cf9e=%x(Z#J2 z5o*Rxr(0vPHb1Sm!}vQPjR?ijVTNjdkabwQ^Q6xIw7xvUGv(*_nA3CRBiPa21YRrn z5NtluAOA8hgy-tTqO6tYebb$_dI|p5J$zZOMU4}*ox?6GY*aQwCZD=&Z0SLA#5s>I zdVms=G?zxDm&6P$BUKRtjTtM0D# zx9H9@cs+a^h{Yn88|6rw_>!Vf+d{!cwt1NZS1)h8w>XY`sJu-{G0ke?p8} zoyj2n5d@2!BSRlGTdvmdtR2GjTY=!($>aOkaT}r1Pg$PQs^@vX_&uULdtufN8@uklRz9TTxxzA|HF>>^TQ3gx*(qD)1`4O)xIe)8@4>Vw4z^s=?m9h{R zo3Hc@L7)3v_}FNQl%;c8Oh&@rL(=hUXp1NQ8}Ob9@9>aC&IsG@?xY1%J%E*^zMf}! zub_3G_&vkS)$wH<|3;pmh`L6Y{bT6j6u~$v#3W?1-lM#HAmp<5Xi|zydG4G?+ z$j{sH`hD6=!S$Gplj;TObp z^!Sn~S>dH&EZFj?-_CjX6TYJkGsY0P11sgwTWT!4%1&KH`1sYznu@NU+ZgY=X+haw zLp&tdepWrc$1}lyPK;WUc)gdjviRD71uqhsGit`W?@W?lDYp4p{;PhtksqUe?q&JZ zXT~!-J_SF|Gv)U_5jtipYz-e`>6c~``K~zRxq>aH=;`0!neaX%t}~E}EPjySA5!um z>43-iA^#roUcr`A^VR>rGvTXsBZgmr8_JNGE(}}?Z}qV|f|sQ^>(>-VwqD|^JrY;? 
zkBR8*7EnL8TGKc>g6KmYysFhp{2{c5<;OHZE&uT+dPgq+q&>;$DXs;|HVfwA~1xA>g$75!sBOU}oa zF$t_sn7~HPezuxAnVr`z*m7%LJ;PJsKPNgFB|dOUTwIFi6vq!>_ zD|+?8tZPog|FQWpAN4yc&JLa_yUYpIlEs$mr_s#c;|cJXa&WEaPYw@J?(JnoKjEPd zHXqFkVct36J?$a89u`O`QOe%8`J9js#d7t34rCrL6_)WZ(7g&+*VMa%MK}M`J7{-^u3_RP8C$yu|S9&`{bmT?0htiF|L$K{3 zf2`jFSnyAYl1Fmpi#ur4hrKkLt=8(}#M<*p{)!tLmd{%ZtU)%#EXcC5`cdX=$C>*3 zhdfrkZk%P(jnRj?gVt!U$CwF62JVa-mkF?e&F&}I`8XsyGmDlCd z!A%9r;az%^C7#2u?V@=I6bG+yv5E$(AIp@uhdQ0g9wpU;Z}ZXo^Vi9w$_MFSGV1jc z@yp~z{_x?|5?1hzKios{g^NqPz0h2X7MeV+f~6Pn)zIIfw+$4dQxq;}O~&ZSPg!ou zBy9bJ|63GX`KsSHL?c<7X(5rD!fcVmJ6k{T<(yW`J(faLp}*=9w+bdiZsjh!t3tuk{+Ogy*;nru;tKrnD=l>clp9aS)&hgd_4k5 z+VM;7I}e2~6NXP_`yos=T6%gTLNJdLAY^LQ>l#y3o8 z2dPmGo_CSRfWAK;SAyfPyB$*DY-bd+zp-kz##3QUs=$uJU{O~g#sQmBo zfvIEJ>8hRgWqX$FTF%;l6*FroR=bE&<2XSAd5Gh=Pp;O#O80k3PwA?E-8yeIV>EQA z-gSo1B)G5E`3O;`fov|2iZrzUJ-RaQTowJcLKFUy$py@9~tGZ*)SrvvLdeC}D{i$+XAx3=+ zC!Tcdd?fwiZ4dX6;D#CEkDBu+qtNTo0~X+z{;a~#jyu&i$bI|ZZ^FeH8fz$Zy8Lv( z!b|@d#zl*_(XfqT>^8oeGliXnS3SN3oa%8xyx6?l^p-4KflFp<;k7Q@0$A`WkCUfw zwXaQBxbxEmTmKWvqy7{8r$py(BK|jw?;^5cM1U-DV}Qsidbatge827SkNL*Fv!n?G z^+M7~ctEi5Ke>8pUM+e!TH;TSSP~S}wtV6TVO*S`_CPols1nwCN55=TdI< z{Dl8H&s0CP_ci!CDbyrOOTXeHw>wd_ft*-&osy}wk6qWe@y3h_&X}qu`mJEgEBgEIc&2=x63cb! zA=4_}ju_ifBJi{fwjb0kS3nY;=xP-`&=hmmTC%r?0@Wc{c+r(o3h>YsUK*P?sQ32M zLFuky;Wghc`Sexp=E@547Jl6F>Ts#o*CAT(U-s#Lz&GQ68Agq8L;KTi{kV-`p;Cpo z6fB)sJHh7;AHDtBq`>9gyQ{N$#mV#`iG;SdY22 z{A0n(YRCQxY|j`}4)Ylot@oGYm@^*F>#>#_+NyCMEtu@xc=)AqQ7{@+TTZn@DCa)u ztg&iiJ{sitO!l+%aRU4~pWi1cC+?$6vrH+7ywyeqoDr56d0PgXpVlA0&tv7O{GxXz z+I6(s%V!N^;WgfZTq^oqS;ilEb23c&c#mW}AlP@}n%>pV_tmsqr`Qk)QYYZk%|h>l;7biW;k1H{+X=chiFw+{F0=8FpH$r~3U@eSWHERQf|=pu|yN7Zcu0 z%~r!xd;TjHuNvR07;)1fUbbCcyI}i4E$ z;G;cc>tT-kfvu&KK}M5qcQzP+vH58{s9%-ej_?QY{`2vz+x#7Zh5zvC%r5W%0Q86P zoncbrtkymxc|~a}YQ8`YZZWwOc`!w0N4=0^SqhwJ;ZNCdUH=j7HRIc~u5)>g_0g z9Prd{(WuLecwVEGoj7Il+kF1;>TDhC%2&}xRIt90y?xa85|hPKJA{0mlaIWS8W)yv z^1@t5Lyl4{rW4-O%V+ILDxbE`+`6@f`T2fQ_*I_mXW7Tj0H?f;iKSmSn^fLPreT+g zEtlH+D<0mBlL3CKauH=BO9$No<7>fd7i>Oi?|Ghve6FW^*vMtH>fp~>!k^CPRS*A^ z?+eX^6=v$2tP=VdN@P&?@UuKEiCMZ=|EivXe}j(*1Fd#^UQhNjee5UuEXI#mQu3z!odN2CEw|*tuklpr zd`67uW^E!BCFeN0Xm7!>FH{|UKsQPKT3zWW2u^e#=;WE|IU42dtXu(U6Uq)YRi;P$nsC`XR<>APq)zm^ zBAn~D=T4{T;tuJl+#;MCSvp|peaI%=+>=F(k4gRu_6gBza|!?bQ;I0tKjND|bag28 zxaB@U8Wrmj%=2C*bH&Tu$!dJw(yycY$uY|iq=19$FnNgm8^=SkalzJ4V~|yQoo)Jp4|*3jeoKAXZcl3-&sQv++41{1={q~!jVUX* z`0k{@6%3Qouv^kejs`FEYAf0gvhvt{9;6CRxXk$5+3?s>nnXL!jOg?z zXphlVkE!g9p)`^;FR(g4k=*%BkN+v(tY`2cERitr!o$wvM1n@vxYF8sT6tP+(OkTU zLYAh%k1>o%ORwU`DyyRG4)hP}c)!^&Os2H6A6;*=I|#@!*nCB=K@Yso>(vBP=G`ef z-?Prk;I*z1zVh8BMlr$#8dv_Tf=%D@jR!zP41KuN(CT$RH+ecyKsnyb* z{N;WCoXV|nule)~)U35}Ed94|6kEioDDoq=urx)GKG^mVfBokaRPd6Yw0=U=b{6Oj zwlqmM2EU=(o#rJ=8fo`G3GC{%FVAjpifwH&Ft2<{2+472?yh9pFN0f|>tuNbC zKS7PlZv_i4J(6G*kBJvoE{LCQ70z#-2L#)$>ObKNevuD5Z_${y*bv-?gAbg)M(%eE zC=r6C1M#1~$up(%nCR9A_&?&$i(MS2P@aWg@$A0JX8>SuhR>1Tq}XF_WF6m4f}X+R zsh-M5`Ke!)8V4vx^LM9c-?0VO#+mq{g63(-f9Kra<$8K?X?gKw>uJILNe?GFX_GF7aF+e_{=RI(R%0%c&g8* z!XNionII9|N{){aFXgFzXMnCdn<>U@|A>$LJ&*rL_}-2V3mU@Lta_nVZsq%Lld1BR zeu2_5&^Cq)acv&Oxeo%7!Zpc;M)DOrF<~J% z6Di?NBd5@*WEG{MV8@O0B`TlNQ=>%(D1_w-a+vs2VCwMVzZzeHe;|12pYP$oIb*Xm zK6nEBucHtXyrUZgE$vg)Q8n`2z+;8;V@fv;_+rfk)0R{78daZ_lXMtq97i$J*V_J3 zJKyy1&-fO9FD8d$=7p#|)_g{fE%Sn4^U*Z zCOe?nz=;0Nb0M3L+VPyv=a_Hou9q1}2+#7H-~qwHYuuOg%L@!|_Reb0u`x~Ae6%kv*fUPRLvC%tCen63(~Mhq)g$O%1>d~D zc!KoUCHYc+D%C^ys>fp@G%vW)+IM~C`@ySP{Us-Ui)W$!rJTav4dd)($C<|YD;{3_ z4tZ@s+h)+imPa7GWT&*Pm)38goT78=1DE4H)TteQ_+Z;x{PAM1EZ-yEN2q5~{PBhy zB{S>M67j6Z*Lc>v75eFl@DAeT3(XS1rmt}-Jk{T&k6*UTx~y68ezM%MKF_JW=RKb2 
zfF@2yjqUVEhi$P=#xfO_yS4dIa&s65F28|}ebMd)=V)t2d%d52N;?!3QsrzBqhg=q z8&HLrBHDJA92n{)zT)PyJ}!g&9fEBKrGFm;rLS?#ArzdN(aM-PZziZ)i!MS+R$|(G zG#&;Ns45rx5HKO4m7hrn7GC38{VsTow@oc7!MIQRg|bdaZ%Cd1f zW(V3T$1AW$hF4Xv`DwlOV>(Lt9TOwY%}I{-*qNKPrXEY}q#(M{I!LgJj&9cDODh-k zw%#8ap8-zwfxj2*0A$EHQ1@Zm*XkLc2!7Cp&E}kLfP&w7#J(I)bSD0;yq*&!k6`e} zuW)dPCJZGQ54_8PZMP4v&glPh@S*{V5y=pvWG`iB0A6}HbYc(HQ9!=Tw%sgWqN#84dZ8_~a*U{4eJ=bBDGTxcH?q;YwvjYSd zlB&Y#a-QLh@_UcCHbp^-ORY2a-mUN4vv^iMc`7{Zm-6z-o7%E4?DMn?7Ek@S$7AL9 zj3~nqZs{FP%i{xi(CmA+c#^~a+}RS((Hq{844Ab9y36C{@`|6MN%94GR(UCYMC80Y ztPqVIgdO!mj0@8XjEZbr2;-;0FX68eqt1T9fA{_??o;o+G$(l=wBrfD(O#D}ph+On zJyeBp)H^`QDuRdCVKtz_I0#Cu$r&Yi8^>*< zWCq5TTlOa5DyrOz%S%fyTW5*hUiWavf@KvqMOMx`^3dV|!S;jtJ?L$o@@|zO!R&=X z6!%BAgC&C*Y1{fpF8e0%D$l1x>@Sy4t-$Y5I^ffR>C-v6*%M7{zQRvH7XD*mRG#R_ zdf*^Eq9aqDg<$1)@$-3J2v2$on5X2`3e3UFS9z8Ov)~BHIp<7eaZ&$DsdZ42Oe+%}rWO<}jq0aQ-5`Jy%9_L47bt zGvTcDQV1Gi(_Tw6i2`HWQG7eBXWckzI|Br&N>-Cnsw8Ee`n3F0c7%_qi0a=WYCYTL z*bVEbYT-qHfleL>Uh~Orl21HM@u-%*_1TVUc*f)To zkt$3EVJn60Kb3#pmtXy7)UR=+4% z`jfut=Xj>_JR@q~pib>9Ej3zlNdo5u!Q!bN{|89IdqUiB{K-l6(u!=^8P4{*^!Pua zz``rXKl>T(P`c8}1`EP2h@<;#Jv`eRXveMkF(D15ZJ#{wd$2f3eZL6mRrpNP$m=!Oc6HkClU5J_8vdIm-}} zYT>;eExhKjKkM^RzpS#gJg-)wJRsO|iT=LIGu1=oiUzosVNEQ}>N?wGwmz_UmR~!1 z$2}@aItOyzD;S8@8_bqZ{rQ)Del6uE^;d$t&`J1J2Ahxg`giBc@@agO`^#CyA^pj- z+xCy8YpW;vOn8UX#0eVrnF};79g)AyIc(Kh%jTqQ5Dj?f zgT)hlzs~c37j@CWA{tLwM0BL*o>jBf&l=BEv!bV!%gamJj>Nv4M$o0%b`(GT5t%3- z(QCAM6QMCHX1j>%a+BoK*BKCkzasb+0)}UNmam*pjyHHOxL5gb{;6w4zK3S+eB4B| zTh+;q+Mws30_cNnfAQPj=DE`OoVdpH-9UeEgk*|^eW)RvRl7FJ1Z+7p9wqt;U+b#U zKZ283(0{`(?&Sq!gAe)4k}z}Vw&j?oo&JJP=M%o8)|5@o&hs2!Etx)96?EbUX=|oB zw*9H~`L{?=<#;6g;J1!ejOE!OkE&qtv`!E6!E+$w_rtcQJl?wR7BFNz>3Z(~TV6Y^ zv_5OD;F5L^Mprg~Mhap!CEm&QUgq~=^>NS0H<*S-y`)3qL&WS`P0J$N`B~!(@^JM$ zCf}&EUuox$-}A0wi{rxFX*Sk2<-!q}EI2vXdWfFy`t&sZ;CB}wot{n%d--Y0b-tv2 z3*+HE@U>plidsu!*KM7=GFaPom%R0g&-eW@Uk=`spDDY}8N9Pox=Z~dAk?|O@9>Fh z_fnbN@D?r|@S!7YQK98JVZtiN?ODr8&yG{+tv+;l_<1Po!!mxLYIg;dLv;TS*#Jio z%5lnhk)7PVuC4zxz5;!H%4^E=5JzjaXx&QHKWQT!H*VnriAJ@=u9r)EILJdS09Mm> zf64)=BjX2P$D^Hx-k@-9KXaivC5Go(&Nl#r#k(d(2ikV6CGfZL5aZGvXeKPZNv;)#F)ouRLW4sT(%IoOvfMcz6NRW3< zQ+Y$McxsnGM-C5lCKf~7k?F44(!rrxJn`}CJgxF$^+sReiYDVowZ+rnS-vU$6~^T! 
zylq~-xa5opvRNQhA=Qtq53~}n?PceWb7an{!<~<+jhovn!D(}UmhcP9`C5NTpZn%n zQ~n`V^DzrGbUiwaT=|Az>n%DJU8&wri7X(r5g&$`6qwd}i8-Lt;>ix6ZWG>PVkzfc zldh>$G4U(}OSifoB#gfk0Fc9_KaEG6O-WBV6bmnUQXYz;+f`j=!RFLb?c%^|bS}F_ z%fs*U?dTj2KV&*j<@NTL9wek+((5Ih*qOB4fuq{{9A8+msu1}RdkchZ4~<`qL#5@~ z!`r!6Q|v0W(NZ60;zxhi<2~Vft(YQzLbObN8|dd|yWZ;Zq2;f1;heNq;q)L|F4>>Y z`}D=X*k3BooD}sAyz6>CI@I<|UJxu@tA7!vU41_0J8Bm?i>Hlye#{o5gI30YJWN)> zCsp`1U+bUsT=?vB%-mUDoOQ`Ybl5w}-Arh;{u4i+=b743?b&QzWI(Zw7nSf^!P1B5 zNwA7LMEGDIW!`a_G7kv0UYg$}M+yD|qV!hM-fG8!j$o9FRKB<8IT8Q_o1f+biQ&R` z^o1@|gJp`;C0GRuFTG!Af7Pd4uOSz6O22+9*mhHYg?)REc?UnZ#01SDLijA0y*|RT z`Dh&@bBfA!LUeMkvSHgMu0}km)4kTi{~y4De~l0PrkBcdBHXEaTmazI(K-?z0q}@Z zYO3K=YfsVdS<+BCpA$FOU&T^GGG~k~tSAq8S!J^Yr<8U9AXxrq$Dfl^kT38o-$|yh z5+)ipcSv8Iv8#s1Z0yA3QKnLFH}T<64$(P&>KvHQ++HQIlZ74OQ}f~1d8T^0^-UX>kr6&6sBFR!g&Xg(_mI&JLDcpYTl)L(D(fr?pmaV{ zTI%21l90n$<-=GqKfYZr-9IWm6XZxoe+SHN16Jb)vyo`?Q$MfsK>3N@qxgP`Lgp@~ zNTJ8$+yh+CWR>Mr6)Zi9p9)v;DKVmgTm*#>pb>L3OT!^zytDMEaT$BOmcv7jdJ86; z&9buPRK5O`hZh|+m+@l1ELOzUTy zS84gj`@-{l`=0dRIP8ygY8lTJY<{Afz#kv;j(JS`?@@6K2C&MJWgxbm;yxu8-e|=nPQh!w3x0T_`&FD97$xoiA%41G%dh$CDHTxp9}uw>q2 z(|SJEuKFJDR&QUgqy@-l*zM=lgkb6Wh4XK!`;rhhU~B^z9d&c&@*vxsb-Al-<-I!d z{8l+viReR;kw2W*_OImKD<01EFQ*~0Fsu2u+ji5q3H5&pUQo}-@ktI)%a<2q;t|!p;y>yy!Aox>`KQ4Lm#u`$f66m^ znAQ-7c5$Slx9aTd=JB5My{HS} zbV&$IX&tfK&Y-rw8aH8Gx(}Y*saWoB4)SS-pDtK<>GkwmoK+@^HX>*!IzU z_d3r6U+{q*Q0$ovnBq@x9I4c3FPymDj(l8jrC6)EfZKYiykTAvU*X(9CQw~qB9(0Y z-Y{Ekwd*(j*RmWZ;8ps)yQ3*5w(+D+HyW?DUDj6DRJ37sn06g7AGTcLBdVjK#=|Pa ztQx9%Si&US@U#qee5*b`M~4gVBcf{xR+vN9){bH@!JHPLZM}rAatL2E?d%EMr!hH@ z+{puil@F!22z+017yFXjfjWEY6~xPG|I{w-nJl&0MpbDhY`z*N^Q5hOOFBrp53{&1 z*D=<{q4WrU={H{}r?!<>st?c7QTtbfA?WP)6KQM;I@@uje}E$|g;hQ1A- zR+QB`1)c-NStO&n;v&t=C+WUUmq+E8l|M&AkFRPrBUIb28pr>Wr_*QPx$^)}s2oG3 z$IwmAIy)Q~Y{$R&Qm|uelNtJ}g-`*fNa2_shAw@};@lYPzo^`jzt8z{yLMc;h}{_7{;tbgbTfgbPT7Qc^ z=I6`yRXP3P{;OQhqbyvo?W^&j-xSgDeXelOG6agp`-C!bgiggkY(Hu|3rErA)5`Ef z;ux@_iXGS1ml{_T%fXA^Xx~FS#cIxZpgrrh%)1b5IVGRjayBpE=fa9MG3PG43{U;Q zwCeJG%D41#9AR}=zQsj%TDw`+_Mh|@K|WXi!LORgOQ^D?f~419V&J#!qyGCs1aap8|q^73WT72W&CmWQ>{_siJRNPR+vsU*+T;9nNBBQJBaTx2}M;c#>08H^oQ9 z$oVh-$Q^BC@7Uv2tz9g?c+NZ2>Ubh{avdq=4ys3i6EtV%giH{>w)s6H)5xC-mL78W zx@>Rk-jfHW4#^jvqmAJtSe(o?3=5aa^lW>Jel#9b5B2NP($b}+6?Xc7UP9ODP4i!v z-yOY`=W=ikEm~`jan>WM+|rLJZN)3ZXwcZ4csN@QjiZomOK`(X%Ojo0!LKsd_7|Om zbf59MW-dkXhKPx^zQnWRU3h9&;eJkBT81s{q+NHMq#R0N-KX#3wB=NN0$nS4Cl4V2 zxC4s#tzgTkd2MmN%un09Ux+H6HZE7{_C2(})Y--0}S z3;^R1wca)-^c_`lgUv^LU+!X6K5e(}dXOHDb0+)4BxJ(3%}0Fg2Odvt3O!%vs0Rj2 zZyInczZbo{?%{XH4Eg&a)2Hr#>S5RJ>R#eq&SrDk6mv38#jL=!<<|Q4fA;CA{UbJm z>CTsX%;ENICtlU?G`_ydGm7B!CF>}taiG)5s~p$H7Af%!-YMAn{^aW19AdS@JK>vq z1>HvfYwpPMa?NVXC;k@fiSGf={VLk+%k8=>-0kK+-r0Ptf2;a9LV+`goAJ&j4d{b~ z7ajiNGiABd-)sKx3_Rs43Dd&MK56OW(j|?1(a7w+Gz+iw$KNJhwU;Xod{jaVyXt*u z>gAWdhT=GQ@lp9xXdfC@o{E9l^wm!HJiNx;8nW0?wW8ty!HysGU+4#_g5F!^zEof} zIJKktL**9SViUQ3wQNJXPE&iRJc4_TkBc-%DeyCSAc%C`dY3VMW1`DCSTJ^hEQ9TD z@zaow=n?;oY}kXpq2+Yk)DW_*kL~X(;6)g3Tiy_E+e!M5`@B~VN@Q+FIh3& znN(X*9lV`yH2&v#D)>){moMtX<$LTBJCw;5#XQQ&Qzzot{3H+m?m1I`(ILAVts=|z zhv+cyS@lQMouEF(U!DV9Ylp1x&gP?akMzna=ZWxee&Q@~9q7!-m?dYh{ipTg84nMt z=D|KnYW}LzqvWf<$TOuYIOYokopMN6dQ^IUH^2exn(011L5$G)-0n`M-eihGF+KpJ zUJvEh^6BjJ4XfRkI7GH3FwbmziC)5ZarCQe%`vi$;(d-q!-XL*#Z!2qn^6Dvco$(= zb@u{}avl5wM;%m*!qS8KNA0Ng_*{61Y`T|6b_G-+GG%M$CA~F8iU)vT`$_WJF3*I2 zO!R&s4AiaaOhucY+T(8kEclOzs}oM7Sj8(-w_dflk(VzHx12c>0b}u{-<;=Jz>jc2 z#S)DNDmU$*5nHDtuzr|TXT7{yp9g&POEd;e12RkaRR-H1nwOQA;xi(CP^{iL9hA)q zTB5Y449v8|v-7Oz=W8C%>q(nz4%^TAZPe%6~$<%(hz$8IGH-zT`d}weLUl@TzZfq1oVe5}c!% zlJVb3xwkId)ob(7{P!J?r+hXMzz?xZjg4m0wyTxHU40H4Z9dB%=Gs{ 
zyVCUz@0joAMf{P`bJ&B&ZGV4wbvBOF-)`M;6lYy_#so%T;YA)~5`i%-+j$FwEC%3#}1bYArnmB)^-(743bY&yA@Yj1?O4EiO!Nb099 z)#*$9&GhElVTW%{$Byq0@C5LA>dTf>`Tn-o^9tTwMank)nf0U&FZpB(IQ7p1;@r{3{?Kugk9%+GWE9o1NHhD03RJV8VV;6k54mQEN=#cn(bLcKSmjke$nP9y4kW`FCE0v5554N)H4iyGu9QaX6XbmBH|YX( zzYFSL1&uT5fkJ;@;Tgwwt*&jj*R}c4uCt%=T5frq+m^G*Dx4HuBJpXUebJjZXz-*ye`*|t z{`r7+Y~*eCbbqk!3P>Br?s%W zXGG?$nZpYWaSgYA_891p|7u}k>!)@E-QjC|-NY_+>Uzvtdq^JscgaloiO`Va^YaW2 z(ytUqHhq=v>3j*_0-k%^Xu4ZzFDQ?AX8Tq0|2dv1{ij66<6@(^=h&@*G^T5Nhcz=mt$j3}g;SMReoTc9ZJueU zIr3yhyfXybA6ieVr-Xk@gf8WOvxUwQHEW4s%PD=QShV2Z6Fe7{-~uI5VS`KgcZpS} z7une~PgQso9}pYh9x9_kv;CoQCV5hLpAhk5;O3wssRE@EbWG$Hw<21Dj4iL|G{}qU z5ALOByZ5khTL&>Rg0jKv+%KE2yXoPWNYMXYr-z zLK6X|;Est3+|4igrTIql)D8DHW1jDbohnm*7obg7`mNxHQ3#HOzV!Hrx79`q+wMwN z`0BT3f)D3)U~QH_XosMcUo>8qJl-21Gk%uiYth#2-L*UTzca^e4dvr3=Rinc8k-BM zZT?z!{A)g)=X~p&rrIGmX7h^ecdg?By|{jt{S|krLl%xQR>cv*MS|IqG()iMBzg`0 zPx}DiaL0aIJ~?I}z}x&zfd48l)J`7|VWFHN)`C3(V`Ns1z zi)%3t2pX>nqA$(kTkdc4GGylLKy}5z%>;oc+@UVNKZHQrytQ$m`C!ARC;lV*RRh=K z+sI?5eU5E?HJ)rctudUmrQj{8tlasbB<6#yuk;W>?oofb3p8{y@NuYZozIDHoB>Yl z>gb-mLa=di;V?zCN{snRMP~C6zc}mhPWUd{m2+yiqb?lSY~iJ+`?`l$xtzWMF5JEm zZfUKTOXE>xQhY3U?nBeTb+`&mTKU7aA(NWit@^y}@#Ht+{8YAfqfN;YZua#zp1edy zZp*9s1^u(?hv=>|NovY33$OX&za~GG_kqgG?Io-bIQNSmKG<=jetLuFg8zWHnvdS$ z`0pJ&<(ywJr_$*b_7<TRzEOp?t3Wx`^p}+`Sj4EJeZ&$QSXfKc&NCO2j**UFhD-%Hmt%*>NKJ z3G0@Rc`g1K-$3|EswHq;NCn2$Px5iF2S4ClsRzmr8glmW!PZah@gttAJ)RQD?-Kq@ zLo83I-Fn;%ntH7Iydk>!`yS8R=az6;dD;0a@@{9{0dv4qVb`y=-df+h;nVw!Z~RG? z<2QGm|6t{W6pZq|M7AEHqkrAwKj+)6BO0mfPE({hZ3C)o;#QuKelO?^UZoM-dAA&x z={8`SU3{>n)7#r>@}$+*eyaRoN-PdIs`of?*I3<>SHaGdVO=$-?Kh3L-zR_d!@lsP zw2Nx?p8H?jjvw=Pxzu2_CdLWlHHaZ7A^G? zlK~*u@>)H(%InYLh_qoX96Dj!MSSx=08aTm7JSjh+>guD?X*?sYCfKYVDapF`6&Pw z;&|oo5`xmQU*v<)!HO-i3vwg`csmvg@}i;4oh`242Xr}^d-8A;djnyn1yc&`xTc%`HKp7Mbm)}0RC);?klIJX<2z+p$?2x}KPK;uL#Q^4sX zjVzxK-Tk^xSA4?hb-Djr=2z{v#b<2LX(uSl_QK6B)y7eqj@Dh_{Fm2MzF?o&c8+=j z70F!}CqDg7R?A(Br5)D7E8WoFj{fjw=44yeddj&B7GC4= z-+!g-uN_{q?`XiC4i>z|XPo{CIGWC_TbQR8=(t0+=*O6B#yxD|eV>KeaI}#g4E)ft<+S)}hqYe%P>#0TEw{V0a)~E(K4Ima7VlsXQ}$`p z_6OPYHNL`rIHkW1=cGV)u`V)cmj$wPBtG<)ept4{IU2*sD=(Kje&)TFTBm-W{M4S} z+o%o3lOcDsY;hdPgu~eo`^#Rg^I@&8={AQq4|0SqkG*s7wd}Zuark!rkZpr`$KFD1 z>-%N${rA9AzN)XEXV`krRMGD7q&^=iKmI#-$tQkaBicM@eO;@1S?}(YPJTaNLK>SNw53<)BO7AU0jeEw6gGW4@yf=N2)M zai|f;Nb`VT;YCkD?s&kv^-Ly0JnljvcxK0|QfXLf1(HnH?CzM=9eUC}>sz~UFk-Rr0Cb+z?T|Au}LJ*=u($zz5$(6dMesPM;PyFk9zTC<$YL|Y4xNlYd zbGDwEcY=P<>oqR2fJi1fDGgVqFnzq})l!$wRBp+uG*vmyo%>$Wkzd4tjx3v?ujQSB zorgsq7kDPT$Hd@g!uc+UJRH@{vk+`OC093jAv{Ni+z*4oaM;1<8Q^U`@+1AehgUuq zk$2FS_hbEW6VW>$@on)$U-KUC3E${z20218hhf!#2D|IErd_or^rg(H61pF8YdaRhC1v?J4=^4UeZLEcKK~-)2E*;ZK0i&y@MJ zl+SR?j>^OMK|W)Y7GCu5J3jprrOy^a`6G@KBtB2!ww*No|DK1x!uKj_(6LY2rYktr zRnevE^*Z=kajR`Pax9E6P+KCknl8K*Au+Ohwl8S^kwiW zpI{Z$kKFstOwC9#z5COL)&g65a{X`CE^ZmUEi)O?VpqzW`3~YKLgBpK^o( zQfy_8cJ^dm0rRCf@vUERf0yeG^flOC27}Y*l2r%&G67Z#c4OO7dbS+u$L~^Mm7_(( z4uH4Z9d2Gp1nPo?SN{e5jq0iMlm{t$kl}-zI_ltUdrJQLb)G8yXGHE9Vc*O3h#7^| zjNCtFv!=~Q{TTGR$KZK={wB9X;VaJrf*l9*w7>FD)Hv|x#38Fmg^G57_0H_8$Hu&#yT}cq5YxPrm zTDgBsXGw_Dd(C@zM|aivw5!z?AX^^wk1c=1o)k{pLUS`) z&|7%POF#7KZ}ZLi^MHe0+Wc7s%g;65U*V0)Pge9l_U}&OU@curPqOLZ<^h(uw3lX` z2Kq_%;fgAUC>_TQ98}r9>hP^xrg`&$$A3-uUJ%pzU$_0?q*MAu;z5xtf=5ta^}jMFO^iyXavz|fbvLbwW+?$n+E|1s~vy>0FCwt(739NSLPyZv+E zDSFvX(CWzFyl2~A>3_q+ohW^jqB>s79V7FS_5TU&653Vt7O}GhDGsqRAC%vqJ z=d|PMe)NgsV9?~ff^8R-JJ<;|4$)I%>EF#+g?cVn?e`l-t`vO1EOJ!&|q zhpoxHt*?7E&;Dti2~X`4c%`-%;@3P)AL3PbKOIc9ZvvMvz^Y)&D?h6+Kd61UXVBe) zgTAlwi?DPlf0PyAl&@=_^4zvTj1bGLq6-1=+YXwRGE1AE zV~v9=KEJ1YqhFDOP`rUWFP|MY440YkY4OA#ev1bx=QAShgBn=d4%i_lcOOo?442T^ 
z@<|UM9K{y#^1{l}C5{xq`-xv=u=!}d6x|B`ePVP&d-l-KTt~nZzvj}*Fv2aK{0e^2 z<2~ZL*<8E;Ci^!ys0$MvkE&qHr*&T#cV+v@ocC4=Sy#$dn~(U>E2O0IJt1DyG_b`=?Z%o%{%1Qy2zoppM zJ&0UwJ8Lgne$o5Fe3`Ds2lB`WKA%^@GS4i%seNxk04o1SMDO24S2_*t)=-<%SV&b& z5E#oxesZ-;PxwwhRB$RYL%MN?RkG~Yvo#ZEKqLd}Kx)e^x?QBhl+HFW8ezRMb1RQ( z^d~(;=%>emcUPWbvVuWsCrBy;A8dK8e*XCjdZt5FCCTZy>mW)^|$7C!755F#!;cXTBekd#Q0E%b1?0g z&KjaVc9*T)p`WX}Rk?bj=Tfxg_-Qno*UNV6vw0$D%E|)R`m4YH`!i)ZRw;+mUk00q z2Gfvzg_77OAM?#p zk#V+l|2!TQ+|)yR$k!EeHwuWa__WlXk|SRC_z%i@T=ZA|ab~&}LU<^_(iT%}P6 zmfj@4e~khv{ZEO`za}{KmwRtSbaER>gsWi7Cwc7u;hFHC5V=c6ixLS~yND4jV-~F8 zY1{`ptN7G9!jv<>sdY2wN=teHUAJ&6D>Hd{mx0&3TR)2f*@zVBtl#HvP+smlv@E;8@X_ z22JKjsMqptb9Z#=o6RY-6IU~%;`HA*$h9k?J7CqZI7pZYhL7hFPa4nUFCpm`Jcv% z@Rd)CkLDums=(_w%vzO>>kFr|8b1;pe5X~W_ZsQB@evkRp>1Vr)$Uggwq^Pv=Q7xO zsQu=7ta24Zf4vB_VBMnz7}H~X82VGOHWE4aF@R1jcX)(#O+l& z)~Ikts_K-`vCez<%rvA@r(^M_k143ic|z1V4H>p*w(L~LfLQvI+z{G(9;Ar9o-B-z zmgD;z>88VYP^2N$b50=JWL%~eUv%+zd_6znt($kF&9&Lw^nzPeZQMyNrMRxV`+Vb) zqJ68Z%;H6t86TDoZ9hEX-Nr$yn4i}ws5GucH z?EuB>{!+b~5+m4pYkg5`w?huAbxtWIyVWgnZZ9C$Fkpsr_^0ckvM4?$Ue9yR>%uc1 zk98fAq8pOvXoqL%O7daQ?`-onD%el`rdLQf=z4qEdOvmW__=fA)d5>T*>zTP1U&2U z#aBMOI-9qy@SI!0j>DqT)Ep&m8a=7#F5w$Cw~?>2$gwTJjrMJExh-M=&z2+ zMAll#$+{7-THl|>3;nJ9){}u0cRI{I>vUKXaKtp%i1DUA9yPDb^Gx}j5K-lFy)N6i zIbAqd=xjcst1z#MzA2yc7e1|{=T&VSsh`jBO!@2+mlmLGjvK+Jz2eN4jwCmP`lwt^ z&&<8VezPf0>gj7;@GqY!(|^Fb=wQl58MwdkOSbipT+sk7v~v`39Q__=erxx{I)ei< zY>><>o}H(jfxm&vBsLMePe(ClLbl~p?yrxm(Sse2YPYD|^gy4YYOl>s^?FI(4svZF zJnehO1AxOUD;eLmoBDHYzO0w#Gw!EhNwvxzpdK5+c|frBvHpusc}My^s=$Uj+HR!F zqh<|g=L5;V!GBb8G~=8l1U3}Bq@=_1lXhF&M!GHPS>4-kIvtAZ`a$ywet(x57caB& zhd=q{_ZM6MU%t#%3`mVW)jvUhJO(e29ytv- zB!0GegLk%le@cB-ALVyKTv}xR{>}vE_EtXFE6Zr}k)QUKhj-RZ*Kt&v?RjVGVfn7=!HopffipIJ@uiseO5c@BI#{$}upw4UefB22oNkr= z5riYWZQ`|Ch=^-9-r~BvDMTeyZ4|@p{3wUZzxj+IG}r;Dvl83liU0l{kCmV3!`(Z>Jyh<7i0z~|>XkV9JYBEgN2Ynf z?W>fKYU~{rUUKmYaLVU|$a-TzLM_YKigawbET4WK_=w3qJ=AbwmUcSS-F^_{Y(A2M z|2HyGJ}Q^&lBqL8$SIH1kb!J zT_#4Ga@5}~CfKvyO*DY&(cSA7?@lZKKHCmfUVaSxWjU$K%@u-X0b4WF z+HzX{^%;0>8z&pq(Lc|;Iylcd9bs$mB|TkEm4m95blXnHQ|@q@wMxBRw2t^y53l^N zD>0~~%n+5Bm)b+Yg( z-!FLh2YkzagM&ht4Ef=Mh1Yze-%4No#J+2(HahWbJigNs!NQC01byo^uknM4j*H5L z!XeoH5uLulQ>Fh2QSM76BSPDA6=d&XieKM z;+;_c+WJfPp(dEj+$HYY9MUsey4mq3J8h^(nIC3F#0IuMC0t#l#TfTKznGuKZIFW< zUJKPCg5?dIJve(uGDtt1cxKlp*6;5ejSrt()>%`WjKM+XL%+prcByrlbNOleTXOOD zc}x<{{u=3^bwuf)vl52wZ}E%Zcl0^$(1+@VIA<&?l>jfE5NX><{qixHDZfX=Nc+K$ zMvVM^QkSyobRvEj=Idkdnp&rGBy~H{LDoDV*m`N)hyE9QggdYnFlawf&pYrMyy)MDKWy23G4GV=a4zl&*P~?2S;$ccEPrT_`vV* zRQOMbYWMsoYahoD3M(hGjH#7V^ZvZY6HCIrqxBscl~#4fqiqNAryqED*AC4096mqb zOtmolYvV?A^<^@s%EkF&Q)yBLu`XuK+A}e=Rxai9ZIFcbfVj@V3mgi79*ZA7SUOTY zRv~D?e?o+QFK|XB?(uyrf81H@)I#iRow--mwrolZp0UnPH)L;ZeDcVEU#?hmVFYps1$&sRMBdwfS6y3S6%woYF~AAlCDa^J3% zS9~>$GmY2g^5qM-M3=~eIp+!1JZkND(LDACzzO~lG5A+$Kld)GK^|4X)=T=3AM!?c z8mG|7C3i$mX;@2BV?O)+PKM)pLi0cJgK*X_5=MK&y=6KW$E(8 z1-!-Cbvrw1(AG=j=J zPWik~j8Zme!Q`b;+sAmr5}-OF)8->SH}CP3Po&e*I9b~Izgqg#xD0+a{E1wh^dI_F zjD^!Y5YiRg;sVF5yi+b)Z2scQ=Y0O6DF@dr9Xx=WKa2WTf7n6qziF-}@eNDK!TG;URoH~6J| zUf~11BdXl;hP06k9$vA7@sb4`2RW9g?$hRL diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/assets/columns-array.json b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/assets/columns-array.json deleted file mode 100644 index e6d73ac80f5..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/assets/columns-array.json +++ /dev/null @@ -1,5 +0,0 @@ -[ - ["Heidenrod", "Arthur_Henrique", "Alan_Ebnother"], - ["2017-01-01", 
"2016-11-01", "2015-11-01"], - [10, 12, 66] -] diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/assets/columns.json b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/assets/columns.json deleted file mode 100644 index 6304484f563..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/assets/columns.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "path": ["2007_Copa_America", "Car_dealerships_in_the_USA", "Dihydromyricetin_reductase"], - "month": ["2016-07-01", "2015-07-01", "2015-07-01"], - "hits": [178, 11, 1] -} diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/assets/custom.json b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/assets/custom.json deleted file mode 100644 index a02ac155e46..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/assets/custom.json +++ /dev/null @@ -1,5 +0,0 @@ -[ - {"name": "Joe", "age": 99, "type": "person"}, - {"url": "/my.post.MD", "hits": 1263, "type": "post"}, - {"message": "Warning on disk usage", "type": "log"} -] diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/assets/data.arrow b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/assets/data.arrow deleted file mode 100644 index 615593bb3ea62a8a714172b6f3aa80ac008d91cb..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 60178 zcmeI537jNlefOJn1r!uC-bYvt@!H1exknMG?m2dLXLg6_*9UA3WWreLO0Cj|aYy%Qd`Ctmj{3n;ksfuw~oF@mkyH zKRwo*j+^87C-UleUTc4+<{oB_dxANWQ`7UzZP#aa{eIi&Go56_!`*oYPal@P zJ~{V@^a@Wld(7Q$yE*4e%~@)r2lD!4zMr(gD|tu9x=k0I{&zz5aKtcrAPYJ_Fx`AH##W zm?yv}%)s;D2>cFQ0Uw6X!au+_;C48fPJILn!8{y-8bojzydFLPe+9R|ZSbi3=W0r*S!Tljmp1%3vnFhPYse4`!+Ed|D)4f66MPzOg*)K{l-lXA6K0_S>+pKG5t@!fW7acprQeZh$Yt*Wh+I z;nBz^oCX6h0|&r^4!juN0KX3(hnwO1a3?(YF^n%5gkA7FXu%8M4R9sAA8v$i!wHYg z<<5p7I2RV60c-Fgcnw?*zXyK^AA!GtAHi`LuZO@{a4sBz0DACpcr&~kJ_%ojZ@}$v z|Hm`FArDhm0lXhR4PS(RfjeM(K9_qqoCo`$4n24&yb<0F*TKi& z2KX}kA2{(Gjt6;o2F!p9>u?3U2mTcP0&aqD!jECw)A$bTfigT7Vz>le4sU>~;Un-V zxCw59pTYeH=xcZ?6yQAA3(tk;!xiv8_!Qg-x4}>0aRq!o7=a=@3#zaNFNW8{Rq#jf zdH4zJ7{s2y#ZZSXTn4X#tKk~B7Cr~Jz-{n7_!(>;LVv)o!&Bj0n1KV(fXm=D@EQ0D zd=qYmZNvC>umeV63J$^wJRg1=-U#o4>)_Aei|}o@15O>mpM+6ZfFArdTn_Jn55uS7 zX81b%1h$Xna*u`)crN@FTn_JqKY=g6t?;k#GdOuHmwP0f10|@zW$-TeID8qt4JVD$ zKj6S&xD3Nff~FFu7r2O4R8zmE8GtEdpgI1 zN5K=J07bYM+VFgMJzN8S3Ae!a;UUjJ=fD&+;Zk@f+z3B{2c3)EhY={j92|gUxCCAb zZ-Hy!Z{e%(?{LyH**BaC!%&0;xCDL|-Urvgm*Ah_=dk@e`X0`Nr@^@}0|%iAJ@|ci zAAAwM1wVxcoX_}$X}B0(2yccD!6)Ib;Y;u>_yKIY0Q&)Fz&T*SUN``C=)k3LCA=3t z4PS%r!!`@Q0?vk~!2*PE3A_Ye4_Co8@L~9C*j8j5z;1XBTnev(E8!aW2>ccNBYYQ* zE75-ND0nhF1NK4(UIed#{{ioUkHDwlX80<67all?KLLBdgEjaKco|#;e+YjJAA`@p z4e(9)9^3)jr?^IVGR(jdgzy4*9b6BegD=2$;74#L96!xCf&%P?r_7?G z;c0L_>;oTu6J7zYg?GcX@OiieZih2>a~-fBmSG)U30J}U;Pdb!IB5^p0cXJ+EJF`| z3tkOxgO9>3@I$y0PMJdUj}QEbPq6Nkazu-yo|ybPCZ-)%WTJUBAS_IsSj_X4Y2nw_<+#U-o4XD!FJylB~T z{hC#aJjV&_xiE}Z+}QHns@-Y0I*ildRormBmR;;d_1N*9YQyrwhGSR4HQ%m8p;N0m zafg$zT5ja!OHSa}`~6Pj_+GuyiLIc4QdN`}a-cDj+PYbm;Z-Rrgro({vBUGh3T zdt_jMcSh`a_lQ@eVP+yHsM!mVcVuW(=P-4o?bf`G7Y0tmZoSAZciZhS>S(8tqYq-f z6n5KV!)(ruJL5-22U;z=Y%M-3KW@)C?kZ_5`ednOSHqfX&kYP)C9A@5!oaQBal>n` zMQ*hj+fKD(Rl0%W*^_S5vD^;rTcxF9+l#`0XEl4-4>@R1w<}JER_Iy&a>P#NouKRb zJVbnG`7KVj>eXzH<~w%TbEC-FXZu>NeHn8JxxkHP(>~Pif!*+`e=sBlGUna*?uOOsLp?lXJEj7G&&8s%;U12xo 
zQ?KQ8;_gn`lAgD^u@N7Mq*tLqePko*^sMd?&-c{iu~!ech@jqe>aNvuI-&17jlARM z!}XR^q!oPE-nVSehjBApbqZtS6Biq$dRkO<-+CMH#44d(_m%eLAd}bq;-6B?_ zL-tOy5c#c5#ciZBQescF{n9zMvTOc zGjxhxfaVFCJMDd(3#VQ5q80;yp3pF5hrZKobO#2eM;pFdWJ{w)!kX{)>|HKrbAX<6 zi;>enMC{qwB3DUov-@4rsGUwRq7QoLJif8ho}IK?WvfKzd12e-5a9~isf1SXXgCJv zn$zP|+xB?YC^>B|#kY5P^o2Pcdm0h&Rvo|OL<|?l=jccaLpT*mc8&;0(ko%DheV+P zQHOeh{GzU&^>D>Pw*;OwMdx-ostS*>~~KE_ty z>a*B)J4P9|IK5J%%aQD$i?BL=oW4ZmmOE;EE`3jc(xwqgXd8QfL|b!OK7x$&Hl%F* zTFLKLq*aQHAh)EUSP3&NSaCaRGFI-9d=YEH#gsxu3k_g;wG(x#>eW(6yXrfu$ZSAc zb-EqbUJfH$1Hr4>k&B7rR3{z3s&Pf5wA?s!CY`8hI~^mROJQ5xEKx;#yeOV@jG#Jx zFYZjbb1vOGi#bB^*Jy2X0d&(O(inMFw&2WLoVhW#Ub~IxY2Zz|h;3D;ywl$6tkY`V zq#II}2<@3JO=kY*XY%v zu+`(}E-IRftkE2d)k!aCIyOQapnE4niFRl&d682KX=PM~HyPQx-LPfvap`!+Yfd?J zRNhpNp-YR0Q8S*Z$=qRQaS+g>K{fOP-s3Q|(WJ}HrhL~4Xkxmq5;nTQl<%!LGFM0F zo3P2t7(2fk4W8^Po(ihHDw>|-JK771pb>@b&=2dqDI}xat2_1zu7!*R*IF1t>7lw& z+A;x7z)Wu&(W!YjYM!;NV?yAT> zA6a|Lcymz)_ajdi_+6tNG2opj^kd6g@2b~bV>XuJW+PmU+)B8L6$qRPvQnfE82XN1 zcPkO*ABB$S%kl;_&kAZi^okqf?a^e$3!3yo4s9GVYzE)P`|(!8%zhw6vGFuqD{Nr2 zHtjPNIQ#<**_s(&7OI}1I2E{2y@&0OyI9!((-Sn@7GunnEk^chu@yJ;Nc+ZinL+Hf z^bR^aQB+pk^w#5M4>LYUf6|zEwI!Sh`I}29iMxu^C|0}cd!2Q93dOvx-W@hNA@(}9 zwX|T*dmM06U?#&ZZ6$#jvp4K#Do%3ijHp_19HIkE`=^fhKw&_-eKw99q{LqK{Fd=3 z%ZNr}b5DoN%Z(i!C44bTz7`MT>&S%KsCKu>F%~#%cl~4oweBot3`4H-H=zIyX6iG_w1>z_8&}7| z4Pl%fSRC4Dh$-5?71}Z8ZXJn{NeyUM<5Z+VQ$&X(YIzB@Jm@@9tIA~!_nu&6*i=_HEi-{|;)1Gldf(b{?shovYTgupWaI5fq zW5&F7*j{qlZMU&AX)`Ava2T;m*0dZudIoQg@W@)!%hcuL-BBrteCiWQMm@C1J@%G0 zIxsMjctDnw^>1u1p%jmH5UmoURyNY5ZD;+ExXwyFP2^?oq0*)^SV0F{Zrwxl-D)=? zFi|WPF@qJZhokcO3j)Sa;@@MB6Pi57?w{wEic=R^0P!2QPpkpiM*3~VRX2lA!M#I zjRhHoBYFNwLcVem^GcU95Ipv&LC*n+K|7h9FzQ(Y7FUZ4?SOy4fQ%vuZ@^mjAl2~0m^O1yzIsvZVn0^y_}3sb?KTt zUqw&(ZeU5Za@km9985-$g$bpx8UdYbwBl|JdEBAMYr2Jrv4U~p(FtoluO@IP31ef~ z{HTRWz@0{mn|8&34l+O%mr?_p#zEPK6XWcuhah1_n&}`KvG-3+TlQ2Jp8(H-Pzx!^ z4;GF#s;D*JTjuF7RE*fCFF6{Q_6xipTJ8&Hx1g0-Fl^$NowNbCvXZg`xR_SdMD4K`9I-H}D8{~<=P-WMIG}+B z@e=5UfQUezBQk(AaRCPy6f+oV+$_{htwY@8Tk~awXY%gZ*qA6MK*~+Lvp`F@Ji3QB(U) z8=Igu+%(xb=Qa)P+FRTfd^lpc9YN}YKWtSfS|#Y^wBt@xfm_^ zJrNGW{6e15Tv~TlNcgSLCR>tS>?t3#Mc)ThJ~ zxgC1RTJN|=Y|{aWR!HApdRAc!1;Z9j)S;UvNOx{%j>5!<`G^dJFH1C}?K1O2e1HkX zwUJ*okhZ1q#W03RY$hLk2=`pBt)Gs?t^Tq)xM0uCkhUU$)7$9u5qq&%%EsnVUyz|C zoj`7EWN^sF%vf{NxQR%SoM;^-u>=aR7*K9%SkY#*ef~lRD{i$aj7CMnjC4!|p0!JS zf@|awV24G<&Rj1|M{+c|z}9l?lj*8?E6pPALodtZZ92-C>$TTiTUPE(eL^Y#EnW<< z8AM2X=t{JDuZa7IRzQkkmq%Wq!{zJP2OTU8t{s{EY7@DW56~oMK>`VXBW5TQmpCM# zmPuOFF-0B;ufTP>ICzoQ4r7PC`sBZ`wMauJtT>Ip9wHyMLfBVZck^UL!YW2AE;%@W z#E`TkVSQL3Bj%HHAPI)0Bq~uzx3K2>4FaK()9Y5dq<_pchW6MHJ)#U(hrAY}d@Uer zNM4JC4JnB(9uDrD=kZQpU+6g@4q7S1+?KJ&O)n@#U9=pREi-~@sZTm}zCaec)kY`E zgu8Vzs*?o1giMTEY)Vv{B&=17&?7i#c;A6L*&ujalQ1+UQC*C2Y9^>Ng3C$e$I;nQ zH%8NtRO&Xp%_OF>6G)}$Bs~4i>VvsLcZzS|ND@YKF*pD?OKu=XInB=OkpW@ns&4n{tXjp8Y^9=kv2_%ucoPWV({ikqMv&CsC4*XVRJ*gc2$Q9?B*ik-S%bP!}oZZa74Ja-&E>k(k6*58_hT zZB7*JhLAG;&R=vMJcG-A>FvOa<8KgK5EH=cbCx_UBg^vQtqSQzM-dO|8pVzn z#bovUxMx++vJ{MmF5^_+&PT>6HoI>{AM zL{ItxjjXzo>SI({J=`CnYgAfIo&YWeJy2i(O>tW4tbGwixP}o`Afr>5_MKHOK_PC> zC$2^xqo3*)p^%Df`#VmfFf@Q=n=gz^7(19*+rq?9!Jh8+y7;t+Lxb>0ks9@<10G2Y zF3@;=m^FIoZf+?_&^gakyV968i4>Ax4O@zDOD>hLRz@T#c@`a|h!iXzHHge`DQRG& z4Q%9RBbHNMgs_|z!-`Rb;NsX|#NMNpCBbbQp^ zh*zXoNFK+Q5{rq4uKd1yegpG~)>2zz1QNBAALQ$sS<~lteSRg3Z8n zUV$LSeW`)f($y16R8Rp{1BV(1qwDvM*m1W)@|EW3aIJ)(RLfn0S!IT)5G!$`aDUkN zR7tVinx)2tvnOFhwl~7E@<73Q)?fi0hPeCk571;7M@~^kt&xbXS=FAOFOirEk0`fD z+snF=>?1(p!Y#7%lp81(CLzasH)3W2=Z*eb^i~y?w-_H}ZPCdn1x6|6w8Qt62?~@! 
[binary patch data omitted]

diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/assets/data.binary b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/assets/data.binary
deleted file mode 100644
index 5a7cfee67365e7b8702fca7e9aa3a6957fc15bda..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 26213
[binary patch data omitted]

diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/assets/data.bson b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/assets/data.bson
deleted file mode 100644
index 3b37c10c0a11739f6bfb75ffcdd8a36f7d07445c..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 165
[binary patch data omitted]

diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/assets/data.msgpk b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/assets/data.msgpk
deleted file mode 100644
index fb05c0c859a1b54ca082435f29c1acd6d37e2aa3..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 25544
[binary patch data omitted]

diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/assets/data.orc b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/assets/data.orc
deleted file mode 100644
index 1e360f94fe7894c9da9dc1fdc5d100fa65a1c71f..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 44383
[binary patch data omitted]

diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/assets/data.parquet b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/assets/data.parquet
deleted file mode 100644
index 52a08e0cfff898f26941de0cff52ae8955eb7a6d..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 23974
zlg(wpepDwdiH*yN9YX1Ap7w|!VfPZiy@s6BHQ!!u4i(PG`^9!)Y+LmU_2!V0)}Q@wfMLlgTZ4=eGMC?KEG`mpDGE<@~K4QPPm z+Y;eq7GOYbx~J57Iy*2RnO`~Bl$gjb;4N;{@`Ejcm5O`LwjpGM=)@no1#>6`L>g7pbPnGkko959 zYmQJT(u@~)Za5{>+SnlW>xL%lVnPwuBHH9+QqiriM*4e!v{I-wphJngRLlUe_(g$eETPIB%TN`P zisV~nZvfg8dOV-G1ke^8U6y=ZR%a;|ZgmAhP2wsbdU9N`J*TP(J?XZymUaqTPYcVe z?S3tZCbSl<#So+1_azfn7uGI>)GyRfQ&NM{ofZ3Y zn#_M#%xxx=>}i|)3%CHft+=keU+h4AP)*;#h@pVrnmODuVn!*dz=HIayV^kV6fksI zAS$8L5-G3n4Eft7M4+yUod6}Ir?eh9*#X*zJ5$N~MKvq1WPzTXG0N)t>1ffx-OYp1%NzPMkOqtdt!DWok{63d8eFK z^4w{Jf329&1973B#zM|y(w}*Xyaml;li00;3jZk*ZfXp+h|869I*;xfyL;jq+lbf` z?-3H*K9-7-$#P0Q7)1;W5@{)|NWwiWCTJgS6Sodd9?y2w7k1cN%1XG^*gH1p7xC1z zgYL_czcBTve=9V7#8lVA7S zIE$2SrZK=N!IbSEE8Yl!Z&AhW^^?l2P^?(0gKcUN+K5Ih!v$$Mjo5fo~?{SaAQ#-Gkv91a1n8W@2*TxK&eSZMNT zsdcYIL081Jb{?AebNMs0FzjqAwqYAGrf_n-Fq%&S1*-ahEIiT~g|cGA3&hBe4IV{m zDW01m!ca<;P7BC^P6Ur4{JRC<7_C8UH*zMTz!<;Q5pG-yDXGnJjjbJ^Ng$Go_L_oC^t#n>Y3GQhNyWYSlp90hWQ}=DfMo4n1O2-spM5 z22o6{=l@mWXc2{uZgHjcHx?1UhR`-f3^fUVVGG(Fxtc5e)RsmE5Xom12gkf^kSBC; z9Ttfk=4gP%le$5=0xPh{Voz@%7#7<{rHwVB1o1X-tABBF6GE6qD?T zZVdS-hX7D38_BW1(=m-RN^Kb#-A)*BKy$;MWylpJRGys9IHEb|07iP^F)v^v_6JZU zYkk_fg;q|R8g5DDq>YNCi=+5P%PXEx4OR~IN>^0!RhbQ1W^#_tN{50{1&Np0Q7#b< zBvDjruOeHD5q(UXu5&Ys@g?EO0kJ_)SX`^xIsUIzHSJP97tev) zj(-Q`LPKljo+C@KuqayzU|9`32Y0{oL2?S`NFzkVOp2UX#bgwX+rUXNIx3Urc^3-7 zs61>AIiI!FgI|~;vKuJXva`TqS|IlT&Kn26u&a4&L0mY2O<-dV7E`T7M;ze*l)W~I zJW?rkmJ)k<6=d0dI~1SPVP8gh2u1_Lon7Zuxsbt;s@J${WLyvqzdZG;6%iYefZAMNbdnG`vfNa31J&rqSz%&zKeCVG=i3Qb~vxHF>AuX@B+%}wYQ*Sw;AMPhDy1= zbUM(I)U+s!c_@>d#W)(n4I(HnIgwi`ptKFkVlSdMEybaEa5m!Y(vqNP3OYL1ZjV5s zLzVE0T~RUJCrn)DZSPpUGSI&^5DI!;w`2XnVjs7sg8d&^m+Zaz5Ymk(MHAy>q#U4h zIRq@}ELtC2Jj_vXXFo@NW&qaSEi3AF-sR-?I;alvFNQEoFL-_vQ08iJRR+L%HTXAH zdF<-x-7UbBS$zZ|4gL=l!7}ED2H8+hJHh8SlyYRRTE!UK}oz;}ipIAQquO;9yafC)MG);3fKAJoD zC>5JnMW3ullmpKLjNUboYypQ6ajS9w|HY;BVI7tHznlxW&;Tv1Bga>ITf-4hR>Zu? zB}yHmI;7h8(Bccp&V*|i=nw%SPOrEA%I~(-fK08`)zOr~_d30XJOn8QPpopatSySi zlcXnD4egHmpoCmi!t}@n`Oe{JlfX^P?BX;vcEBbKqN0$qO6g>dJm;jesVUYN$R(UJ z9H-5;nFU#wP>#A!T|x<{1yE++h+>*OKDM9|3pco0TQg9Cb;J3@Ouvyu8(l3W(D7N} zGRolIVpwPi>GADL+R%z{+pC~|=_Y>9`#HN}Kp~KKZlO%eOSN>RD+>`3;)@Y@!G`S4 z8dI|=!*hI1Tf2xPLwb0=u#`Xl0tlaksIptm^lKY)SXW@I!DNcq%c<<{j~!7Na1nju zm`WR(a$K>V=WkQ>k&Vzz+CX;Y*{(=qLjtpf{KT)eegutXDWdwuY8hxRp49*{twHC- zEIFe{NbmlUG7o;wf>{rfV3E+uhLB^znG4rcyU-f)XtkTh-llYN$YI%m0?^2vUd6Vj z6qW09*9g$1!MZiE?g(!O9EXlidR#RUM9Yk)KA0=|2 z#GV1E21yMOjiK7fiXyr%MV@7RcOKksTPEdoliyLf1h*tZ9?Y2{q$WV8oo$v|9qnpv z0Ck2;v9u<0vx;m9IUd(7Glca@iM@~}Eb9xQdY6?=McC%4aHb+NgDyw2LA57L=nZnQ+ca#O25o9SsIUtsyPf*vMi7Ybvit#x2Tp-ZAReg%-! 
zwpnZ-)GCH~hTP8BR$x(5J=p*z2HNuyZORK1`XGY^m zR2Q=TyBFs-R5*Vqk&B48j0bXbyEPU_jl8>}DbMWx2~{+YsH2qSITuxXb2w z+uwz;kqBA}8mvrk{HjVyBypa+!QeSC+!z&1k}jE=#|VK&>2zB-jM0$5kn&3b+LMad1#3dx+&>T2P6j*bW?=!J7{(1q8&%&%n@4Y*%s#K%3pR9$7A3?%6*zZdooxVxO6KK?iD$e8WjiXXn3#DUcObEWn6up9%UDbe zRO|kF+}%b@6Z@ipuymhaNXmd;C~+B8;(A#}W3Z{ES?nbXK@y3biJj+fZEX&;76w-X zfKD=hW-VzWHcU~5O!9pVIpSm?+zyj*yA9e-ULx}ffb6fus0)TphMczeYJjEKR_+)e z-jr;bm#~z?onC2cM~q73-3pto4azFlO30anR(0~FeadeaAfRdHp&`g^6&5*S0(u>N(N#PGQf%D8(*A;yUd1 zsN5|Bw&`{meICW?`C3~XVxaoqZN4k(2QVy_R5S&!m9hGEtUu&*%*aHZ@&PWJA^z$T zs-bSZ(+>gB$U>pQd`u<@23^@z9u{Uyo@x`2Qdu%p%%J*?X^ha*N9=Y~p_Rz#j%%_>JJ>*7gRbu|YZ3CEwF8UE*p(8^!D@$=y)F?W<%~3T@;%M!;1Y(79)qI%hD#W~N&o zA*mV?cTy60g|#+Ny?1U~`1DbHC2KvzcBoR)vV`gIZ*f!0|i;bBjD8&M%%cEEAV&=N9silZoe4?pP;+1L>Y6Uoqo#6RQ#ZrxsDB z^M|SyG|iJF_#=QF0PCdK&y>ooakD`7Iq53~%BJr|?)+lzcLrR005cV`!bbPy__eMG z=1xHKCSC{$OSb-<{IiPDM}~-8!=}?Y?eJMk5J0FCXv(N!@sz52c1bVN;V?PTNyVnq z^({>R3o#9b1(}$$X(>zYs{r2HQV{`bfEJj8 zJ}cbo5-^m|lA`K7Fq8kOlx}Tu&MTvqlV6J{gH+pVbWDa+^sIEaYli$c&rJ-3{|OJH z=?U`O9R6P3mM&O4$d?ZOWEXj>hye)WqaJdU-6jI}rMU*)-UodK1yStxZ+us54^%@V!xEh z$SFU$$H$;U@wKiX_z0kE9H1D(o#`QJ6KTM1jjdSPJ1D86fs-)=0nGrL*Dko24K|=> zE#q;~kU^#9$Ly94Ius$jm7qp5N?6RLyU4M#$Zc*~H%Ne?l5(wG4V;K{;HFtyxUnf1 zAx9U}s=@{9F!WTxp0W~}*)Ts+0YZ>PhwX>{Lw=cQ8E{_Zj4fx8?FMO>g=X)wb7$J+ zO-y@V7q_6=iy^<*rGm=qQzB5!MJ&}vek$|)(fuq(MY(6eQPx33fTH4C=*z-}knrW$ zKJ3!eQ8hwedsmkztnMdgvcx|p8fa}0*YZ(a@PSq8WxrxKUXavPUl0N9TGkZWq~qgeo|Ft@&zJLsGeF;LCwXaeLt4?4D#k-7CVYvA+2 z^cSd-bJW;7z`W6{PPBn4QO+8OH!y1i8|pk&mfxv()752mK*DYaY`<1qGDVqV|C;Zr ztP}i92)N=E4{@-_$jj%91+ZDSxX4Op>80c+AA@NQPqCPgbIU6nxPw0E3H*E~)$goc zU=xDjHo^KjX)ku5dqV`0$+^|zpRf>YAsf#l*UX}{ohzA8b0c?|jWOd28RCUpO&i*B z!JDIzzm7b?Tfz7-Q!1ENGExrA`zji3^4+#^3Os=+iAb6=E(#ZwhRr@b&-3xv2Q0*rXFOkOM}#OvI+Xra?8NzNxqcYk0l zxBfsj)=TEA>7hy?PbMfD47@W)JW_wBV@BAirT1=M=HH$ zKvn}Qb{F% zOG_zmUg|3OkH_N}7Ysx$(0 z-AQP7VZ0$cfid<#MFd(Vs<(m(+y9i#^A@nsafir%=Z5dBpo*qwHiqfUlnYMG(wgoL zA<(;e)pC*d1=xnhrUt%|-FYE{JtwDnNxZT?18^U}WEkTZel37WRw0+xGeBF`Z}>ZB zfg8O;*o~M-#w?Z=2HC9$ir>rJe>Kr>~k= zUA90|v)Io)>9E|xR|SL%>QUh}%oDpHNcnV!na#sjq;QqxNRLQFc;8uFFa4Jm;^eRm zSY|B1qAU8xqU4#`tOmIwz1|iPJKFm0-0dlshn%m>=l|T`exY*Pl@8zoRKm0eqod&B zE3KAqx$ikgzf>6AOJs|WlKA;$)GX;-Hi&5CXFF+V)7@2McQK8;Ki{`r*9xxU7-kz; zFev!WKvCh=BY$Gu54bUYu=nP)jnY8f>pu5=KFe`;K1p6?$;*5VYJ&&@E=&9_wywlg zKu^{@i}7ui|7(#u$1sq$$Q>Szg%&|rmvW?ob)!nZVWYDKxx)#z zX#Ho+DBAl9$xNlm@hwy@DlUz3ku8EVqcL2&zJ!h&ll^uqhi$H}YG{8T1xX=REjtazk+Hq3}&U)N8J|oEjlN)Ucf(N%M&NTo>gh z`&j2Wu6i}l17%gmN~Ff0r?C&KgLZTwjF`DgX4_!!&FbSFv*Y%9L?~}EdBn;EB1D8xV_V#&Tet&TUv=!vF@9eJMjP)5*pn?gu*raz(wuLHVbjq9PNU z3#EW#7~fOQluIh{M$v$zA@o{5H|3&biF6csgN=c9%s`r&TjU3T8p-P++Aos(T0vRw zFLvI{Q7O+y6enyCeFUQxe0(rL4+%eb+fz9$8R$ezPJF=a^*Qu{^F&fTFOGY;MA`f! 
z&0a$vgl(f6whl6Bcfh_HK$RvvEsm(31MoIUDFfGutuIr+A1BY@KMt||aA}Th^nFoz z3AKvUR52Lea9@XRc)RDygJsl8_A)HXoL zIHAR5V--X%BM(<`>l~EAzu7>(s{qB{6(FnOGafWmw8(_Ak^ic?U?NCCZj{s8mf2O% zsX>YRrO_d#VV9Bep7+25HhWwU6*0GrB`iDn5-cO)6UB`1)i!j5A&E;>)(ZoGljs4c z*8Dx?(uY7N5pG2ZwbF7uST#7oFX+3vk8^ney||M>jZN}?*h84cKqvoB*a4Xp8gZID zHRrPtx>@{bgnVJQ!(}KdhO@4mMTgPI3%uo@z_^&nK5D~2JZEr~M0gv)C=KReZcw;0 zYJm$IT15U`w|fgGB(NIN@l0TaP9B=o_#!3puh$C4I0PJH$g3i`u!hFe=qNfIk;(vm zA1|??E#UUgsA1FrgRJ80BE$wc!9QB;f&&m^ki5yg6!vW(VkuOgb`OvhcOgse45Je)tTK7Dm6b3P zCNJCU(Le|GLO63t1ZK7Z6coY5<-BD0ZUSAFN#`74G2Zv+Qb$}*8o)r@q%(hFp?|{( z$3|M9vHZ+YN$WTZVy5z^F*L}=H^+N+SQN-OC=AQUHKm>>n2l0`|DcGjO2)fF_;{?KjVTUFCw6xfR~E4)?qeaTnKseSxPB=p(UFhzN0Rdx6WAHUT@dM#$RA zgI@~W+*7kLRDr{m{F^yxCpmC}@45&wd>v-|uq1WrhRJu^92h%cHS{r^L6wD=&Oo*% zTYVNf=OMopQA3k%2D+)mNez;J`{XJT!>mZ;?rwynHKV2W!r3@8q7V75L=Sx}!eI6* ztfaFFztlngTwXv}rxe4-&Aq(f=%*k7?jil3PAQWAd=v`K>WmYaY#u_93p?@T?YK|7_rFQe(1DF z2uXStfjQGRb%m`kCQ-TRW^cH;wV}Bgq&zbD88WvfeU4e-@ES3u2c%sKQ>c1fEm3yL_S=rxt=i7;3R`T9W(c33pab|Hur*luZI1(>J#&uw;0GNoaiTu{j!UE;2; zqy}oQJC%E$419?!f$VKpU-wJN4#lZCS-v?zIDZJ1roza z7BfXYz|>*<0!J50H8Nn{mr#X|3e$aYuBD8dsGy^ga1rLJ(C4_X-7wU`=#-Snjq|y& z5}vJS(F&(5AW%a-(O@$4z19Lv2`*;Q$Q2Hw?p4B~h2|Mf%va+Wfbw0P>M=^%>{hDvH0xa#?iP`eIOamZK9@4-aQy?YUvL9Dlt zZKZUeojlM?uTi-h!VHvNZfi|}mbQ|6?FiVjtK3Ig$>40hy=3ed>v7}{JRR4GSp&^Y zxC4C5$WL%s&$vXMbn)S1YYOM$&_DWhewj^r)4s~UxeA&0hpqpmh9}pMHzC)Mq1lW! zs5;+X?&f2um}i44pOnOnh(&UKDGf4ml#82H=7q=& zJ_NF$ktAa|p^#Xg*Yl%|78|`>8v9RST`*|e*|9W(AyxtdOk5c=SwyYy8?=iH7mzbb zXx!3MHaUqlH2!5hG?f7ee>gWFbK3t9-B6C~FSWy~r^>0=scb@52f@3@1w6N9-jYxY zx=x=oSg@O;HtXDDZlInpcVfjAuh2Pz50x@TT<6YpLsfXo&tE#5x6ilrVYry$gL8J4 zwiu^L+G(iccuNdwO>$2W1Aal?vVktWv4mb@)?U?0wUQ^pz>g~k4twAn#vqBbVBJo{ zlz#eeYyjt>2v=hHfQs?6YHd9*plSTcb~z9%#+LBuaKD$5D~@IH`01n9qN^)k4O*|Qf^~2TN!){uMk5w^n3vpi;lY_Gv zIBdu{o}$@Q-nrfZ$+3$hkue%sF$+dGnP_hAYG3X05;D$MKDI-%*TfUaF(J6itTr2t zs0}Ej=~!xr+fa@oHsGNYa%TtNz_C>ph>c00+QnWw#7UUc_#3!_=l!e@Nd#=@4^RmD zyf~qd<4b)7D|J@+yB{`74c4FxK&dwQEG-z^Oa?=(VZM?h*USOn>&!tc!f<>sX~bX| zw7yrDQ;@31J}pd3jbZYd3pAemw`sba1x|_YYz55iDR^aYY_4$phwPsTH!{3g^9zDl z9)N>dW`kZLXKQj&WQc%t!qlX^#)Gm0vdvx43j=L)h=1b(Di|s!@=4&mq|Wj5Gh5i;A$8ki@d_6Q}mG zI3k#sKGlO@Yi%N%XEA8clNWp5wbXN8IZ5LR7FE?#)}I=;0XX7EPxO^Dl2O#ar2 ztDNbzG&_WzD8{;RQs<_ZHCX?;dj?L$=m-qM!>~B!kRkmG7!*VjDj|E9;kd(5L@eqA zg@s8{MtsSe0&O@L4U6zYc$>d=P93JBI&sZ)={O-+IOa!g=NOpdgi3oGjQrhsvZH)# z)RvUYK!)rp1;9kL)+}exaD_E4l1AYU{}-bS>im1&dVfP;AP@~C)m)r^w~k(}bBzLv zF07GroGiRwioBc+!E7%P2FH-Orgqv8`5P*w93~{ekf)hJ?D;0x0cE37TkG|CTq&4@ zQlc4``Yq*5s42*O+REZApTd1oy(DVDRiURbeSy7CQieQ#0?}xUi2U~BUEKg2sFB+I zpnc*!es4Jg525qW9R8~^XM*Dwl#sbBNYrHxN}rN!fTXD$|7Hnn9S+h|hmp~lovfD3 zraa5-k%kCt0@F!X%ciDa8un?jzj*3bQi-NPR#mci<2d=Xm=d|QO%Q!TI4ZF+h$R0O>p&UwBQX1kqSnwbwinFVF2D~Rp);AM(xg!iaf8S{u z`Ae}a9E>!zIzMLtlg_G~>}6oLbY4B1{3A@)QQXQ^RPa1AY(36tHekSl149z$3Syje z=DeLeY=53_Ec3iEOz~A996-c#YbGD!D7$U79ES^bWHAj{?S^8=p&EDn?5U+>5A45j zJ!awVak7Y!-mxF2=Gg?aJ&Ej?PYv-;&gNd8HRY%`aoibgco1$&VL2-V!j0TjF=|Mj z%RT6E!K$t)89B*);&qZ`?)|NtfDtSA?R-j{oX^5wxyt$8Y&x1Fi+xN#hIOlev~=f= zC1ew8*N4SWq~+l8EUf-2`H{m20SW=OP_g_DqJ+7K9{Zd;*CpM{eKm`?=GcbB=tiBp zWC<;1`CXm-aEaw{do%?ZVuZXklh4hj2TejPf#DSP@*#3B!%Zx~(dAywZ|}J^oH(X# zo?UnoQ~H9HsZ$!p0SeDd8iL`nIRG(e@bg->^)bq(0C}#Hd%BiCHH7-UtDXBXOs(aY zbyE@PQBP&EU;XS zL+ThJ!FG-lghX5jcJudEF6yIW5}98~7L=2jUJJvZYdaUz6{f52xeL?ye)3DHeKnY= zB=6i~-Q_!6EAy_dQE0yqk)?Z>4qWW6C3L9S^C9vli~-KykpC#?H4)$)(S2%+-#=5j zoO4!U3HErn;|T0tmP)d%ja*etR2EL+VTG$L;|^7jw^ow3PJoeoHTk*2l7;ODb}(|; zeBUAjJ)lro1;XL6-a@j6r7HJDGpF$ba!n=RclF%sUWRbYKUU%pR`y}YYH+EUctf+U zz8{By^kD<7e^|cIj*~d1*yrpiw{pAj4G0@z_I+8aR}^+-vC 
zQ5zVR$zNJT0}Gr(tT)gnwNJ-jK3PyhW!yVUxnrB;<5`$rbaJUs@Nk(K{;xBkl*-M% zuWe|0a*iDG;X?`1WK^&v#7I+wzq^Pi9A)O7V|#GSjTkoaYq$W#!*yky^VE~x4SNrUW$g9#b4;8Vbcp6Ba@z*-nNr5 zFomo~9&(eP9S6_iHb*fiU%Y^K9xH5tg<(h`ZMD=8ITvEY3FLC;{4zV_ZH`YPcGUT(cOMyk}WVq@1OL*tfCC;PfumwBm zRJaosaC}OHlo*u*I8vb_!?N5D$68)@uUcz2fdxh+?y({pj!(m?cON-a#a(A(Fw5dv z8(d&5ecc=`O0qs7pG%ADh}Z)q z`;Sf*39AzCOk~2fOVKmBzGv~_6N0mSCDU#w&Og^cNKCWN-+ZK~$af_9kY!7=O5)a6g~f6GU%WhxcOf4S~sy&az)3IEFlrj-d8wdIZz(3+$GJUvQnTB z3m%rciQo#Y4T07OTH>wCa343@r?44P3;C^<2C}?=4#3`qFn0|fGwlHRC|%qMRa{fW zQsjWxt7uT=Ix%V=$mcv3*t2k$J4b%!={&elCFDP{y@)o3_-)6c-=n$xTI!ZPmQgse zlr(vwC5YoMVho!O0VI*zxe|!b*-g%@bwSX``o-l22^YI!g>%4w#n$sD+LsOiR~3%r z%F0j*GJ&RaP1fL0oGyZ?7|!E-Hk=`bW0!^1B5&Qsz)w$(EhZ;$!ckmt$N|iOPY=j) z$1`+~GVzJ?*Cr}JuCkM%Sw*KX$O!V3jZF9~Pcg8Ml5;rj$pz#j7lU(C&ilG-K*eI5 zl&r%JuWP0onn<*wpe%a@X(hSOH=Shra+wVA6jQ7C&q)DFTb3~RC>wGE!@WP7S_k7D zIkSwxp|a`b+J6*T&f-@3b~pDdESw@6TUaAtK#ou6$japWb_R@^EOLQ_AM7EiVk$8G z5{(Nzz7Qws4nsiB;>e7m&ntIePQMyvN$1eoruHHh6)Q^4E#jUc_)r#Za}BuFCW0fh zXCD3pRxEC8CIhV?c0=8J<%d6!4s&M|yMJTol?plE7T!ovZjw13*yl!O{OsEM99**- z2m;R8s%BPzYDX@1lI$Xk)G&tmrVQivL{FV9i9}D5M+ySnRUK2U48)14K2-aQ1v}TI7_ivc8IubWd9qr1#KXpva=9;G% z@8uhwDyqNb#;1z?54`_WNpMHa(`;nlhNnwg4&3-OS@P}sPnRvFJv(esyR@Uc(>t+) z>yAwCsOXJ)cG~;p9Uiw?FW~e-_-cqxL!f1N#P^3p{+_rsozu_U#AH zEqaokvnRO29^Vss)_e1w@Sezrdm8pe=R6tw(mwdY~{^D^z9=Q3%B_`jf$`n4 zw~cK)u(#b7^S%_duSvWVbI7;6)ZxkU^2=4U0 z(j9p<@k&q2!CPKgx#YV8udG^X_3i78R?7QM?DS3UTixAwa9>|<%=c=4{~Gz#HS6Wc zSJ$4BJNW9l4dcH3>l0VV`%g+uPVPTheemG^KV)|LUOOfGs{Goin+{IC_Qx~6JNTM- zwzcl{4VxErj$wRO}_Z?>;Z zO^-VUw@#nu8vb~COU-%pADr*KGWEd)^|x;Q;6ndHAAfLB@R|A#FOKX_eRxUBp{*bO zdCB)5fB2WBN6$NOX_QwE{I#=g+kwAzHyu83S#QU@gO~TORSsUUesJ5tD^D3deDJCb z=gm8Gb>d3p(BD(HZaZ|1`q1G+|HwQu@1tw8`<0Kb+jMB#N7tY6{o#*pIQ!`NAK$o{ z-}v#w`E|E{eAC5EpL~4tr5*DR-*WldjfW?%9=!eVt=A5Ja`?6z&zt|r)?2RJ_{p}d zx8DBA?b{#v|sP*b?~u^~6=_@83w>cIWqRs{j1#`?oT?0zbT+eJ%aNJDWbb^M`lO_~El3-aGr4 zg+IQ(xk~$S`uw?f{rJJf&7c4H;ibzK{&e8-b=pq{uTI|e)1hleKL6>X8@DX{`Quxz z(tbX?^|rfy{$%?A!yU z?A*J5{d!OH7r%b9ciE!fzTLM@|LwcilXw62{o5m7{Px59TNeHPJYGFTx3tU4LUNwMEtgFrx?w|ozo8`hUG8EOkc+|46rSPg-EO^zz zj>4N2-f{88lr7kRyTi_dp@RIf=d%p=@SQ~A>oZ&*AWrF95TsD6#5 z^v_?`FF!Z<=Ye#^)2?%0I;rYD&)ds-*;j&lUfHngpIsGOrRA?K-JJ{-aUtt^scNfh z^-IAu_0s;x*yhbkqPJ$BZI^qt^xx3AxwJ09bgsFsv;2c6`*%yWHM=%<&Fyc0=KSY( z1d2b*obZWOgJ%O+{{cvY#~6m+ue#=Y+Q07}>vzxwKAR_e$mR z$;eIP_2>D*#UCEGBBJu#Ib*ST%#-S6R@!oIX!T{MULK^D-(U29dO81~CeAdDzng5r zCS)NYBqSPj6EF>o!Gr=u1-l7oV+0x$Dp<5p5z*5ce^jlb(@pqcKu}PjRBa6^CwBS+ z+nysmuM^a&P;H~uma#n#IBjXqxuaKYZSQ7!Uutirf3$zxA3cW8JkR^Q&-?E4zPtPG zzPp>Zpf%@OTaM{yx^R1CP?E2>R_s1zmn|!67cGoEQ`{Kw(HBeiJ8RnBv^kFmY?^g= zf!&dj+^?!EKbJ3m-v80O(C?2L8+}&! 
diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/assets/data_csv_types.csv b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/assets/data_csv_types.csv
deleted file mode 100644
index d2f59a757fa..00000000000
--- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/assets/data_csv_types.csv
+++ /dev/null
@@ -1,102 +0,0 @@
-"path","month","hits"
-"String","Date","UInt32"
-"Akiba_Hebrew_Academy","2017-08-01",241
-"Aegithina_tiphia","2018-02-01",34
-"1971-72_Utah_Stars_season","2016-10-01",1
-"2015_UEFA_European_Under-21_Championship_qualification_Group_8","2015-12-01",73
-"2016_Greater_Western_Sydney_Giants_season","2017-05-01",86
-"AAA_Americas_Trios_Championship","2015-10-01",104
-"1420_in_literature","2016-05-01",20
-"Adair,_Beegie","2017-08-01",2
-"1980_Rugby_League_State_of_Origin_match","2017-07-01",2
-"Column_of_Santa_Felicita,_Florence","2017-06-01",14
-"2007_Copa_America","2016-07-01",178
-"Car_dealerships_in_the_USA","2015-07-01",11
-"Dihydromyricetin_reductase","2015-07-01",1
-"ATCvet_code_QB05BB01","2017-04-01",1
-"City_CarShare","2017-01-01",125
-"Heidenrod","2017-01-01",10
-"Arthur_Henrique","2016-11-01",12
-"Alan_Ebnother","2015-11-01",66
-"2013_UConn_football_season","2017-05-01",2
-"2008_American_League_Division_Series","2016-12-01",376 -"Antilipaemic","2017-09-01",12 -"Aberzombie","2016-12-01",28 -"2008_Asian_Wrestling_Championships","2016-12-01",76 -"Federal_Correctional_Complex,_Pollock","2017-01-01",19 -"Central_body","2015-07-01",32 -"Binbrook,_Ontario","2015-07-01",446 -"Azerbaijan_at_the_2016_Judo_Grand_Prix_Samsun","2016-10-01",25 -"Ashford_Lake","2017-10-01",80 -"1942_Joint_Strike","2015-12-01",3 -"AFC_Youth_Championship_2012","2017-10-01",2 -"Akhira","2016-07-01",64 -"Arroniro_Arlieri","2016-10-01",1 -"Alesheim_Burgsalach","2015-05-01",2 -"2700_classic","2017-05-01",4 -"ARX-8_Laevatein","2015-06-01",14 -"1991_Newsweek_Champions_Cup_-_Singles","2017-06-01",3 -"Aphelandra_sinclairiana","2017-07-01",69 -"Asia_Kong","2015-10-01",2 -"2012_Internazionali_Tennis_Val_Gardena_Sudtirol","2016-02-01",1 -"24_Carat_Purple","2017-06-01",476 -"Acroliths","2017-12-01",9 -"Bundesautobahn_3","2016-04-01",264 -"ATC_code_S01AX21","2016-09-01",1 -"Allington,_Lincolnshire","2015-11-01",188 -"Acer_Aspire_One","2017-06-01",5169 -"ATC_code_L04AC","2015-06-01",1 -"1969_New_Year_Honours","2017-07-01",269 -"Antonio_Napolitano","2017-11-01",44 -"Amberfish","2017-10-01",11 -"1976_Cam_2_Motor_Oil_400","2018-03-01",45 -"April_25,_2017","2018-01-01",2 -"Akahori_Station","2016-06-01",11 -"Abducens_palsy","2016-05-01",28 -"Ancona_cathedral","2018-01-01",2 -"Ajou_Motor_College","2017-02-01",83 -"Brad_Skyes","2016-11-01",1 -"Alegro_PCS","2017-07-01",157 -"Franz_Dunshirn","2017-01-01",1 -"Arthur_Godfrey_Road","2016-11-01",3 -"Ab_Golman","2017-05-01",30 -"Art_in_early_modern_Scotland","2016-03-01",98 -"1968_World_Series","2016-02-01",1960 -"1828_in_the_UK","2017-08-01",3 -"Explorer-1_Prime_Unit_2","2016-11-01",11 -"2014_Desafio_Internacional_das_Estrelas","2017-12-01",31 -"Ambulyx_subocellata","2016-08-01",1 -"2008_Hamilton_Tiger-Cats_season","2015-11-01",153 -"Deuterogamist","2015-07-01",5 -"Art_Nouveau_furniture","2017-12-01",839 -"Allison,_Colorado","2015-10-01",85 -"2014_MLS_Re-Entry_Draft","2017-09-01",36 -"Amiot_353","2015-12-01",8 -"ACLU_of_Massachusetts","2015-11-01",106 -"Altable,_Spain","2016-10-01",1 -"Agnidra_scabiosa","2016-12-01",16 -"Dictyotremella_novoguineensis","2015-07-01",1 -"Compiler_Construction","2015-07-01",42 -"Aufheben","2016-11-01",1080 -"Avafauna","2017-06-01",17 -"Atheist_billboard","2017-01-01",19 -"2011_Indonesia_Super_League_All-Star_team","2015-11-01",15 -"BrahMos_II","2015-07-01",31 -"1707_in_art","2016-04-01",17 -"Aeromarine_Model_60","2016-06-01",34 -"Ayatollah-al-ozma","2015-06-01",12 -"Exanimus","2017-01-01",4 -"Anderby","2017-01-01",29 -"Ashgabat_indoor_tennis_arena","2017-07-01",27 -"1971_Rose_Bowl","2015-12-01",961 -"2004_HR56","2016-05-01",5 -"1886_in_South_Africa","2016-03-01",70 -"Bishop_of_Central_Newfoundland","2016-04-01",1 -"Alice_Rivlin","2016-09-01",1137 -"Arriba_en_la_Cordillera","2017-06-01",39 -"Adam_Lively","2016-06-01",77 -"Colasposoma_fairmairei_fairmairei","2017-06-01",5 -"Archie_Barton","2017-02-01",49 -"Aharon_wasserman","2016-01-01",7 -"Alabama_Educational_Television_Commission","2017-05-01",3 -"Advanced_Technology_Bomber","2016-02-01",67 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/assets/data_small.csv b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/assets/data_small.csv deleted file mode 100644 index 14a0e1ac080..00000000000 --- 
a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/assets/data_small.csv +++ /dev/null @@ -1,1000 +0,0 @@ -"Akiba_Hebrew_Academy","2017-08-01",241 -"Aegithina_tiphia","2018-02-01",34 -"1971-72_Utah_Stars_season","2016-10-01",1 -"2015_UEFA_European_Under-21_Championship_qualification_Group_8","2015-12-01",73 -"2016_Greater_Western_Sydney_Giants_season","2017-05-01",86 -"AAA_Americas_Trios_Championship","2015-10-01",104 -"1420_in_literature","2016-05-01",20 -"Adair,_Beegie","2017-08-01",2 -"1980_Rugby_League_State_of_Origin_match","2017-07-01",2 -"Column_of_Santa_Felicita,_Florence","2017-06-01",14 -"2007_Copa_America","2016-07-01",178 -"Car_dealerships_in_the_USA","2015-07-01",11 -"Dihydromyricetin_reductase","2015-07-01",1 -"ATCvet_code_QB05BB01","2017-04-01",1 -"City_CarShare","2017-01-01",125 -"Heidenrod","2017-01-01",10 -"Arthur_Henrique","2016-11-01",12 -"Alan_Ebnother","2015-11-01",66 -"2013_UConn_football_season","2017-05-01",2 -"2008_American_League_Division_Series","2016-12-01",376 -"Antilipaemic","2017-09-01",12 -"Aberzombie","2016-12-01",28 -"2008_Asian_Wrestling_Championships","2016-12-01",76 -"Federal_Correctional_Complex,_Pollock","2017-01-01",19 -"Central_body","2015-07-01",32 -"Binbrook,_Ontario","2015-07-01",446 -"Azerbaijan_at_the_2016_Judo_Grand_Prix_Samsun","2016-10-01",25 -"Ashford_Lake","2017-10-01",80 -"1942_Joint_Strike","2015-12-01",3 -"AFC_Youth_Championship_2012","2017-10-01",2 -"Akhira","2016-07-01",64 -"Arroniro_Arlieri","2016-10-01",1 -"Alesheim_Burgsalach","2015-05-01",2 -"2700_classic","2017-05-01",4 -"ARX-8_Laevatein","2015-06-01",14 -"1991_Newsweek_Champions_Cup_-_Singles","2017-06-01",3 -"Aphelandra_sinclairiana","2017-07-01",69 -"Asia_Kong","2015-10-01",2 -"2012_Internazionali_Tennis_Val_Gardena_Sudtirol","2016-02-01",1 -"24_Carat_Purple","2017-06-01",476 -"Acroliths","2017-12-01",9 -"Bundesautobahn_3","2016-04-01",264 -"ATC_code_S01AX21","2016-09-01",1 -"Allington,_Lincolnshire","2015-11-01",188 -"Acer_Aspire_One","2017-06-01",5169 -"ATC_code_L04AC","2015-06-01",1 -"1969_New_Year_Honours","2017-07-01",269 -"Antonio_Napolitano","2017-11-01",44 -"Amberfish","2017-10-01",11 -"1976_Cam_2_Motor_Oil_400","2018-03-01",45 -"April_25,_2017","2018-01-01",2 -"Akahori_Station","2016-06-01",11 -"Abducens_palsy","2016-05-01",28 -"Ancona_cathedral","2018-01-01",2 -"Ajou_Motor_College","2017-02-01",83 -"Brad_Skyes","2016-11-01",1 -"Alegro_PCS","2017-07-01",157 -"Franz_Dunshirn","2017-01-01",1 -"Arthur_Godfrey_Road","2016-11-01",3 -"Ab_Golman","2017-05-01",30 -"Art_in_early_modern_Scotland","2016-03-01",98 -"1968_World_Series","2016-02-01",1960 -"1828_in_the_UK","2017-08-01",3 -"Explorer-1_Prime_Unit_2","2016-11-01",11 -"2014_Desafio_Internacional_das_Estrelas","2017-12-01",31 -"Ambulyx_subocellata","2016-08-01",1 -"2008_Hamilton_Tiger-Cats_season","2015-11-01",153 -"Deuterogamist","2015-07-01",5 -"Art_Nouveau_furniture","2017-12-01",839 -"Allison,_Colorado","2015-10-01",85 -"2014_MLS_Re-Entry_Draft","2017-09-01",36 -"Amiot_353","2015-12-01",8 -"ACLU_of_Massachusetts","2015-11-01",106 -"Altable,_Spain","2016-10-01",1 -"Agnidra_scabiosa","2016-12-01",16 -"Dictyotremella_novoguineensis","2015-07-01",1 -"Compiler_Construction","2015-07-01",42 -"Aufheben","2016-11-01",1080 -"Avafauna","2017-06-01",17 -"Atheist_billboard","2017-01-01",19 -"2011_Indonesia_Super_League_All-Star_team","2015-11-01",15 -"BrahMos_II","2015-07-01",31 -"1707_in_art","2016-04-01",17 -"Aeromarine_Model_60","2016-06-01",34 -"Ayatollah-al-ozma","2015-06-01",12 
-"Exanimus","2017-01-01",4 -"Anderby","2017-01-01",29 -"Ashgabat_indoor_tennis_arena","2017-07-01",27 -"1971_Rose_Bowl","2015-12-01",961 -"2004_HR56","2016-05-01",5 -"1886_in_South_Africa","2016-03-01",70 -"Bishop_of_Central_Newfoundland","2016-04-01",1 -"Alice_Rivlin","2016-09-01",1137 -"Arriba_en_la_Cordillera","2017-06-01",39 -"Adam_Lively","2016-06-01",77 -"Colasposoma_fairmairei_fairmairei","2017-06-01",5 -"Archie_Barton","2017-02-01",49 -"Aharon_wasserman","2016-01-01",7 -"Alabama_Educational_Television_Commission","2017-05-01",3 -"Advanced_Technology_Bomber","2016-02-01",67 -"1-krona","2017-01-01",4 -"Ahmadabad-e_Kalij-e_Sofla","2017-01-01",3 -"Bob_Dolman","2016-11-01",245 -"Bellevue,_French_Guiana","2017-01-01",5 -"Bison_Nickel","2017-01-01",2 -"Arthur_Drabble","2016-12-01",35 -"Edgewater_Borough,_New_Jersey","2016-11-01",3 -"Alberto_Cambrosio","2017-11-01",31 -"Amalia_Celia_Figueredo","2017-07-01",32 -"1989_-_1992_Rugby_League_World_Cup","2016-01-01",10 -"Admir_Seferagic","2016-06-01",7 -"Adriaan_Loosjes","2015-05-01",46 -"Alfred_Manuel_Martin","2015-06-01",3 -"Academy_of_the_Arabic_Language","2015-08-01",67 -"Ankita_Shrivastav","2018-01-01",7430 -"Anarchism_in_asia","2017-11-01",1 -"Batiquitos_Lagoon_State_Marine_Conservation_Area","2015-07-01",18 -"Alstonia_calophylla","2017-12-01",2 -"4-Hydroxycyclohexanecarboxylate_dehydrogenase","2016-11-01",4 -"832_symmetry","2017-09-01",6 -"1931_Fuyun_earthquake","2016-07-01",64 -"1998_Masters_of_Formula_3","2016-01-01",60 -"2011_LG_Hockey_Games","2016-04-01",7 -"Generalized_pustular_psoriasis","2017-01-01",159 -"2013_European_Cup_Winter_Throwing","2016-07-01",56 -"2008_in_Argentina","2017-06-01",48 -"Apostrophized","2017-10-01",5 -"Algebraically_compact_module","2017-01-01",5 -"Askett","2015-10-01",79 -"2009_swine_flu_outbreak_timeline","2015-08-01",65 -"72704-01-9","2017-12-01",4 -"Alexandre-Charles-Albert-Joseph_Renard","2017-11-01",4 -"Acyl-CoA_oxidase","2016-09-01",250 -"2011_Budweiser_Shootout","2015-08-01",109 -"Augusta_Davies_Webster","2015-07-01",2 -"Association_theory","2017-07-01",112 -"Abemama_Airfield","2015-05-01",8 -"Archaeological_Museum_of_Heraklion","2015-10-01",14 -"Authorized_marches_of_the_Canadian_Armed_Forces","2016-11-01",241 -"1986_in_Portugal","2017-01-01",7 -"Antiziganism_in_Bulgaria","2017-12-01",13 -"Adriana_Martin","2015-09-01",21 -"2004_Green_Bay_Packers_season","2015-05-01",970 -"Agrippa_the_Sceptic","2017-11-01",95 -"Admiral_Island","2016-04-01",1 -"Auxiliary_sign_language","2015-06-01",31 -"2013_Food_City_500","2015-06-01",90 -"Andy_Roesch","2015-08-01",15 -"Alsoszentivan","2017-05-01",4 -"Architecture_of_Belgium","2015-05-01",199 -"1_South_African_Infantry","2017-06-01",5 -"1930_Auburn_Tigers_football_team","2016-12-01",39 -"1860_in_Canada","2017-05-01",269 -"Aldeaseca_de_la_Frontera","2018-03-01",21 -"Elijah_Fox_Cook","2015-07-01",13 -"2010_BCS_Bowl_Games","2016-03-01",1 -"2017_NPSL_season","2017-06-01",2806 -"Bank_of_New_South_Wales_v_Commonwealth","2016-12-01",173 -"American_Enterprise_Association","2016-02-01",4 -"26th_Kentucky_Derby","2018-03-01",1 -"Chaldean_Diocese_of_Amid","2016-11-01",18 -"Ajaran_language","2016-03-01",1 -"1992_Texas_Rangers_season","2017-06-01",113 -"26_SAS","2017-12-01",3 -"2015_Terengganu_FA_season","2016-01-01",537 -"Aagard,_Oregon","2017-03-01",3 -"Auberry,_CA","2017-05-01",13 -"American_Eskimo_spitz","2015-09-01",3 -"Antidiabetic","2016-11-01",75 -"Asinius","2017-11-01",26 -"Andrey_Vasilievich_Abramov","2016-10-01",1 -"Alan_Carrington","2018-03-01",91 
-"Colebrook,_Ontario","2017-06-01",2 -"Abbasabad-e_Kheyrabad","2015-08-01",24 -"Arandjelovac_Municipality","2016-02-01",1 -"Aloysius_Valente","2017-12-01",11 -"Almondo_Curry","2016-03-01",86 -"4th_century_AD","2017-03-01",13 -"Askhat_Dilmukhamedov","2016-02-01",77 -"1147_AD","2017-05-01",1 -"1953_Torneo_di_Viareggio","2017-03-01",20 -"ATP_Schenectady","2015-12-01",30 -"Lakarian_City","2017-01-01",3 -"Adam_Ferency","2017-12-01",176 -"AugustDvorak","2016-07-01",5 -"97th_Light_Infantry_Division","2017-07-01",1 -"16th_Connecticut_Infantry_Regiment","2016-05-01",146 -"2011_Somalian_drought","2017-05-01",2 -"Anbargah","2017-12-01",8 -"1921_in_Paraguayan_football","2016-03-01",2 -"Cosmetic_dermatitis","2017-01-01",5 -"Annunciation_Greek_Orthodox_Cathedral,_Atlanta,_Georgia","2015-09-01",9 -"1300_AM","2016-07-01",106 -"A_Promising_Africa","2016-03-01",41 -"2015-16_Odense_Bulldogs_season","2016-10-01",1 -"Aral_AG","2017-12-01",1446 -"Angel_Vivar_Dorado","2015-12-01",6 -"1951_Australian_Championships","2018-03-01",32 -"DJMax_Portable_Hot_Tunes","2017-01-01",27 -"Allinge","2017-03-01",32 -"1986_Buick_WCT_Finals","2016-11-01",14 -"Arimatsu,_Aichi","2015-06-01",112 -"Arthur_Berzinsh","2017-02-01",249 -"Apolima_Uta","2017-04-01",23 -"Capitol_Hill_Pride_Festival","2015-07-01",19 -"Kara-Murza","2017-01-01",5 -"Aigleville,_Alabama","2015-11-01",19 -"Abdullah_bin_al-Hussein","2017-02-01",1 -"2017-18_Inter_Milan_season","2018-03-01",26 -"African_Collared_Dove","2016-07-01",10 -"Achaea_dmoe","2016-11-01",3 -"Aurora,_Utah","2016-06-01",201 -"Architecture_in_Portland,_OR","2017-07-01",1 -"Charchala","2015-07-01",4 -"Around_the_Roses","2015-07-01",3 -"1965_in_music","2016-12-01",3394 -"Alojzije_Jankovic","2017-04-01",5 -"Arisu_Seno","2015-08-01",6 -"ALCO_T-6","2017-01-01",77 -"1998_Royal_Bank_Cup","2015-12-01",32 -"1956_Claxton_Shield","2016-11-01",9 -"Anita_Dube","2017-07-01",233 -"Anderson_Windows","2015-05-01",13 -"Annaquatucket_River","2018-03-01",38 -"Black_salve","2017-01-01",1496 -"Anna_Pendleton_Schenck","2017-02-01",11 -"Asghar_Nadeem_Syed","2017-07-01",146 -"Disarming","2016-11-01",5 -"Antarctic_ice_cap","2017-08-01",7 -"Antonio_Ottone","2017-05-01",11 -"Coralie_Larnack","2017-01-01",9 -"Budha_Subba_Gold_Cup","2016-11-01",24 -"Amphoe_Chaiya","2017-03-01",9 -"Anarcho-capitalism_in_Somalia","2016-10-01",7 -"Felix_Loch","2017-01-01",131 -"26508_Jimmylin","2017-12-01",3 -"Andrew_McMillen","2015-11-01",134 -"Dundee_Canal_Industrial_Historic_District","2017-01-01",2 -"Aula_Baratto","2015-12-01",140 -"Church_of_St_Mary,_Knowsley","2015-07-01",1 -"Aggelakis","2017-10-01",1 -"Al_Badiyah","2017-11-01",157 -"Assault_Gunboat","2016-03-01",21 -"Lachau","2017-01-01",4 -"2008_Pittsburgh_Steelers_season","2016-12-01",10018 -"Apolychrosis_candidus","2018-01-01",24 -"Andrei_Krylov","2017-02-01",192 -"Aldesh_Vadher","2018-02-01",7 -"Alwand","2017-02-01",7 -"Edward_Baker_Lincoln","2015-07-01",4347 -"Aermotor_Corporation","2017-11-01",4 -"Aischylos","2017-01-01",7 -"6th_Assault_Aviation_Corps","2017-07-01",100 -"Azygos_lobe","2016-10-01",1598 -"Demirciler,_Nazilli","2015-07-01",4 -"Akhlaq-e-Hindi","2016-11-01",13 -"Dragon_Crusaders","2016-04-01",122 -"25V_USB","2016-01-01",1 -"Calliophis_melanurus","2017-01-01",31 -"Antonionian","2016-10-01",15 -"Ashley_Richardson","2017-09-01",1216 -"1st_Observation_Group","2018-01-01",6 -"Andrzej_Bargiel","2015-05-01",97 -"2008_AFL_National_Under_16_Championships","2018-03-01",20 -"Ammon_Bundy","2016-09-01",11890 -"Benno_Wandolleck","2016-11-01",5 
-"Aero-Kros_MP-02_Czajka","2016-03-01",136 -"A6005_road","2015-10-01",14 -"Eagle_Eye_Networks","2015-07-01",101 -"Aarberg","2017-12-01",277 -"Encyclopedia_of_anthropology","2015-07-01",1 -"Duncormick_railway_station","2016-11-01",7 -"Aiqing_huajiao_zhuanyi","2017-03-01",1 -"Crude_oil_washing","2016-04-01",466 -"2010_Indiana_Hoosiers_football_team","2017-06-01",90 -"Book_of_Bodley_Head_Verse","2015-07-01",18 -"Absence_seizure","2016-05-01",18152 -"Cayucupil","2016-04-01",3 -"Akanabee","2017-03-01",1 -"Grooved_consonant","2017-01-01",5 -"Dellamora_philippinensis","2015-07-01",7 -"Dejan_Blazevski","2017-01-01",1 -"Arabis_armena","2016-08-01",25 -"1988_Summer_Paralympics_medal_table","2016-12-01",90 -"2012-13_Basketball_Championship_of_Bosnia_and_Herzegovina","2017-04-01",2 -"1966_in_music","2017-10-01",3510 -"Antti_Tyrvainen","2015-12-01",2 -"African_desert","2016-06-01",262 -"Bruneau_mariposa_lily","2016-04-01",1 -"Bernie_Parmalee","2017-06-01",221 -"2015_South_American_Youth_Football_Championship_squads","2015-09-01",594 -"1985_IIHF_World_U20_Championship","2015-08-01",7 -"18th_British_Academy_Film_Awards","2018-02-01",270 -"523_Ada","2016-04-01",35 -"Active_Pharmaceutical_Ingredients","2016-02-01",5 -"Burley,_ID_mSA","2015-07-01",2 -"CFRN-TV-10","2017-06-01",2 -"1982_Super_Bowl_of_Poker","2017-08-01",38 -"Australian_Journal_of_Educational_Technology","2017-01-01",1 -"2013_Super_League_Grand_Final","2016-06-01",212 -"2006_BCR_Open_Romania","2015-06-01",25 -"Charlestown_Townies","2016-04-01",319 -"1943_Polish_underground_raid_on_East_Prussia","2017-08-01",8 -"Anthony_Celestino","2018-02-01",182 -"Andrew_Beerwinkel","2018-02-01",73 -"Greigia_atrobrunnea","2017-01-01",1 -"Adrian_Beecham","2017-11-01",1 -"Implementation_of_mathematics_in_set_theory","2017-01-01",12 -"Annastacia_Palaszczuk","2015-05-01",6247 -"Egon_Zimmermann_II","2016-11-01",3 -"Air_aide-de-camp","2018-03-01",137 -"Albert_Murphy","2016-09-01",1 -"1924_Arkansas_Razorbacks_football_team","2016-02-01",28 -"Avondale_Mill","2016-10-01",68 -"Alexander_Volzhin","2015-12-01",25 -"Arek_Monthly","2017-08-01",31 -"Dinka_Blanche","2015-07-01",1 -"1921_Mercer_Baptists_football_team","2016-11-01",10 -"Afro-Antiguan_and_Barbudan","2016-06-01",252 -"American_southern_literature","2016-10-01",3 -"1947_Swiss_Grand_Prix","2016-11-01",32 -"99p_Stores","2017-12-01",3028 -"Artem_Radkov","2018-03-01",21 -"Arctic_brome","2016-12-01",19 -"Battle_Of_Moskova","2015-06-01",6 -"Airdrieonians","2016-06-01",32 -"Advanced_transportation_controller","2018-03-01",79 -"BC_government","2016-12-01",18 -"Antonio_Maura","2017-03-01",457 -"Anjuman,_Afghanistan","2017-09-01",62 -"Deodato_Guinaccia","2015-07-01",13 -"Blowjob_Betty","2016-11-01",28 -"453d_Flying_Training_Squadron","2017-08-01",3 -"1990_Africa_Cup_of_Nations","2016-04-01",22 -"Agenville","2016-08-01",100 -"1202_in_Scotland","2018-01-01",82 -"Calytrix_desolata","2017-06-01",10 -"1957_in_Chile","2016-04-01",13 -"Anglican_Bishop_of_Torres_Strait_people","2017-08-01",1 -"2015_Mexican_Grand_Prix","2015-06-01",528 -"Catalan_parliament","2017-01-01",14 -"Cult_Shaker","2017-01-01",32 -"Ander_Gayoso","2016-11-01",34 -"Ageneiosus_ucayalensis","2017-12-01",20 -"Club_de_Berne","2015-07-01",194 -"Adecco","2016-03-01",9863 -"Anti-unionism","2018-01-01",11 -"Auchindoun_Castle","2017-01-01",102 -"557_in_poetry","2016-07-01",1 -"Abu_ol_Verdi_Rural_District","2017-01-01",1 -"Centro_73","2016-04-01",23 -"Dagger_compact_category","2016-04-01",97 -"Alan_Nunn_May","2017-11-01",770 -"Basal_clade","2015-07-01",44 
-"Aizu_Line","2015-08-01",26 -"Edward_Kernan_Campbell","2016-04-01",5 -"865_area_code","2016-12-01",9 -"Bahamas_at_the_1984_Summer_Olympics","2017-06-01",35 -"Gardan_Kalat","2017-01-01",1 -"American_Samoa_national_under-19_football_team","2017-12-01",4 -"Kayah_National_United_League","2017-01-01",14 -"2007_Nordea_Nordic_Light_Open_-_Singles","2016-10-01",2 -"Avondale_Estate","2016-11-01",2 -"Acalolepta_variolaris","2017-02-01",3 -"Anantapur,_Andhra_Pradesh","2017-05-01",1032 -"Amenable_Banach_algebra","2015-08-01",59 -"300_metres","2017-01-01",61 -"Black_Bottom,_Kentucky","2016-04-01",8 -"100_Players_Who_Shook_The_Kop","2018-01-01",1133 -"Adventure_story","2015-07-01",29 -"Anacampsis_lignaria","2017-05-01",5 -"2007_American_Indoor_Football_Association_season","2015-09-01",89 -"Dmitry_Kardovsky","2016-04-01",33 -"A10_autoroute","2015-11-01",27 -"1995_Sydney_Bulldogs_season","2017-04-01",40 -"Ilex_jelskii","2017-01-01",2 -"Adrian_Jose_Hernandez","2016-10-01",2 -"CallAir_A-5","2016-11-01",4 -"22nd_meridian_west","2015-07-01",45 -"Anglican_Diocese_of_Antananarivo","2015-08-01",2 -"Andrew_Kelsey","2016-11-01",14 -"Brownhill_Creek","2017-06-01",4 -"Abunai_Deka","2015-06-01",269 -"Aisha_Jefferson","2017-04-01",115 -"Alonso_Lopez","2017-03-01",7 -"Aeroparque_Ciudad_de_Mendoza","2016-01-01",1 -"Arthur_Ashley_Sykes","2017-12-01",45 -"Holy_Face_Medal","2017-01-01",20 -"1Chronicles","2018-02-01",1 -"2014_CFU_Club_Championship","2017-12-01",108 -"Aetna_class_ironclad_floating_battery","2015-06-01",37 -"Antoine_Delrio","2015-07-01",2 -"Chislet_Windmill","2015-07-01",38 -"Aerojet_SD-2","2017-07-01",59 -"Age_role_play","2015-09-01",2 -"50687_Paultemple","2018-03-01",8 -"1997-98_Cuban_National_Series","2017-02-01",1 -"Aleksandr_Borisovich_Belyavskiy","2017-10-01",42 -"Carol_MacReady","2017-01-01",111 -"18th_Chess_Olympiad","2015-06-01",134 -"Clara_Schonfeld","2015-07-01",1 -"Apollonius_of_Athens","2017-02-01",35 -"ABC_80","2018-03-01",603 -"Apatelodes_damora","2015-08-01",22 -"Ernest_Walbourn","2016-04-01",30 -"428_BCE","2017-04-01",2 -"72nd_Seaforth_Highlanders","2017-12-01",29 -"Broughton_Hackett","2015-07-01",38 -"A_Fazenda_2","2016-12-01",56 -"ATCvet_code_QJ01MQ","2017-05-01",2 -"Abura,_Iran","2017-03-01",3 -"DeLeon_Independent_School_District","2015-07-01",1 -"Abby_aldrich","2016-09-01",1 -"Cinema_One_Originals","2016-11-01",359 -"2013_European_Short_Course_Swimming_Championships","2017-09-01",124 -"Ars_technica","2015-11-01",442 -"AMS_Production_Company","2016-02-01",1 -"Joao_Soares","2017-01-01",1 -"Cervical_vertebra_6","2017-06-01",45 -"Kevin_Pugh","2017-01-01",2 -"Alpha-1_antitrypsin","2015-11-01",11845 -"Assyrians_in_iran","2017-07-01",53 -"Boophis_ankarafensis","2016-11-01",2 -"A_View_To_a_Kill","2018-01-01",4 -"Charles_Edouard_Brown-Sequard","2015-07-01",7 -"1919_in_Ireland","2017-04-01",239 -"74th_Foot","2015-06-01",3 -"9275_Persson","2016-07-01",22 -"Dalcerides_mesoa","2015-07-01",11 -"A_Summer_Bird-Cage","2016-03-01",248 -"2011_NAB_Cup","2017-10-01",127 -"13th_Parliament_of_Lower_Canada","2015-08-01",41 -"2011_Players_Championship_Finals","2015-07-01",25 -"Flag_of_Tenerife","2017-01-01",128 -"Hypopta_corrientina","2017-01-01",1 -"Jalatarangam","2017-01-01",16 -"Adjoint_endomorphism","2018-01-01",330 -"Anime_conventions","2015-06-01",18 -"2004_Grammy_Award","2015-06-01",13 -"American_war","2015-07-01",80 -"Beynes,_Yvelines","2016-11-01",32 -"Agriculture_Department","2016-06-01",16 -"Andrey_Chisty","2015-10-01",58 -"Ait_Yahia_Moussa","2017-08-01",7 -"Alfred_Blau","2017-03-01",57 
-"1869_in_sports","2017-08-01",73 -"Ambolodia_Sud","2016-04-01",6 -"Animal_slaughter","2017-06-01",6423 -"Adamowka_Commune","2018-01-01",2 -"Arsenic_pentachloride","2016-03-01",467 -"220_BCE","2016-01-01",3 -"863d_Engineer_Battalion","2015-11-01",160 -"Amer_Abu-Hudaib","2017-04-01",31 -"Aaina_tv","2017-08-01",3 -"Arnhem,_Netherlands","2015-08-01",67 -"Antoine_de_sartine","2015-08-01",4 -"ATC_code_A16","2016-01-01",155 -"Eastern_Front","2017-01-01",70 -"Ashy-headed_tyrannulet","2016-12-01",44 -"Aoheng_language","2015-08-01",64 -"1996_World_Junior_Canoe_Slalom_Championships","2017-11-01",15 -"Agriophara_nodigera","2017-11-01",12 -"Amsterdam_Island_cattle","2015-12-01",675 -"Aliyah_from_the_Soviet_Union_in_the_1990s","2017-08-01",54 -"Abandoned_and_Little_Known_Airfields","2018-01-01",2 -"Church_numerals","2015-07-01",57 -"Ankeny_Christian_Academy","2015-09-01",74 -"2010_FIFA_World_Cup_qualification_-_AFC_First_Round","2017-06-01",58 -"1ESS_switch","2015-07-01",514 -"Chelys_boulengerii","2016-04-01",1 -"Bivalent_logic","2016-11-01",25 -"Ivan_Skavinsky_Skavar","2017-01-01",1 -"Fergus_Sings_the_Blues","2016-04-01",62 -"2015-16_Libyan_Premier_League","2017-02-01",4 -"Dutch_Chess_Championship","2017-01-01",35 -"Every_Man_in_His_Humor","2016-11-01",1 -"2008_Allstate_BCS_National_Championship_Game","2015-08-01",11 -"Aq_Tappeh,_Hamadan","2015-09-01",25 -"Agrotractor","2016-02-01",1 -"Alexander_of_Pfalz-Zweibrucken","2017-12-01",2 -"2003_Mistral_World_Championships","2016-04-01",6 -"146th_Fighter-Interceptor_Wing","2015-11-01",49 -"Al-Qahir","2016-04-01",328 -"25604_Karlin","2015-05-01",20 -"Allen_taflove","2017-12-01",3 -"Aretha_Thurmond","2017-05-01",109 -"Atlanta_and_lagrange_rail_road","2015-07-01",1 -"ACSI_College_Iloilo","2015-10-01",1 -"Alan_Sacks","2015-07-01",150 -"African_Desert_Warbler","2017-02-01",11 -"A_Man_and_His_Soul","2018-02-01",89 -"ASCII_ART","2015-05-01",9 -"1992-93_VMI_Keydets_basketball_team","2016-10-01",1 -"George_and_the_Dragon","2017-01-01",18 -"2012_NAB_Cup","2016-12-01",99 -"1965_Indy_500","2016-05-01",51 -"Forest_Glen,_Nova_Scotia","2016-04-01",9 -"A_Critical_Dictionary_of_English_Literature","2016-08-01",4 -"Aquion_Energy","2015-08-01",1077 -"Alibeyce,_Emirdag","2017-09-01",1 -"Blauhu00F6hle","2015-07-01",1 -"Ian_Sommerville","2017-01-01",1 -"Air_propulsion","2017-07-01",474 -"2016_12_Hours_of_Sebring","2016-10-01",187 -"Asites","2017-07-01",4 -"Al-Kini","2017-03-01",1 -"Austin_Aztex_2009_season","2016-03-01",10 -"Alto_Vista_Chapel","2015-12-01",833 -"Abecedaria","2017-04-01",22 -"Farm_to_Market_Road_2503","2016-11-01",3 -"Anglican_Bishop_of_The_Leeward_Islands","2015-09-01",2 -"Basketball_at_the_2011_Pan_American_Games","2017-06-01",120 -"Angela_Peel","2016-08-01",7 -"Amber_Frey","2018-02-01",728 -"Afraid_to_Sleep","2017-06-01",51 -"ATC_code_A02BA","2018-02-01",7 -"Apateon_pedestris","2015-11-01",5 -"Alois_Estermann","2015-12-01",1155 -"1752_in_science","2016-01-01",78 -"Baldassin","2017-06-01",3 -"Camilla_Hildegarde_Wedgwood","2017-01-01",1 -"B-A-C-H_motive","2016-10-01",3 -"AI_Velorum_star","2016-09-01",1 -"Ali_Zayn_al-Abidin","2017-04-01",71 -"Ailurarctos_lufengensis","2015-07-01",1 -"Clearview,_Philadelphia","2017-06-01",67 -"Adam_Sender","2016-08-01",759 -"Apriona_paucigranula","2018-02-01",7 -"Dark_at_the_Top_of_the_Stairs","2015-07-01",10 -"Acanthio","2017-12-01",11 -"1980_Labatt_Brier","2018-01-01",111 -"2016-17_New_York_Knicks_season","2017-10-01",21 -"1995_CAF_Cup","2015-10-01",48 -"Boiled_linseed_oil","2016-04-01",79 -"2015_Kumanovo_clashes","2016-07-01",6 
-"David_Jamieson","2017-01-01",3 -"1915_Florida_Gators_football_team","2015-08-01",32 -"2010-11_New_Zealand_Football_Championship","2017-03-01",1 -"Ashley_Church","2015-08-01",27 -"Acanthoxylini","2017-06-01",27 -"American_Hindu","2016-10-01",33 -"Amylosporomyces","2015-12-01",20 -"2007_Southeast_Asia_Basketball_Association_Championship","2018-01-01",1 -"Aethelred_I","2017-08-01",1 -"2-methyl-GPP_synthase","2018-02-01",1 -"Dave_Aspin","2016-11-01",6 -"Descent_of_the_Nine","2016-04-01",1 -"2010_Kleen_Energy_Systems_disaster","2017-08-01",3 -"1978_in_Japanese_television","2017-08-01",70 -"Alexandros_Falekas","2018-01-01",1 -"1910_in_Afghanistan","2016-02-01",32 -"Abd-ru-shin","2017-09-01",681 -"610_in_poetry","2017-05-01",3 -"2015_arrests_of_FIFA_officials","2017-12-01",46 -"ATmega328P","2017-09-01",26 -"A_G_Mathews","2017-12-01",3 -"Attack_on_Mers-el-Kebir","2016-12-01",511 -"2016_in_Estonia","2016-05-01",89 -"Adidas-Salomon","2015-09-01",574 -"Education_and_Skills_Act_2008","2016-11-01",141 -"1789_in_the_United_States","2015-07-01",845 -"Apple_Computer_advertising","2015-09-01",7 -"9th_US_Army","2016-12-01",17 -"Ad_Rotas","2016-02-01",16 -"Agios_Ioannis,_Paphos","2018-03-01",97 -"Arabian_toad","2017-12-01",100 -"Anterior_pituitary_acidophil","2016-06-01",47 -"Arguello,_Christine","2017-12-01",3 -"Amilkar_Ariza","2017-03-01",67 -"Charles_Grierson","2016-11-01",14 -"Achi,_Bolivar","2017-11-01",1 -"Exonym_and_endonym","2017-01-01",1712 -"Abdul_Maroof_Gullestani","2017-12-01",20 -"Fairlawne_Handicap_Chase","2016-04-01",11 -"1963_Virginia_Tech_Hokies_football_team","2016-07-01",6 -"AE_Clarke","2017-12-01",3 -"ALFA-PROJ_Model_3563_sport","2017-10-01",2 -"Aleks_Vanderpool-Wallace","2018-02-01",32 -"Antioxident","2017-05-01",16 -"Calliope_Project","2015-07-01",3 -"Anderson_World","2017-10-01",5 -"Amydria_selvae","2017-11-01",6 -"Antoni_Katski","2016-09-01",1 -"Bera_District","2017-06-01",85 -"80_South_Street_New_Design","2016-07-01",86 -"Askizsky","2015-08-01",2 -"Amausi_metro_station","2015-11-01",44 -"9486_Utemorrah","2017-04-01",5 -"Army_CIS","2018-01-01",2 -"1851_Chilean_Revolution","2017-06-01",255 -"Jens_Robert_Dahlqvist","2017-01-01",6 -"1966-67_Tercera_Division","2017-05-01",1 -"Chanel_Iman","2017-06-01",9434 -"Astydamia","2017-06-01",34 -"1944_in_Belgium","2016-09-01",27 -"Acton_Baronets,_of_Aldenham","2017-01-01",1 -"2014_FBS_season","2016-12-01",5 -"2016_Winter_Youth_Olympics","2017-09-01",2090 -"1903_Clemson_Tigers_football_team","2017-06-01",50 -"2014_Taca_da_Liga_Final","2017-04-01",2 -"10th_Alberta_general_election","2016-11-01",4 -"Edertalschule_Frankenberg","2016-04-01",16 -"4th_Punjab_Infantry_Regiment","2017-09-01",136 -"America_Air_Linhas_Aereas","2018-02-01",1 -"Australian_Liberal_Party","2015-06-01",146 -"American_licorice","2017-05-01",15 -"2013_NASCAR_Cup_Series","2015-10-01",49 -"Anja_Lundqvist","2016-03-01",93 -"Amauris_dannfelti","2016-01-01",12 -"Abandoned_shipwrecks_act","2015-06-01",3 -"11086_Nagatayuji","2017-02-01",3 -"Advertising_tissues","2017-06-01",1 -"Anti_corn-law_league","2016-10-01",1 -"Always_Guaranteed","2017-09-01",445 -"Alfredo_Palacio_Moreno","2018-01-01",48 -"Antonio_Puche_Vicente","2015-06-01",1 -"Elazig_Province","2017-01-01",1 -"ATC_code_C02AC01","2017-05-01",1 -"Alexander_Mattock_Thompson","2016-08-01",2 -"Cocos_Islands_Malay","2017-06-01",63 -"Aftonbladet_antisemitism_controversy","2016-10-01",1 -"Azad_Kashmir,_Pakistan","2015-07-01",14 -"1852_English_cricket_season","2016-10-01",24 -"Birmingham_Pride","2015-07-01",129 
-"Air-pollution_controls","2015-08-01",4 -"James_Southerton","2017-01-01",20 -"Architecture_of_Chiswick_House","2015-06-01",240 -"Alexander,_Colin","2015-12-01",1 -"Al-Mansooreh","2016-10-01",1 -"Arielle_Gastineau_Ashton","2017-12-01",18 -"Blue_Ben","2017-06-01",240 -"1911_Michigan_State_Normal_Normalites_football_season","2017-11-01",1 -"Arctictis_binturong","2017-04-01",334 -"Fornaldarsaga","2016-04-01",18 -"Bibasis_gomata","2017-06-01",35 -"Anna_Schchian","2017-06-01",19 -"2005_in_Rwanda","2016-08-01",69 -"Archaeology_in_ethiopia","2016-01-01",1 -"23277_Benhughes","2016-12-01",2 -"Bahrain_-_USA_relations","2017-06-01",1 -"Dieter_Korn","2015-07-01",13 -"Antidynamo_theorem","2016-10-01",222 -"An_Jae-Won","2016-12-01",1 -"Bruray","2015-07-01",82 -"Gosport_Council_election,_2004","2017-01-01",2 -"1856_in_South_Africa","2017-03-01",60 -"Dialakoro,_Guinea","2017-01-01",1 -"05-CV-1678","2016-02-01",1 -"Allison,_Henry","2016-12-01",5 -"Animal_house","2016-06-01",1399 -"Alexander_Tabarrok","2017-03-01",5 -"Chung-Ho_Memorial_Hospital","2017-06-01",50 -"2013_Internazionali_Trofeo_Lame_Perrel-Faip_-_Doubles","2016-03-01",4 -"1965_Speedway_World_Team_Cup","2017-11-01",13 -"Alexander_Ollongren","2017-11-01",788 -"Amore_traditore,_BWV_203","2016-06-01",83 -"Arthur_William_Rogers","2015-10-01",31 -"Ashoka_pillar","2017-02-01",265 -"1_62_honeycomb","2018-02-01",10 -"1926_Australasian_Championships","2016-05-01",47 -"Export_award","2016-04-01",3 -"5000_Days_Project","2016-07-01",75 -"2012_UCI_Europe_Tour","2017-03-01",65 -"1985_Toronto_Indoor_-_Singles","2015-08-01",4 -"Cedar_Grove,_North_Carolina","2017-06-01",18 -"Battle_of_The_Afsluitdijk","2016-04-01",15 -"Arishtanemi","2017-03-01",7 -"Alfalfa_bill_murray","2016-12-01",7 -"Elisha_Jay_Edwards","2015-07-01",28 -"Arturas_Paulauskas","2016-01-01",10 -"Abdelrahman_Hamad","2015-09-01",2 -"1948_in_Northern_Ireland","2015-07-01",29 -"1988_in_philosophy","2015-05-01",70 -"5-Hydroxytryptaminen","2016-01-01",4 -"2017_FBS_season","2017-10-01",124 -"Areeiro","2016-04-01",2 -"Alemonides","2016-03-01",6 -"Abrochia_caurensis","2016-10-01",1 -"Anafylaxia","2018-01-01",2 -"1938_Grand_National","2018-02-01",80 -"China-Korea_Champions_League","2015-07-01",4 -"Acetyl_bromide","2017-11-01",448 -"24_hours_of_lemans","2015-05-01",37 -"Albright_hereditary_osteodystrophy","2017-02-01",153 -"Ashland_Bus_System","2015-08-01",115 -"1,8-Cineole_2-endo-monooxygenase","2016-10-01",8 -"2005-2006_NHL_Season","2015-11-01",6 -"Cammie_Dunaway","2015-07-01",344 -"D-Fish","2016-11-01",2 -"4_sister_vineyard","2015-09-01",1 -"Alessia_Cara_discography","2017-03-01",100 -"Alexander_Berg","2017-08-01",63 -"4822_Karge","2018-02-01",32 -"Emile_Francis_Trophy","2017-01-01",8 -"Amin_Ghaseminejad","2017-06-01",45 -"Artichia","2017-09-01",19 -"Cividale","2016-11-01",41 -"2007_Orissa_Violence","2016-05-01",1 -"Australian_Saltbush","2016-12-01",5 -"Asian_Food_Channel","2016-09-01",727 -"Camp_iawah","2015-07-01",1 -"ATC_code_J01MA04","2017-11-01",1 -"Arpad_Balazs","2017-10-01",2 -"Angel_of_Music,_or_The_Private_Life_of_Giselle","2018-02-01",56 -"1983_Torneo_di_Viareggio","2016-03-01",22 -"Arellano_University","2017-09-01",1699 -"ATC_code_B03AA","2017-11-01",1 -"FS5000","2016-11-01",1 -"Abd-Allah_ibn_Zubayr","2017-05-01",2 -"1889_SAFA_season","2016-04-01",28 -"Aloha_bowl_broadcasters","2015-05-01",2 -"1994_All_England_Open_Badminton_Championships","2016-07-01",75 -"Are_We_Not_Horses","2015-07-01",79 -"Angiolo_Torchi","2018-02-01",5 -"Chimanimani_National_Park","2017-06-01",37 
-"Art_manifesto","2017-09-01",2619 -"Adrian_Apostol","2016-10-01",62 -"Adventure_book","2015-10-01",14 -"Albemarle_Bertie","2016-06-01",20 -"Adam_Deibert","2017-08-01",611 -"Alberta_association_of_architects","2017-10-01",2 -"Alloschmidia","2017-11-01",15 -"Administrative_department_of_security","2016-05-01",1 -"Archdeaconry_of_Dudley","2017-07-01",19 -"Ammayenna_Sthree","2015-12-01",38 -"Aaron_Spelling","2016-05-01",25128 -"Anatolian_hieroglyph","2016-07-01",308 -"Central_University_of_Rajasthan","2016-11-01",323 -"Annamanum_touzalini","2017-08-01",7 -"Acleris_hispidana","2016-11-01",2 -"Frisco_kid","2016-04-01",15 -"Allerheiligenberg_monastery","2017-12-01",2 -"Arctic_comb_jelly","2017-03-01",3 -"279377_Lechmankiewicz","2016-06-01",1 -"AEGON_Pro-Series_Loughborough","2018-02-01",7 -"Firefly_Space_Systems","2017-01-01",235 -"2000-01_Hong_Kong_League_Cup","2017-12-01",6 -"British_supermarkets","2017-01-01",2 -"A_description_of_New_England","2016-10-01",13 -"Artificial_Flavoring","2016-06-01",2 -"Anglican_bishop_of_the_Torres_people","2018-02-01",1 -"Antonio_Diaz_Cardoso","2018-02-01",1 -"Johan_Patriksson","2017-01-01",3 -"Ashutosh_Morya","2017-07-01",1 -"Iron_ore","2017-01-01",3682 -"AT-16_Scallion","2015-08-01",594 -"Data_analyst","2015-07-01",134 -"Cabbageball","2016-04-01",3 -"Acanthonyx_seriopuncta","2017-04-01",2 -"Aegeria_ruficauda","2017-10-01",1 -"Archibald_Douglas,_1st_Earl_of_Ormond","2016-06-01",100 -"2014_European_Championships_in_Athletics","2017-01-01",3 -"1Co-Co1","2017-08-01",77 -"Arthur_Abba_Goldberg","2015-10-01",2 -"Ameri-Cana_Ultralights","2015-05-01",33 -"1979_British_Formula_One_season","2015-12-01",218 -"American_colonial_history","2016-06-01",6 -"Arcadia_Martin_Wesay_Toe","2015-06-01",73 -"Adam_Ornstein","2017-08-01",2 -"Archive_of_Modern_Conflict","2016-12-01",307 -"Ciro_Urriola","2015-07-01",12 -"Acanthosyris","2015-12-01",53 -"Eriopyga_jamaicensis","2015-07-01",1 -"10th_parallel_north","2016-06-01",1412 -"Derek_Almond","2017-01-01",2 -"Jaimanglapur","2017-01-01",4 -"Aphroditeola_olida","2018-02-01",6 -"18th_dynasty_of_egypt","2017-06-01",2 -"Ali_ben_Ahmed","2016-08-01",62 -"Ashkur_Mahalleh","2018-02-01",8 -"Adolf_Mosengel","2017-02-01",54 -"1838_Safed_pogrom","2016-02-01",1 -"1829_in_architecture","2017-05-01",24 -"Arcones,_Segovia","2016-05-01",3 -"Albert_Smith_Medal","2018-02-01",30 -"Arqanqergen_mass_murder","2015-10-01",60 -"Jaan_Usin","2017-01-01",4 -"2009_Bangladesh_Rifles_revolt","2016-03-01",269 -"-coltore","2015-11-01",9 -"Ernest_Makins","2017-01-01",10 -"Amsterdam_Bijlmer_Arena","2016-07-01",87 -"Apostolic_assemblies_of_christ","2018-01-01",1 -"Abirabad,_Razavi_Khorasan","2015-08-01",26 -"2016_All-Ireland_Senior_Football_Championship","2015-10-01",883 -"Asylum_seeking","2016-06-01",36 -"56th_parallel","2015-07-01",12 -"Junior_roller_derby","2017-01-01",19 -"Ana_Goncalves","2016-03-01",2 -"Alekseevskiy_Raion","2017-11-01",1 -"2009_Vietnam_national_football_team_results","2017-07-01",15 -"Chicago,_Burlington_and_Quincy_Railroad_Depot","2017-01-01",2 -"Fox_Valley_Conference","2016-04-01",84 -"Brachioplasty","2017-06-01",304 -"Arnold_Doren","2017-06-01",11 -"All_Ireland_mandolin_Champion","2015-07-01",2 -"Deborah_Rennard","2016-04-01",814 -"Anthony_Macdonnell","2016-02-01",2 -"Azerbaijan_Pakistan_relations","2017-01-01",1 -"A_Girl_Named_Zippy","2018-03-01",346 -"Academic_OneFile","2018-02-01",109 -"East_Point_Academy","2017-01-01",48 -"2011_Italian_Figure_Skating_Championships","2017-03-01",47 -"Chen_Qiao_En","2016-04-01",52 -"Canobie_lake","2016-04-01",1 
-"Andrei_Arlashin","2017-11-01",13 -"Again_Into_Eyes","2017-12-01",54 -"Andropogon_curtipendulus","2018-02-01",1 -"Abbath","2016-05-01",927 -"Alien_Opponent","2016-05-01",160 -"Art_of_Love","2016-02-01",3 -"Ariana_Huffington","2017-05-01",84 -"Amy_Poehler","2016-04-01",62732 -"Cherven,_Rousse_Province","2015-07-01",2 -"1_Month_2_Live","2018-03-01",306 -"Country_Day_School_of_the_Sacred_Heart","2017-06-01",132 -"Cooperative_institute_for_arctic_research","2015-07-01",2 -"Depression_symptoms","2017-01-01",7 -"Brent_Skoda","2016-04-01",31 -"American_Christians","2016-12-01",10 -"Counterbleed","2017-01-01",1 -"Abarka","2016-05-01",325 -"Aleksander_Povetkin","2017-02-01",89 -"Austin_TX","2016-03-01",119 -"Aleksandr_Tretyakov","2017-01-01",40 -"Connecticut_congressional_districts","2016-11-01",3 -"Alessio_de_Marchis","2015-10-01",66 -"Capel_Salem,_Pwllheli","2016-04-01",6 -"5-alpha_reductase_deficiency","2016-10-01",30 -"Annabelle_Croft","2016-01-01",32 -"Aeronca_Aircraft_Corporation","2017-05-01",9 -"1597_in_Scotland","2016-07-01",18 -"Alf_Somerfield","2017-11-01",10 -"Agapanthia_villosoviridescens","2018-02-01",53 -"Adam_Goldberg","2015-12-01",42338 -"1961_Paris_massacre","2017-01-01",52 -"2007_in_radio","2017-04-01",131 -"Arthur_French,_5th_Baron_de_Freyne","2015-12-01",44 -"AMD_Socket_G3","2017-04-01",121 -"Albert_geouffre_de_lapradelle","2016-02-01",1 -"Collaborations_between_ex-Beatles","2015-07-01",1279 -"Betty_Ireland","2016-04-01",40 -"Domingo_Tirado_Benedi","2015-07-01",1 -"Bac_Ly","2016-04-01",1 -"All_gas-phase_iodine_laser","2015-07-01",136 -"Andre_Salifou","2017-01-01",1 -"1,3-b-D-glucan","2017-05-01",2 -"Joseph_Johnston_Muir","2017-01-01",3 -"17th_of_Shahrivar_league","2016-05-01",63 -"2001_in_art","2018-03-01",131 -"Abiji_language","2017-10-01",6 -"Ahliah_school","2018-03-01",133 -"1605_in_India","2017-12-01",83 -"Dr_Jeom_Kee_Paik","2015-07-01",1 -"1954_Texas_Longhorns_football_team","2018-01-01",69 -"1985_Little_League_World_Series","2016-07-01",226 -"Eleanor_de_bohun","2015-07-01",1 -"Adrenaline_strength","2016-03-01",8 -"434_BC","2018-02-01",97 -"8x60mm_S","2015-06-01",61 -"2016-17_South_Pacific_cyclone_season","2017-09-01",101 -"Beth_Aala","2017-06-01",15 -"Al_Shaver","2017-07-01",138 -"Adelphoi_Zangaki","2018-01-01",89 -"Cyclopropyl_group","2016-11-01",167 -"216_Sqn","2017-08-01",11 -"20469_Dudleymoore","2017-05-01",5 -"Attila_Hildmann","2017-06-01",103 -"1970_Arkansas_Razorbacks_football_team","2016-11-01",66 -"Anthony_Fairfax","2017-08-01",24 -"Fort_Point,_Boston","2016-04-01",384 -"Epsilon_numbers","2016-04-01",3 -"2013_Recopa_Sudamericana","2016-05-01",202 -"Italo_Disco","2017-01-01",27 -"Andersen_Press","2015-09-01",228 -"Amasa_Walker","2017-09-01",146 -"2010_in_Israeli_film","2015-09-01",234 -"A-25_Shrike","2017-12-01",90 -"2009_Winnipeg_Blue_Bombers_season","2017-06-01",66 -"Ashland_County,_Ohio","2016-10-01",1298 -"Dusky_Turtle_Dove","2017-01-01",3 -"Antonov_148","2017-02-01",129 -"Abdul_Hamid_Lahori","2017-08-01",458 -"Amadeo_of_Spain","2015-11-01",1701 -"2015_Novak_Djokovic_tennis_season","2017-07-01",2484 -"Dhabawallah","2016-04-01",4 -"Afshar_Beylik","2017-06-01",4 -"1998_ATP_Tour_World_Championships_-_Singles","2017-03-01",20 -"Beach_Haven_Terrace,_New_Jersey","2016-11-01",4 -"Aix-la_Chapelle","2018-03-01",66 -"Ackerman,_Val","2017-05-01",2 -"47th_Ohio_Infantry","2016-12-01",59 -"100_People,_100_Songs","2017-11-01",517 -"2007_Masters_of_Formula_3","2016-01-01",63 -"1832_US_presidential_election","2016-05-01",6 -"Aaron_Baker","2016-05-01",113 
-"2015-16_FIBA_Europe_Club_Competition","2017-11-01",2 -"Alebra","2018-02-01",27 -"Asilus_crabroniformis","2016-11-01",4 -"Earth_and_Air_and_Rain","2016-11-01",31 -"2014_Stade_Tata_Raphael_disaster","2018-02-01",1 -"Alexander_Izvolski","2017-01-01",7 -"Fabric_17","2017-01-01",13 -"1925_Campeonato_de_Portugal_Final","2018-01-01",37 -"1948_Ashes_series","2017-01-01",121 -"Abraham_ben_david","2016-09-01",4 -"2006_Acropolis_Rally","2017-01-01",12 -"Alottment","2017-03-01",6 -"Angolanness","2015-07-01",11 -"2002_in_NASCAR_Craftsman_Truck_Series","2016-01-01",12 -"Aces_of_ANSI_Art","2015-08-01",77 -"Alan_Tskhovrebov","2015-08-01",13 -"Aegis_Security","2015-10-01",1 -"Alec_the_Great","2015-05-01",69 -"Corel_SnapFire","2016-11-01",9 -"AbdulMagid_Breish","2016-03-01",276 -"A_Night_in_NYC","2015-10-01",232 -"79th_parallel_south","2016-11-01",17 -"Alphonse_Crespo","2016-06-01",50 -"Acacia_petite_feuille","2016-05-01",1 -"Amstrad_464","2017-12-01",18 -"Charles_County,_Maryland","2017-06-01",2079 -"1972_outbreak_of_smallpox_in_Yugoslavia","2018-03-01",375 -"Alungili","2017-09-01",37 -"Brontispalaelaps_froggatti","2016-04-01",1 -"Alison_Lacey","2016-12-01",94 -"Alessandro_Capra","2017-07-01",21 -"2012_UCF_Knights_baseball_team","2016-08-01",46 -"16_Candles_Down_the_Drain","2017-05-01",2 -"Anandra_strandi","2015-08-01",11 -"Brigitte_Rohde","2017-01-01",9 -"Agenda_VR3","2015-09-01",93 -"1641_in_architecture","2015-11-01",32 -"ALF_Tales","2016-04-01",280 -"A_Woman_Scorned","2015-07-01",164 -"Air-free_techniques","2016-04-01",5 -"1973_in_British_television","2016-04-01",96 -"All_Saints_Cemetery","2017-04-01",345 -"1981_in_Swedish_football","2016-06-01",21 -"Apple_Dictionary","2016-10-01",19 -"2015_PBZ_Zagreb_Indoors","2016-08-01",121 -"16th_IIFA_Awards","2017-02-01",1194 -"Duki,_Pakistan","2016-04-01",14 -"Administration_of_Borderchek_points,_Population_and_Immigration","2015-09-01",2 -"Alonia,_Zante","2017-10-01",1 -"African_United_Club","2017-10-01",50 -"Burjanadze-Democrats","2016-04-01",19 -"Application_software_development","2015-06-01",27 -"Almonacid_de_la_Sierra,_Zaragoza","2015-06-01",1 -"Baissour","2016-12-01",100 -"Coti_Sorokin","2016-04-01",46 -"Alberta_and_Great_Waterways_Railway_scandal","2017-05-01",70 -"1942_Alabama_Crimson_Tide_football_team","2015-09-01",144 -"Adam_Art_Gallery","2016-08-01",80 -"Akshinski_Raion","2016-09-01",1 -"Edwin_of_Deira","2015-07-01",34 -"Altaf_Mahmud","2015-10-01",245 -"Astana_cycling_team","2017-12-01",7 -"1982_CART_World_Series_season","2015-12-01",3 -"3_Rotaxane","2017-03-01",1 -"1924_Eastern_Suburbs_season","2015-08-01",32 -"Downtown_Science","2016-11-01",6 -"1993-94_Slovak_Cup","2017-04-01",1 -"Brandon_Wayne_Hedrick","2016-04-01",32 -"2015_Brasil_Open","2016-01-01",403 -"Aung_Pinle_Hsinbyushin","2016-02-01",69 -"An_Numaniyah","2016-06-01",185 -"24th_Arkansas_Infantry_Regiment","2016-03-01",64 -"Adimchinobe_Echemandu","2017-05-01",90 -"August_Belmont,_Jr","2017-06-01",8 -"Empacher","2016-11-01",102 -"Abdulkadir_Sheikh_Dini","2017-01-01",70 -"Alvaro_Quiros","2017-08-01",12 -"Algernon_May","2017-11-01",35 -"Athol_Shmith","2016-02-01",188 -"2004_Indesit_ATP_Milan_Indoor_-_Doubles","2015-09-01",1 -"Alfred_Dennis","2016-11-01",9 -"2nd_Medical_Battalion","2017-05-01",380 -"Atom_clocks","2016-03-01",12 -"368th_Expeditionary_Air_Support_Operations_Group","2015-06-01",48 -"1911_Washington_Senators_season","2017-06-01",46 -"1963_Night_Series_Cup","2015-07-01",26 -"Aromobates_capurinensis","2017-12-01",21 -"2013-14_Super_Lig","2017-05-01",14 -"Al_taglio","2016-09-01",2 
-"2015_RBC_Tennis_Championships_of_Dallas","2016-04-01",18 -"2011_Mirabella_Cup","2017-11-01",15 -"1996_NHL_Western_Conference_Final","2015-06-01",1 -"2009_Formula_Nippon_Championship","2016-11-01",44 -"Information_security_awareness","2017-01-01",56 -"A_Noiseless_Patient_Spider","2018-03-01",757 -"Aggregate_field_theory","2017-06-01",3 -"Armenians_in_Central_Asia","2015-10-01",351 -"Acona,_Mississippi","2017-10-01",33 -"Apozomus","2017-12-01",19 -"Antwun_Echols","2016-11-01",87 -"1949_Albanian_Cup","2016-11-01",11 -"Aesychlus","2016-10-01",4 -"1961_Pulitzer_Prize","2015-09-01",879 -"East_Midlands_Conference_Centre","2016-04-01",13 -"Blumen","2016-11-01",11 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/assets/data_small.tsv b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/assets/data_small.tsv deleted file mode 100644 index 407b9ddafba..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/assets/data_small.tsv +++ /dev/null @@ -1,1000 +0,0 @@ -Akiba_Hebrew_Academy 2017-08-01 241 -Aegithina_tiphia 2018-02-01 34 -1971-72_Utah_Stars_season 2016-10-01 1 -2015_UEFA_European_Under-21_Championship_qualification_Group_8 2015-12-01 73 -2016_Greater_Western_Sydney_Giants_season 2017-05-01 86 -AAA_Americas_Trios_Championship 2015-10-01 104 -1420_in_literature 2016-05-01 20 -Adair,_Beegie 2017-08-01 2 -1980_Rugby_League_State_of_Origin_match 2017-07-01 2 -Column_of_Santa_Felicita,_Florence 2017-06-01 14 -2007_Copa_America 2016-07-01 178 -Car_dealerships_in_the_USA 2015-07-01 11 -Dihydromyricetin_reductase 2015-07-01 1 -ATCvet_code_QB05BB01 2017-04-01 1 -City_CarShare 2017-01-01 125 -Heidenrod 2017-01-01 10 -Arthur_Henrique 2016-11-01 12 -Alan_Ebnother 2015-11-01 66 -2013_UConn_football_season 2017-05-01 2 -2008_American_League_Division_Series 2016-12-01 376 -Antilipaemic 2017-09-01 12 -Aberzombie 2016-12-01 28 -2008_Asian_Wrestling_Championships 2016-12-01 76 -Federal_Correctional_Complex,_Pollock 2017-01-01 19 -Central_body 2015-07-01 32 -Binbrook,_Ontario 2015-07-01 446 -Azerbaijan_at_the_2016_Judo_Grand_Prix_Samsun 2016-10-01 25 -Ashford_Lake 2017-10-01 80 -1942_Joint_Strike 2015-12-01 3 -AFC_Youth_Championship_2012 2017-10-01 2 -Akhira 2016-07-01 64 -Arroniro_Arlieri 2016-10-01 1 -Alesheim_Burgsalach 2015-05-01 2 -2700_classic 2017-05-01 4 -ARX-8_Laevatein 2015-06-01 14 -1991_Newsweek_Champions_Cup_-_Singles 2017-06-01 3 -Aphelandra_sinclairiana 2017-07-01 69 -Asia_Kong 2015-10-01 2 -2012_Internazionali_Tennis_Val_Gardena_Sudtirol 2016-02-01 1 -24_Carat_Purple 2017-06-01 476 -Acroliths 2017-12-01 9 -Bundesautobahn_3 2016-04-01 264 -ATC_code_S01AX21 2016-09-01 1 -Allington,_Lincolnshire 2015-11-01 188 -Acer_Aspire_One 2017-06-01 5169 -ATC_code_L04AC 2015-06-01 1 -1969_New_Year_Honours 2017-07-01 269 -Antonio_Napolitano 2017-11-01 44 -Amberfish 2017-10-01 11 -1976_Cam_2_Motor_Oil_400 2018-03-01 45 -April_25,_2017 2018-01-01 2 -Akahori_Station 2016-06-01 11 -Abducens_palsy 2016-05-01 28 -Ancona_cathedral 2018-01-01 2 -Ajou_Motor_College 2017-02-01 83 -Brad_Skyes 2016-11-01 1 -Alegro_PCS 2017-07-01 157 -Franz_Dunshirn 2017-01-01 1 -Arthur_Godfrey_Road 2016-11-01 3 -Ab_Golman 2017-05-01 30 -Art_in_early_modern_Scotland 2016-03-01 98 -1968_World_Series 2016-02-01 1960 -1828_in_the_UK 2017-08-01 3 -Explorer-1_Prime_Unit_2 2016-11-01 11 -2014_Desafio_Internacional_das_Estrelas 2017-12-01 31 -Ambulyx_subocellata 2016-08-01 1 -2008_Hamilton_Tiger-Cats_season 
2015-11-01 153 -Deuterogamist 2015-07-01 5 -Art_Nouveau_furniture 2017-12-01 839 -Allison,_Colorado 2015-10-01 85 -2014_MLS_Re-Entry_Draft 2017-09-01 36 -Amiot_353 2015-12-01 8 -ACLU_of_Massachusetts 2015-11-01 106 -Altable,_Spain 2016-10-01 1 -Agnidra_scabiosa 2016-12-01 16 -Dictyotremella_novoguineensis 2015-07-01 1 -Compiler_Construction 2015-07-01 42 -Aufheben 2016-11-01 1080 -Avafauna 2017-06-01 17 -Atheist_billboard 2017-01-01 19 -2011_Indonesia_Super_League_All-Star_team 2015-11-01 15 -BrahMos_II 2015-07-01 31 -1707_in_art 2016-04-01 17 -Aeromarine_Model_60 2016-06-01 34 -Ayatollah-al-ozma 2015-06-01 12 -Exanimus 2017-01-01 4 -Anderby 2017-01-01 29 -Ashgabat_indoor_tennis_arena 2017-07-01 27 -1971_Rose_Bowl 2015-12-01 961 -2004_HR56 2016-05-01 5 -1886_in_South_Africa 2016-03-01 70 -Bishop_of_Central_Newfoundland 2016-04-01 1 -Alice_Rivlin 2016-09-01 1137 -Arriba_en_la_Cordillera 2017-06-01 39 -Adam_Lively 2016-06-01 77 -Colasposoma_fairmairei_fairmairei 2017-06-01 5 -Archie_Barton 2017-02-01 49 -Aharon_wasserman 2016-01-01 7 -Alabama_Educational_Television_Commission 2017-05-01 3 -Advanced_Technology_Bomber 2016-02-01 67 -1-krona 2017-01-01 4 -Ahmadabad-e_Kalij-e_Sofla 2017-01-01 3 -Bob_Dolman 2016-11-01 245 -Bellevue,_French_Guiana 2017-01-01 5 -Bison_Nickel 2017-01-01 2 -Arthur_Drabble 2016-12-01 35 -Edgewater_Borough,_New_Jersey 2016-11-01 3 -Alberto_Cambrosio 2017-11-01 31 -Amalia_Celia_Figueredo 2017-07-01 32 -1989_-_1992_Rugby_League_World_Cup 2016-01-01 10 -Admir_Seferagic 2016-06-01 7 -Adriaan_Loosjes 2015-05-01 46 -Alfred_Manuel_Martin 2015-06-01 3 -Academy_of_the_Arabic_Language 2015-08-01 67 -Ankita_Shrivastav 2018-01-01 7430 -Anarchism_in_asia 2017-11-01 1 -Batiquitos_Lagoon_State_Marine_Conservation_Area 2015-07-01 18 -Alstonia_calophylla 2017-12-01 2 -4-Hydroxycyclohexanecarboxylate_dehydrogenase 2016-11-01 4 -832_symmetry 2017-09-01 6 -1931_Fuyun_earthquake 2016-07-01 64 -1998_Masters_of_Formula_3 2016-01-01 60 -2011_LG_Hockey_Games 2016-04-01 7 -Generalized_pustular_psoriasis 2017-01-01 159 -2013_European_Cup_Winter_Throwing 2016-07-01 56 -2008_in_Argentina 2017-06-01 48 -Apostrophized 2017-10-01 5 -Algebraically_compact_module 2017-01-01 5 -Askett 2015-10-01 79 -2009_swine_flu_outbreak_timeline 2015-08-01 65 -72704-01-9 2017-12-01 4 -Alexandre-Charles-Albert-Joseph_Renard 2017-11-01 4 -Acyl-CoA_oxidase 2016-09-01 250 -2011_Budweiser_Shootout 2015-08-01 109 -Augusta_Davies_Webster 2015-07-01 2 -Association_theory 2017-07-01 112 -Abemama_Airfield 2015-05-01 8 -Archaeological_Museum_of_Heraklion 2015-10-01 14 -Authorized_marches_of_the_Canadian_Armed_Forces 2016-11-01 241 -1986_in_Portugal 2017-01-01 7 -Antiziganism_in_Bulgaria 2017-12-01 13 -Adriana_Martin 2015-09-01 21 -2004_Green_Bay_Packers_season 2015-05-01 970 -Agrippa_the_Sceptic 2017-11-01 95 -Admiral_Island 2016-04-01 1 -Auxiliary_sign_language 2015-06-01 31 -2013_Food_City_500 2015-06-01 90 -Andy_Roesch 2015-08-01 15 -Alsoszentivan 2017-05-01 4 -Architecture_of_Belgium 2015-05-01 199 -1_South_African_Infantry 2017-06-01 5 -1930_Auburn_Tigers_football_team 2016-12-01 39 -1860_in_Canada 2017-05-01 269 -Aldeaseca_de_la_Frontera 2018-03-01 21 -Elijah_Fox_Cook 2015-07-01 13 -2010_BCS_Bowl_Games 2016-03-01 1 -2017_NPSL_season 2017-06-01 2806 -Bank_of_New_South_Wales_v_Commonwealth 2016-12-01 173 -American_Enterprise_Association 2016-02-01 4 -26th_Kentucky_Derby 2018-03-01 1 -Chaldean_Diocese_of_Amid 2016-11-01 18 -Ajaran_language 2016-03-01 1 -1992_Texas_Rangers_season 2017-06-01 113 -26_SAS 2017-12-01 3 
-2015_Terengganu_FA_season 2016-01-01 537 -Aagard,_Oregon 2017-03-01 3 -Auberry,_CA 2017-05-01 13 -American_Eskimo_spitz 2015-09-01 3 -Antidiabetic 2016-11-01 75 -Asinius 2017-11-01 26 -Andrey_Vasilievich_Abramov 2016-10-01 1 -Alan_Carrington 2018-03-01 91 -Colebrook,_Ontario 2017-06-01 2 -Abbasabad-e_Kheyrabad 2015-08-01 24 -Arandjelovac_Municipality 2016-02-01 1 -Aloysius_Valente 2017-12-01 11 -Almondo_Curry 2016-03-01 86 -4th_century_AD 2017-03-01 13 -Askhat_Dilmukhamedov 2016-02-01 77 -1147_AD 2017-05-01 1 -1953_Torneo_di_Viareggio 2017-03-01 20 -ATP_Schenectady 2015-12-01 30 -Lakarian_City 2017-01-01 3 -Adam_Ferency 2017-12-01 176 -AugustDvorak 2016-07-01 5 -97th_Light_Infantry_Division 2017-07-01 1 -16th_Connecticut_Infantry_Regiment 2016-05-01 146 -2011_Somalian_drought 2017-05-01 2 -Anbargah 2017-12-01 8 -1921_in_Paraguayan_football 2016-03-01 2 -Cosmetic_dermatitis 2017-01-01 5 -Annunciation_Greek_Orthodox_Cathedral,_Atlanta,_Georgia 2015-09-01 9 -1300_AM 2016-07-01 106 -A_Promising_Africa 2016-03-01 41 -2015-16_Odense_Bulldogs_season 2016-10-01 1 -Aral_AG 2017-12-01 1446 -Angel_Vivar_Dorado 2015-12-01 6 -1951_Australian_Championships 2018-03-01 32 -DJMax_Portable_Hot_Tunes 2017-01-01 27 -Allinge 2017-03-01 32 -1986_Buick_WCT_Finals 2016-11-01 14 -Arimatsu,_Aichi 2015-06-01 112 -Arthur_Berzinsh 2017-02-01 249 -Apolima_Uta 2017-04-01 23 -Capitol_Hill_Pride_Festival 2015-07-01 19 -Kara-Murza 2017-01-01 5 -Aigleville,_Alabama 2015-11-01 19 -Abdullah_bin_al-Hussein 2017-02-01 1 -2017-18_Inter_Milan_season 2018-03-01 26 -African_Collared_Dove 2016-07-01 10 -Achaea_dmoe 2016-11-01 3 -Aurora,_Utah 2016-06-01 201 -Architecture_in_Portland,_OR 2017-07-01 1 -Charchala 2015-07-01 4 -Around_the_Roses 2015-07-01 3 -1965_in_music 2016-12-01 3394 -Alojzije_Jankovic 2017-04-01 5 -Arisu_Seno 2015-08-01 6 -ALCO_T-6 2017-01-01 77 -1998_Royal_Bank_Cup 2015-12-01 32 -1956_Claxton_Shield 2016-11-01 9 -Anita_Dube 2017-07-01 233 -Anderson_Windows 2015-05-01 13 -Annaquatucket_River 2018-03-01 38 -Black_salve 2017-01-01 1496 -Anna_Pendleton_Schenck 2017-02-01 11 -Asghar_Nadeem_Syed 2017-07-01 146 -Disarming 2016-11-01 5 -Antarctic_ice_cap 2017-08-01 7 -Antonio_Ottone 2017-05-01 11 -Coralie_Larnack 2017-01-01 9 -Budha_Subba_Gold_Cup 2016-11-01 24 -Amphoe_Chaiya 2017-03-01 9 -Anarcho-capitalism_in_Somalia 2016-10-01 7 -Felix_Loch 2017-01-01 131 -26508_Jimmylin 2017-12-01 3 -Andrew_McMillen 2015-11-01 134 -Dundee_Canal_Industrial_Historic_District 2017-01-01 2 -Aula_Baratto 2015-12-01 140 -Church_of_St_Mary,_Knowsley 2015-07-01 1 -Aggelakis 2017-10-01 1 -Al_Badiyah 2017-11-01 157 -Assault_Gunboat 2016-03-01 21 -Lachau 2017-01-01 4 -2008_Pittsburgh_Steelers_season 2016-12-01 10018 -Apolychrosis_candidus 2018-01-01 24 -Andrei_Krylov 2017-02-01 192 -Aldesh_Vadher 2018-02-01 7 -Alwand 2017-02-01 7 -Edward_Baker_Lincoln 2015-07-01 4347 -Aermotor_Corporation 2017-11-01 4 -Aischylos 2017-01-01 7 -6th_Assault_Aviation_Corps 2017-07-01 100 -Azygos_lobe 2016-10-01 1598 -Demirciler,_Nazilli 2015-07-01 4 -Akhlaq-e-Hindi 2016-11-01 13 -Dragon_Crusaders 2016-04-01 122 -25V_USB 2016-01-01 1 -Calliophis_melanurus 2017-01-01 31 -Antonionian 2016-10-01 15 -Ashley_Richardson 2017-09-01 1216 -1st_Observation_Group 2018-01-01 6 -Andrzej_Bargiel 2015-05-01 97 -2008_AFL_National_Under_16_Championships 2018-03-01 20 -Ammon_Bundy 2016-09-01 11890 -Benno_Wandolleck 2016-11-01 5 -Aero-Kros_MP-02_Czajka 2016-03-01 136 -A6005_road 2015-10-01 14 -Eagle_Eye_Networks 2015-07-01 101 -Aarberg 2017-12-01 277 -Encyclopedia_of_anthropology 
2015-07-01 1 -Duncormick_railway_station 2016-11-01 7 -Aiqing_huajiao_zhuanyi 2017-03-01 1 -Crude_oil_washing 2016-04-01 466 -2010_Indiana_Hoosiers_football_team 2017-06-01 90 -Book_of_Bodley_Head_Verse 2015-07-01 18 -Absence_seizure 2016-05-01 18152 -Cayucupil 2016-04-01 3 -Akanabee 2017-03-01 1 -Grooved_consonant 2017-01-01 5 -Dellamora_philippinensis 2015-07-01 7 -Dejan_Blazevski 2017-01-01 1 -Arabis_armena 2016-08-01 25 -1988_Summer_Paralympics_medal_table 2016-12-01 90 -2012-13_Basketball_Championship_of_Bosnia_and_Herzegovina 2017-04-01 2 -1966_in_music 2017-10-01 3510 -Antti_Tyrvainen 2015-12-01 2 -African_desert 2016-06-01 262 -Bruneau_mariposa_lily 2016-04-01 1 -Bernie_Parmalee 2017-06-01 221 -2015_South_American_Youth_Football_Championship_squads 2015-09-01 594 -1985_IIHF_World_U20_Championship 2015-08-01 7 -18th_British_Academy_Film_Awards 2018-02-01 270 -523_Ada 2016-04-01 35 -Active_Pharmaceutical_Ingredients 2016-02-01 5 -Burley,_ID_mSA 2015-07-01 2 -CFRN-TV-10 2017-06-01 2 -1982_Super_Bowl_of_Poker 2017-08-01 38 -Australian_Journal_of_Educational_Technology 2017-01-01 1 -2013_Super_League_Grand_Final 2016-06-01 212 -2006_BCR_Open_Romania 2015-06-01 25 -Charlestown_Townies 2016-04-01 319 -1943_Polish_underground_raid_on_East_Prussia 2017-08-01 8 -Anthony_Celestino 2018-02-01 182 -Andrew_Beerwinkel 2018-02-01 73 -Greigia_atrobrunnea 2017-01-01 1 -Adrian_Beecham 2017-11-01 1 -Implementation_of_mathematics_in_set_theory 2017-01-01 12 -Annastacia_Palaszczuk 2015-05-01 6247 -Egon_Zimmermann_II 2016-11-01 3 -Air_aide-de-camp 2018-03-01 137 -Albert_Murphy 2016-09-01 1 -1924_Arkansas_Razorbacks_football_team 2016-02-01 28 -Avondale_Mill 2016-10-01 68 -Alexander_Volzhin 2015-12-01 25 -Arek_Monthly 2017-08-01 31 -Dinka_Blanche 2015-07-01 1 -1921_Mercer_Baptists_football_team 2016-11-01 10 -Afro-Antiguan_and_Barbudan 2016-06-01 252 -American_southern_literature 2016-10-01 3 -1947_Swiss_Grand_Prix 2016-11-01 32 -99p_Stores 2017-12-01 3028 -Artem_Radkov 2018-03-01 21 -Arctic_brome 2016-12-01 19 -Battle_Of_Moskova 2015-06-01 6 -Airdrieonians 2016-06-01 32 -Advanced_transportation_controller 2018-03-01 79 -BC_government 2016-12-01 18 -Antonio_Maura 2017-03-01 457 -Anjuman,_Afghanistan 2017-09-01 62 -Deodato_Guinaccia 2015-07-01 13 -Blowjob_Betty 2016-11-01 28 -453d_Flying_Training_Squadron 2017-08-01 3 -1990_Africa_Cup_of_Nations 2016-04-01 22 -Agenville 2016-08-01 100 -1202_in_Scotland 2018-01-01 82 -Calytrix_desolata 2017-06-01 10 -1957_in_Chile 2016-04-01 13 -Anglican_Bishop_of_Torres_Strait_people 2017-08-01 1 -2015_Mexican_Grand_Prix 2015-06-01 528 -Catalan_parliament 2017-01-01 14 -Cult_Shaker 2017-01-01 32 -Ander_Gayoso 2016-11-01 34 -Ageneiosus_ucayalensis 2017-12-01 20 -Club_de_Berne 2015-07-01 194 -Adecco 2016-03-01 9863 -Anti-unionism 2018-01-01 11 -Auchindoun_Castle 2017-01-01 102 -557_in_poetry 2016-07-01 1 -Abu_ol_Verdi_Rural_District 2017-01-01 1 -Centro_73 2016-04-01 23 -Dagger_compact_category 2016-04-01 97 -Alan_Nunn_May 2017-11-01 770 -Basal_clade 2015-07-01 44 -Aizu_Line 2015-08-01 26 -Edward_Kernan_Campbell 2016-04-01 5 -865_area_code 2016-12-01 9 -Bahamas_at_the_1984_Summer_Olympics 2017-06-01 35 -Gardan_Kalat 2017-01-01 1 -American_Samoa_national_under-19_football_team 2017-12-01 4 -Kayah_National_United_League 2017-01-01 14 -2007_Nordea_Nordic_Light_Open_-_Singles 2016-10-01 2 -Avondale_Estate 2016-11-01 2 -Acalolepta_variolaris 2017-02-01 3 -Anantapur,_Andhra_Pradesh 2017-05-01 1032 -Amenable_Banach_algebra 2015-08-01 59 -300_metres 2017-01-01 61 
-Black_Bottom,_Kentucky 2016-04-01 8 -100_Players_Who_Shook_The_Kop 2018-01-01 1133 -Adventure_story 2015-07-01 29 -Anacampsis_lignaria 2017-05-01 5 -2007_American_Indoor_Football_Association_season 2015-09-01 89 -Dmitry_Kardovsky 2016-04-01 33 -A10_autoroute 2015-11-01 27 -1995_Sydney_Bulldogs_season 2017-04-01 40 -Ilex_jelskii 2017-01-01 2 -Adrian_Jose_Hernandez 2016-10-01 2 -CallAir_A-5 2016-11-01 4 -22nd_meridian_west 2015-07-01 45 -Anglican_Diocese_of_Antananarivo 2015-08-01 2 -Andrew_Kelsey 2016-11-01 14 -Brownhill_Creek 2017-06-01 4 -Abunai_Deka 2015-06-01 269 -Aisha_Jefferson 2017-04-01 115 -Alonso_Lopez 2017-03-01 7 -Aeroparque_Ciudad_de_Mendoza 2016-01-01 1 -Arthur_Ashley_Sykes 2017-12-01 45 -Holy_Face_Medal 2017-01-01 20 -1Chronicles 2018-02-01 1 -2014_CFU_Club_Championship 2017-12-01 108 -Aetna_class_ironclad_floating_battery 2015-06-01 37 -Antoine_Delrio 2015-07-01 2 -Chislet_Windmill 2015-07-01 38 -Aerojet_SD-2 2017-07-01 59 -Age_role_play 2015-09-01 2 -50687_Paultemple 2018-03-01 8 -1997-98_Cuban_National_Series 2017-02-01 1 -Aleksandr_Borisovich_Belyavskiy 2017-10-01 42 -Carol_MacReady 2017-01-01 111 -18th_Chess_Olympiad 2015-06-01 134 -Clara_Schonfeld 2015-07-01 1 -Apollonius_of_Athens 2017-02-01 35 -ABC_80 2018-03-01 603 -Apatelodes_damora 2015-08-01 22 -Ernest_Walbourn 2016-04-01 30 -428_BCE 2017-04-01 2 -72nd_Seaforth_Highlanders 2017-12-01 29 -Broughton_Hackett 2015-07-01 38 -A_Fazenda_2 2016-12-01 56 -ATCvet_code_QJ01MQ 2017-05-01 2 -Abura,_Iran 2017-03-01 3 -DeLeon_Independent_School_District 2015-07-01 1 -Abby_aldrich 2016-09-01 1 -Cinema_One_Originals 2016-11-01 359 -2013_European_Short_Course_Swimming_Championships 2017-09-01 124 -Ars_technica 2015-11-01 442 -AMS_Production_Company 2016-02-01 1 -Joao_Soares 2017-01-01 1 -Cervical_vertebra_6 2017-06-01 45 -Kevin_Pugh 2017-01-01 2 -Alpha-1_antitrypsin 2015-11-01 11845 -Assyrians_in_iran 2017-07-01 53 -Boophis_ankarafensis 2016-11-01 2 -A_View_To_a_Kill 2018-01-01 4 -Charles_Edouard_Brown-Sequard 2015-07-01 7 -1919_in_Ireland 2017-04-01 239 -74th_Foot 2015-06-01 3 -9275_Persson 2016-07-01 22 -Dalcerides_mesoa 2015-07-01 11 -A_Summer_Bird-Cage 2016-03-01 248 -2011_NAB_Cup 2017-10-01 127 -13th_Parliament_of_Lower_Canada 2015-08-01 41 -2011_Players_Championship_Finals 2015-07-01 25 -Flag_of_Tenerife 2017-01-01 128 -Hypopta_corrientina 2017-01-01 1 -Jalatarangam 2017-01-01 16 -Adjoint_endomorphism 2018-01-01 330 -Anime_conventions 2015-06-01 18 -2004_Grammy_Award 2015-06-01 13 -American_war 2015-07-01 80 -Beynes,_Yvelines 2016-11-01 32 -Agriculture_Department 2016-06-01 16 -Andrey_Chisty 2015-10-01 58 -Ait_Yahia_Moussa 2017-08-01 7 -Alfred_Blau 2017-03-01 57 -1869_in_sports 2017-08-01 73 -Ambolodia_Sud 2016-04-01 6 -Animal_slaughter 2017-06-01 6423 -Adamowka_Commune 2018-01-01 2 -Arsenic_pentachloride 2016-03-01 467 -220_BCE 2016-01-01 3 -863d_Engineer_Battalion 2015-11-01 160 -Amer_Abu-Hudaib 2017-04-01 31 -Aaina_tv 2017-08-01 3 -Arnhem,_Netherlands 2015-08-01 67 -Antoine_de_sartine 2015-08-01 4 -ATC_code_A16 2016-01-01 155 -Eastern_Front 2017-01-01 70 -Ashy-headed_tyrannulet 2016-12-01 44 -Aoheng_language 2015-08-01 64 -1996_World_Junior_Canoe_Slalom_Championships 2017-11-01 15 -Agriophara_nodigera 2017-11-01 12 -Amsterdam_Island_cattle 2015-12-01 675 -Aliyah_from_the_Soviet_Union_in_the_1990s 2017-08-01 54 -Abandoned_and_Little_Known_Airfields 2018-01-01 2 -Church_numerals 2015-07-01 57 -Ankeny_Christian_Academy 2015-09-01 74 -2010_FIFA_World_Cup_qualification_-_AFC_First_Round 2017-06-01 58 -1ESS_switch 2015-07-01 
514 -Chelys_boulengerii 2016-04-01 1 -Bivalent_logic 2016-11-01 25 -Ivan_Skavinsky_Skavar 2017-01-01 1 -Fergus_Sings_the_Blues 2016-04-01 62 -2015-16_Libyan_Premier_League 2017-02-01 4 -Dutch_Chess_Championship 2017-01-01 35 -Every_Man_in_His_Humor 2016-11-01 1 -2008_Allstate_BCS_National_Championship_Game 2015-08-01 11 -Aq_Tappeh,_Hamadan 2015-09-01 25 -Agrotractor 2016-02-01 1 -Alexander_of_Pfalz-Zweibrucken 2017-12-01 2 -2003_Mistral_World_Championships 2016-04-01 6 -146th_Fighter-Interceptor_Wing 2015-11-01 49 -Al-Qahir 2016-04-01 328 -25604_Karlin 2015-05-01 20 -Allen_taflove 2017-12-01 3 -Aretha_Thurmond 2017-05-01 109 -Atlanta_and_lagrange_rail_road 2015-07-01 1 -ACSI_College_Iloilo 2015-10-01 1 -Alan_Sacks 2015-07-01 150 -African_Desert_Warbler 2017-02-01 11 -A_Man_and_His_Soul 2018-02-01 89 -ASCII_ART 2015-05-01 9 -1992-93_VMI_Keydets_basketball_team 2016-10-01 1 -George_and_the_Dragon 2017-01-01 18 -2012_NAB_Cup 2016-12-01 99 -1965_Indy_500 2016-05-01 51 -Forest_Glen,_Nova_Scotia 2016-04-01 9 -A_Critical_Dictionary_of_English_Literature 2016-08-01 4 -Aquion_Energy 2015-08-01 1077 -Alibeyce,_Emirdag 2017-09-01 1 -Blauhu00F6hle 2015-07-01 1 -Ian_Sommerville 2017-01-01 1 -Air_propulsion 2017-07-01 474 -2016_12_Hours_of_Sebring 2016-10-01 187 -Asites 2017-07-01 4 -Al-Kini 2017-03-01 1 -Austin_Aztex_2009_season 2016-03-01 10 -Alto_Vista_Chapel 2015-12-01 833 -Abecedaria 2017-04-01 22 -Farm_to_Market_Road_2503 2016-11-01 3 -Anglican_Bishop_of_The_Leeward_Islands 2015-09-01 2 -Basketball_at_the_2011_Pan_American_Games 2017-06-01 120 -Angela_Peel 2016-08-01 7 -Amber_Frey 2018-02-01 728 -Afraid_to_Sleep 2017-06-01 51 -ATC_code_A02BA 2018-02-01 7 -Apateon_pedestris 2015-11-01 5 -Alois_Estermann 2015-12-01 1155 -1752_in_science 2016-01-01 78 -Baldassin 2017-06-01 3 -Camilla_Hildegarde_Wedgwood 2017-01-01 1 -B-A-C-H_motive 2016-10-01 3 -AI_Velorum_star 2016-09-01 1 -Ali_Zayn_al-Abidin 2017-04-01 71 -Ailurarctos_lufengensis 2015-07-01 1 -Clearview,_Philadelphia 2017-06-01 67 -Adam_Sender 2016-08-01 759 -Apriona_paucigranula 2018-02-01 7 -Dark_at_the_Top_of_the_Stairs 2015-07-01 10 -Acanthio 2017-12-01 11 -1980_Labatt_Brier 2018-01-01 111 -2016-17_New_York_Knicks_season 2017-10-01 21 -1995_CAF_Cup 2015-10-01 48 -Boiled_linseed_oil 2016-04-01 79 -2015_Kumanovo_clashes 2016-07-01 6 -David_Jamieson 2017-01-01 3 -1915_Florida_Gators_football_team 2015-08-01 32 -2010-11_New_Zealand_Football_Championship 2017-03-01 1 -Ashley_Church 2015-08-01 27 -Acanthoxylini 2017-06-01 27 -American_Hindu 2016-10-01 33 -Amylosporomyces 2015-12-01 20 -2007_Southeast_Asia_Basketball_Association_Championship 2018-01-01 1 -Aethelred_I 2017-08-01 1 -2-methyl-GPP_synthase 2018-02-01 1 -Dave_Aspin 2016-11-01 6 -Descent_of_the_Nine 2016-04-01 1 -2010_Kleen_Energy_Systems_disaster 2017-08-01 3 -1978_in_Japanese_television 2017-08-01 70 -Alexandros_Falekas 2018-01-01 1 -1910_in_Afghanistan 2016-02-01 32 -Abd-ru-shin 2017-09-01 681 -610_in_poetry 2017-05-01 3 -2015_arrests_of_FIFA_officials 2017-12-01 46 -ATmega328P 2017-09-01 26 -A_G_Mathews 2017-12-01 3 -Attack_on_Mers-el-Kebir 2016-12-01 511 -2016_in_Estonia 2016-05-01 89 -Adidas-Salomon 2015-09-01 574 -Education_and_Skills_Act_2008 2016-11-01 141 -1789_in_the_United_States 2015-07-01 845 -Apple_Computer_advertising 2015-09-01 7 -9th_US_Army 2016-12-01 17 -Ad_Rotas 2016-02-01 16 -Agios_Ioannis,_Paphos 2018-03-01 97 -Arabian_toad 2017-12-01 100 -Anterior_pituitary_acidophil 2016-06-01 47 -Arguello,_Christine 2017-12-01 3 -Amilkar_Ariza 2017-03-01 67 -Charles_Grierson 
2016-11-01 14 -Achi,_Bolivar 2017-11-01 1 -Exonym_and_endonym 2017-01-01 1712 -Abdul_Maroof_Gullestani 2017-12-01 20 -Fairlawne_Handicap_Chase 2016-04-01 11 -1963_Virginia_Tech_Hokies_football_team 2016-07-01 6 -AE_Clarke 2017-12-01 3 -ALFA-PROJ_Model_3563_sport 2017-10-01 2 -Aleks_Vanderpool-Wallace 2018-02-01 32 -Antioxident 2017-05-01 16 -Calliope_Project 2015-07-01 3 -Anderson_World 2017-10-01 5 -Amydria_selvae 2017-11-01 6 -Antoni_Katski 2016-09-01 1 -Bera_District 2017-06-01 85 -80_South_Street_New_Design 2016-07-01 86 -Askizsky 2015-08-01 2 -Amausi_metro_station 2015-11-01 44 -9486_Utemorrah 2017-04-01 5 -Army_CIS 2018-01-01 2 -1851_Chilean_Revolution 2017-06-01 255 -Jens_Robert_Dahlqvist 2017-01-01 6 -1966-67_Tercera_Division 2017-05-01 1 -Chanel_Iman 2017-06-01 9434 -Astydamia 2017-06-01 34 -1944_in_Belgium 2016-09-01 27 -Acton_Baronets,_of_Aldenham 2017-01-01 1 -2014_FBS_season 2016-12-01 5 -2016_Winter_Youth_Olympics 2017-09-01 2090 -1903_Clemson_Tigers_football_team 2017-06-01 50 -2014_Taca_da_Liga_Final 2017-04-01 2 -10th_Alberta_general_election 2016-11-01 4 -Edertalschule_Frankenberg 2016-04-01 16 -4th_Punjab_Infantry_Regiment 2017-09-01 136 -America_Air_Linhas_Aereas 2018-02-01 1 -Australian_Liberal_Party 2015-06-01 146 -American_licorice 2017-05-01 15 -2013_NASCAR_Cup_Series 2015-10-01 49 -Anja_Lundqvist 2016-03-01 93 -Amauris_dannfelti 2016-01-01 12 -Abandoned_shipwrecks_act 2015-06-01 3 -11086_Nagatayuji 2017-02-01 3 -Advertising_tissues 2017-06-01 1 -Anti_corn-law_league 2016-10-01 1 -Always_Guaranteed 2017-09-01 445 -Alfredo_Palacio_Moreno 2018-01-01 48 -Antonio_Puche_Vicente 2015-06-01 1 -Elazig_Province 2017-01-01 1 -ATC_code_C02AC01 2017-05-01 1 -Alexander_Mattock_Thompson 2016-08-01 2 -Cocos_Islands_Malay 2017-06-01 63 -Aftonbladet_antisemitism_controversy 2016-10-01 1 -Azad_Kashmir,_Pakistan 2015-07-01 14 -1852_English_cricket_season 2016-10-01 24 -Birmingham_Pride 2015-07-01 129 -Air-pollution_controls 2015-08-01 4 -James_Southerton 2017-01-01 20 -Architecture_of_Chiswick_House 2015-06-01 240 -Alexander,_Colin 2015-12-01 1 -Al-Mansooreh 2016-10-01 1 -Arielle_Gastineau_Ashton 2017-12-01 18 -Blue_Ben 2017-06-01 240 -1911_Michigan_State_Normal_Normalites_football_season 2017-11-01 1 -Arctictis_binturong 2017-04-01 334 -Fornaldarsaga 2016-04-01 18 -Bibasis_gomata 2017-06-01 35 -Anna_Schchian 2017-06-01 19 -2005_in_Rwanda 2016-08-01 69 -Archaeology_in_ethiopia 2016-01-01 1 -23277_Benhughes 2016-12-01 2 -Bahrain_-_USA_relations 2017-06-01 1 -Dieter_Korn 2015-07-01 13 -Antidynamo_theorem 2016-10-01 222 -An_Jae-Won 2016-12-01 1 -Bruray 2015-07-01 82 -Gosport_Council_election,_2004 2017-01-01 2 -1856_in_South_Africa 2017-03-01 60 -Dialakoro,_Guinea 2017-01-01 1 -05-CV-1678 2016-02-01 1 -Allison,_Henry 2016-12-01 5 -Animal_house 2016-06-01 1399 -Alexander_Tabarrok 2017-03-01 5 -Chung-Ho_Memorial_Hospital 2017-06-01 50 -2013_Internazionali_Trofeo_Lame_Perrel-Faip_-_Doubles 2016-03-01 4 -1965_Speedway_World_Team_Cup 2017-11-01 13 -Alexander_Ollongren 2017-11-01 788 -Amore_traditore,_BWV_203 2016-06-01 83 -Arthur_William_Rogers 2015-10-01 31 -Ashoka_pillar 2017-02-01 265 -1_62_honeycomb 2018-02-01 10 -1926_Australasian_Championships 2016-05-01 47 -Export_award 2016-04-01 3 -5000_Days_Project 2016-07-01 75 -2012_UCI_Europe_Tour 2017-03-01 65 -1985_Toronto_Indoor_-_Singles 2015-08-01 4 -Cedar_Grove,_North_Carolina 2017-06-01 18 -Battle_of_The_Afsluitdijk 2016-04-01 15 -Arishtanemi 2017-03-01 7 -Alfalfa_bill_murray 2016-12-01 7 -Elisha_Jay_Edwards 2015-07-01 28 
-Arturas_Paulauskas 2016-01-01 10 -Abdelrahman_Hamad 2015-09-01 2 -1948_in_Northern_Ireland 2015-07-01 29 -1988_in_philosophy 2015-05-01 70 -5-Hydroxytryptaminen 2016-01-01 4 -2017_FBS_season 2017-10-01 124 -Areeiro 2016-04-01 2 -Alemonides 2016-03-01 6 -Abrochia_caurensis 2016-10-01 1 -Anafylaxia 2018-01-01 2 -1938_Grand_National 2018-02-01 80 -China-Korea_Champions_League 2015-07-01 4 -Acetyl_bromide 2017-11-01 448 -24_hours_of_lemans 2015-05-01 37 -Albright_hereditary_osteodystrophy 2017-02-01 153 -Ashland_Bus_System 2015-08-01 115 -1,8-Cineole_2-endo-monooxygenase 2016-10-01 8 -2005-2006_NHL_Season 2015-11-01 6 -Cammie_Dunaway 2015-07-01 344 -D-Fish 2016-11-01 2 -4_sister_vineyard 2015-09-01 1 -Alessia_Cara_discography 2017-03-01 100 -Alexander_Berg 2017-08-01 63 -4822_Karge 2018-02-01 32 -Emile_Francis_Trophy 2017-01-01 8 -Amin_Ghaseminejad 2017-06-01 45 -Artichia 2017-09-01 19 -Cividale 2016-11-01 41 -2007_Orissa_Violence 2016-05-01 1 -Australian_Saltbush 2016-12-01 5 -Asian_Food_Channel 2016-09-01 727 -Camp_iawah 2015-07-01 1 -ATC_code_J01MA04 2017-11-01 1 -Arpad_Balazs 2017-10-01 2 -Angel_of_Music,_or_The_Private_Life_of_Giselle 2018-02-01 56 -1983_Torneo_di_Viareggio 2016-03-01 22 -Arellano_University 2017-09-01 1699 -ATC_code_B03AA 2017-11-01 1 -FS5000 2016-11-01 1 -Abd-Allah_ibn_Zubayr 2017-05-01 2 -1889_SAFA_season 2016-04-01 28 -Aloha_bowl_broadcasters 2015-05-01 2 -1994_All_England_Open_Badminton_Championships 2016-07-01 75 -Are_We_Not_Horses 2015-07-01 79 -Angiolo_Torchi 2018-02-01 5 -Chimanimani_National_Park 2017-06-01 37 -Art_manifesto 2017-09-01 2619 -Adrian_Apostol 2016-10-01 62 -Adventure_book 2015-10-01 14 -Albemarle_Bertie 2016-06-01 20 -Adam_Deibert 2017-08-01 611 -Alberta_association_of_architects 2017-10-01 2 -Alloschmidia 2017-11-01 15 -Administrative_department_of_security 2016-05-01 1 -Archdeaconry_of_Dudley 2017-07-01 19 -Ammayenna_Sthree 2015-12-01 38 -Aaron_Spelling 2016-05-01 25128 -Anatolian_hieroglyph 2016-07-01 308 -Central_University_of_Rajasthan 2016-11-01 323 -Annamanum_touzalini 2017-08-01 7 -Acleris_hispidana 2016-11-01 2 -Frisco_kid 2016-04-01 15 -Allerheiligenberg_monastery 2017-12-01 2 -Arctic_comb_jelly 2017-03-01 3 -279377_Lechmankiewicz 2016-06-01 1 -AEGON_Pro-Series_Loughborough 2018-02-01 7 -Firefly_Space_Systems 2017-01-01 235 -2000-01_Hong_Kong_League_Cup 2017-12-01 6 -British_supermarkets 2017-01-01 2 -A_description_of_New_England 2016-10-01 13 -Artificial_Flavoring 2016-06-01 2 -Anglican_bishop_of_the_Torres_people 2018-02-01 1 -Antonio_Diaz_Cardoso 2018-02-01 1 -Johan_Patriksson 2017-01-01 3 -Ashutosh_Morya 2017-07-01 1 -Iron_ore 2017-01-01 3682 -AT-16_Scallion 2015-08-01 594 -Data_analyst 2015-07-01 134 -Cabbageball 2016-04-01 3 -Acanthonyx_seriopuncta 2017-04-01 2 -Aegeria_ruficauda 2017-10-01 1 -Archibald_Douglas,_1st_Earl_of_Ormond 2016-06-01 100 -2014_European_Championships_in_Athletics 2017-01-01 3 -1Co-Co1 2017-08-01 77 -Arthur_Abba_Goldberg 2015-10-01 2 -Ameri-Cana_Ultralights 2015-05-01 33 -1979_British_Formula_One_season 2015-12-01 218 -American_colonial_history 2016-06-01 6 -Arcadia_Martin_Wesay_Toe 2015-06-01 73 -Adam_Ornstein 2017-08-01 2 -Archive_of_Modern_Conflict 2016-12-01 307 -Ciro_Urriola 2015-07-01 12 -Acanthosyris 2015-12-01 53 -Eriopyga_jamaicensis 2015-07-01 1 -10th_parallel_north 2016-06-01 1412 -Derek_Almond 2017-01-01 2 -Jaimanglapur 2017-01-01 4 -Aphroditeola_olida 2018-02-01 6 -18th_dynasty_of_egypt 2017-06-01 2 -Ali_ben_Ahmed 2016-08-01 62 -Ashkur_Mahalleh 2018-02-01 8 -Adolf_Mosengel 2017-02-01 54 
-1838_Safed_pogrom 2016-02-01 1 -1829_in_architecture 2017-05-01 24 -Arcones,_Segovia 2016-05-01 3 -Albert_Smith_Medal 2018-02-01 30 -Arqanqergen_mass_murder 2015-10-01 60 -Jaan_Usin 2017-01-01 4 -2009_Bangladesh_Rifles_revolt 2016-03-01 269 --coltore 2015-11-01 9 -Ernest_Makins 2017-01-01 10 -Amsterdam_Bijlmer_Arena 2016-07-01 87 -Apostolic_assemblies_of_christ 2018-01-01 1 -Abirabad,_Razavi_Khorasan 2015-08-01 26 -2016_All-Ireland_Senior_Football_Championship 2015-10-01 883 -Asylum_seeking 2016-06-01 36 -56th_parallel 2015-07-01 12 -Junior_roller_derby 2017-01-01 19 -Ana_Goncalves 2016-03-01 2 -Alekseevskiy_Raion 2017-11-01 1 -2009_Vietnam_national_football_team_results 2017-07-01 15 -Chicago,_Burlington_and_Quincy_Railroad_Depot 2017-01-01 2 -Fox_Valley_Conference 2016-04-01 84 -Brachioplasty 2017-06-01 304 -Arnold_Doren 2017-06-01 11 -All_Ireland_mandolin_Champion 2015-07-01 2 -Deborah_Rennard 2016-04-01 814 -Anthony_Macdonnell 2016-02-01 2 -Azerbaijan_Pakistan_relations 2017-01-01 1 -A_Girl_Named_Zippy 2018-03-01 346 -Academic_OneFile 2018-02-01 109 -East_Point_Academy 2017-01-01 48 -2011_Italian_Figure_Skating_Championships 2017-03-01 47 -Chen_Qiao_En 2016-04-01 52 -Canobie_lake 2016-04-01 1 -Andrei_Arlashin 2017-11-01 13 -Again_Into_Eyes 2017-12-01 54 -Andropogon_curtipendulus 2018-02-01 1 -Abbath 2016-05-01 927 -Alien_Opponent 2016-05-01 160 -Art_of_Love 2016-02-01 3 -Ariana_Huffington 2017-05-01 84 -Amy_Poehler 2016-04-01 62732 -Cherven,_Rousse_Province 2015-07-01 2 -1_Month_2_Live 2018-03-01 306 -Country_Day_School_of_the_Sacred_Heart 2017-06-01 132 -Cooperative_institute_for_arctic_research 2015-07-01 2 -Depression_symptoms 2017-01-01 7 -Brent_Skoda 2016-04-01 31 -American_Christians 2016-12-01 10 -Counterbleed 2017-01-01 1 -Abarka 2016-05-01 325 -Aleksander_Povetkin 2017-02-01 89 -Austin_TX 2016-03-01 119 -Aleksandr_Tretyakov 2017-01-01 40 -Connecticut_congressional_districts 2016-11-01 3 -Alessio_de_Marchis 2015-10-01 66 -Capel_Salem,_Pwllheli 2016-04-01 6 -5-alpha_reductase_deficiency 2016-10-01 30 -Annabelle_Croft 2016-01-01 32 -Aeronca_Aircraft_Corporation 2017-05-01 9 -1597_in_Scotland 2016-07-01 18 -Alf_Somerfield 2017-11-01 10 -Agapanthia_villosoviridescens 2018-02-01 53 -Adam_Goldberg 2015-12-01 42338 -1961_Paris_massacre 2017-01-01 52 -2007_in_radio 2017-04-01 131 -Arthur_French,_5th_Baron_de_Freyne 2015-12-01 44 -AMD_Socket_G3 2017-04-01 121 -Albert_geouffre_de_lapradelle 2016-02-01 1 -Collaborations_between_ex-Beatles 2015-07-01 1279 -Betty_Ireland 2016-04-01 40 -Domingo_Tirado_Benedi 2015-07-01 1 -Bac_Ly 2016-04-01 1 -All_gas-phase_iodine_laser 2015-07-01 136 -Andre_Salifou 2017-01-01 1 -1,3-b-D-glucan 2017-05-01 2 -Joseph_Johnston_Muir 2017-01-01 3 -17th_of_Shahrivar_league 2016-05-01 63 -2001_in_art 2018-03-01 131 -Abiji_language 2017-10-01 6 -Ahliah_school 2018-03-01 133 -1605_in_India 2017-12-01 83 -Dr_Jeom_Kee_Paik 2015-07-01 1 -1954_Texas_Longhorns_football_team 2018-01-01 69 -1985_Little_League_World_Series 2016-07-01 226 -Eleanor_de_bohun 2015-07-01 1 -Adrenaline_strength 2016-03-01 8 -434_BC 2018-02-01 97 -8x60mm_S 2015-06-01 61 -2016-17_South_Pacific_cyclone_season 2017-09-01 101 -Beth_Aala 2017-06-01 15 -Al_Shaver 2017-07-01 138 -Adelphoi_Zangaki 2018-01-01 89 -Cyclopropyl_group 2016-11-01 167 -216_Sqn 2017-08-01 11 -20469_Dudleymoore 2017-05-01 5 -Attila_Hildmann 2017-06-01 103 -1970_Arkansas_Razorbacks_football_team 2016-11-01 66 -Anthony_Fairfax 2017-08-01 24 -Fort_Point,_Boston 2016-04-01 384 -Epsilon_numbers 2016-04-01 3 -2013_Recopa_Sudamericana 
2016-05-01 202 -Italo_Disco 2017-01-01 27 -Andersen_Press 2015-09-01 228 -Amasa_Walker 2017-09-01 146 -2010_in_Israeli_film 2015-09-01 234 -A-25_Shrike 2017-12-01 90 -2009_Winnipeg_Blue_Bombers_season 2017-06-01 66 -Ashland_County,_Ohio 2016-10-01 1298 -Dusky_Turtle_Dove 2017-01-01 3 -Antonov_148 2017-02-01 129 -Abdul_Hamid_Lahori 2017-08-01 458 -Amadeo_of_Spain 2015-11-01 1701 -2015_Novak_Djokovic_tennis_season 2017-07-01 2484 -Dhabawallah 2016-04-01 4 -Afshar_Beylik 2017-06-01 4 -1998_ATP_Tour_World_Championships_-_Singles 2017-03-01 20 -Beach_Haven_Terrace,_New_Jersey 2016-11-01 4 -Aix-la_Chapelle 2018-03-01 66 -Ackerman,_Val 2017-05-01 2 -47th_Ohio_Infantry 2016-12-01 59 -100_People,_100_Songs 2017-11-01 517 -2007_Masters_of_Formula_3 2016-01-01 63 -1832_US_presidential_election 2016-05-01 6 -Aaron_Baker 2016-05-01 113 -2015-16_FIBA_Europe_Club_Competition 2017-11-01 2 -Alebra 2018-02-01 27 -Asilus_crabroniformis 2016-11-01 4 -Earth_and_Air_and_Rain 2016-11-01 31 -2014_Stade_Tata_Raphael_disaster 2018-02-01 1 -Alexander_Izvolski 2017-01-01 7 -Fabric_17 2017-01-01 13 -1925_Campeonato_de_Portugal_Final 2018-01-01 37 -1948_Ashes_series 2017-01-01 121 -Abraham_ben_david 2016-09-01 4 -2006_Acropolis_Rally 2017-01-01 12 -Alottment 2017-03-01 6 -Angolanness 2015-07-01 11 -2002_in_NASCAR_Craftsman_Truck_Series 2016-01-01 12 -Aces_of_ANSI_Art 2015-08-01 77 -Alan_Tskhovrebov 2015-08-01 13 -Aegis_Security 2015-10-01 1 -Alec_the_Great 2015-05-01 69 -Corel_SnapFire 2016-11-01 9 -AbdulMagid_Breish 2016-03-01 276 -A_Night_in_NYC 2015-10-01 232 -79th_parallel_south 2016-11-01 17 -Alphonse_Crespo 2016-06-01 50 -Acacia_petite_feuille 2016-05-01 1 -Amstrad_464 2017-12-01 18 -Charles_County,_Maryland 2017-06-01 2079 -1972_outbreak_of_smallpox_in_Yugoslavia 2018-03-01 375 -Alungili 2017-09-01 37 -Brontispalaelaps_froggatti 2016-04-01 1 -Alison_Lacey 2016-12-01 94 -Alessandro_Capra 2017-07-01 21 -2012_UCF_Knights_baseball_team 2016-08-01 46 -16_Candles_Down_the_Drain 2017-05-01 2 -Anandra_strandi 2015-08-01 11 -Brigitte_Rohde 2017-01-01 9 -Agenda_VR3 2015-09-01 93 -1641_in_architecture 2015-11-01 32 -ALF_Tales 2016-04-01 280 -A_Woman_Scorned 2015-07-01 164 -Air-free_techniques 2016-04-01 5 -1973_in_British_television 2016-04-01 96 -All_Saints_Cemetery 2017-04-01 345 -1981_in_Swedish_football 2016-06-01 21 -Apple_Dictionary 2016-10-01 19 -2015_PBZ_Zagreb_Indoors 2016-08-01 121 -16th_IIFA_Awards 2017-02-01 1194 -Duki,_Pakistan 2016-04-01 14 -Administration_of_Borderchek_points,_Population_and_Immigration 2015-09-01 2 -Alonia,_Zante 2017-10-01 1 -African_United_Club 2017-10-01 50 -Burjanadze-Democrats 2016-04-01 19 -Application_software_development 2015-06-01 27 -Almonacid_de_la_Sierra,_Zaragoza 2015-06-01 1 -Baissour 2016-12-01 100 -Coti_Sorokin 2016-04-01 46 -Alberta_and_Great_Waterways_Railway_scandal 2017-05-01 70 -1942_Alabama_Crimson_Tide_football_team 2015-09-01 144 -Adam_Art_Gallery 2016-08-01 80 -Akshinski_Raion 2016-09-01 1 -Edwin_of_Deira 2015-07-01 34 -Altaf_Mahmud 2015-10-01 245 -Astana_cycling_team 2017-12-01 7 -1982_CART_World_Series_season 2015-12-01 3 -3_Rotaxane 2017-03-01 1 -1924_Eastern_Suburbs_season 2015-08-01 32 -Downtown_Science 2016-11-01 6 -1993-94_Slovak_Cup 2017-04-01 1 -Brandon_Wayne_Hedrick 2016-04-01 32 -2015_Brasil_Open 2016-01-01 403 -Aung_Pinle_Hsinbyushin 2016-02-01 69 -An_Numaniyah 2016-06-01 185 -24th_Arkansas_Infantry_Regiment 2016-03-01 64 -Adimchinobe_Echemandu 2017-05-01 90 -August_Belmont,_Jr 2017-06-01 8 -Empacher 2016-11-01 102 -Abdulkadir_Sheikh_Dini 2017-01-01 70 
-Alvaro_Quiros 2017-08-01 12 -Algernon_May 2017-11-01 35 -Athol_Shmith 2016-02-01 188 -2004_Indesit_ATP_Milan_Indoor_-_Doubles 2015-09-01 1 -Alfred_Dennis 2016-11-01 9 -2nd_Medical_Battalion 2017-05-01 380 -Atom_clocks 2016-03-01 12 -368th_Expeditionary_Air_Support_Operations_Group 2015-06-01 48 -1911_Washington_Senators_season 2017-06-01 46 -1963_Night_Series_Cup 2015-07-01 26 -Aromobates_capurinensis 2017-12-01 21 -2013-14_Super_Lig 2017-05-01 14 -Al_taglio 2016-09-01 2 -2015_RBC_Tennis_Championships_of_Dallas 2016-04-01 18 -2011_Mirabella_Cup 2017-11-01 15 -1996_NHL_Western_Conference_Final 2015-06-01 1 -2009_Formula_Nippon_Championship 2016-11-01 44 -Information_security_awareness 2017-01-01 56 -A_Noiseless_Patient_Spider 2018-03-01 757 -Aggregate_field_theory 2017-06-01 3 -Armenians_in_Central_Asia 2015-10-01 351 -Acona,_Mississippi 2017-10-01 33 -Apozomus 2017-12-01 19 -Antwun_Echols 2016-11-01 87 -1949_Albanian_Cup 2016-11-01 11 -Aesychlus 2016-10-01 4 -1961_Pulitzer_Prize 2015-09-01 879 -East_Midlands_Conference_Centre 2016-04-01 13 -Blumen 2016-11-01 11 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/assets/data_small_custom.txt b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/assets/data_small_custom.txt deleted file mode 100644 index 2d4626327a6..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/assets/data_small_custom.txt +++ /dev/null @@ -1 +0,0 @@ -row('Akiba_Hebrew_Academy';'2017-08-01';241),row('Aegithina_tiphia';'2018-02-01';34),row('1971-72_Utah_Stars_season';'2016-10-01';1),row('2015_UEFA_European_Under-21_Championship_qualification_Group_8';'2015-12-01';73),row('2016_Greater_Western_Sydney_Giants_season';'2017-05-01';86),row('AAA_Americas_Trios_Championship';'2015-10-01';104),row('1420_in_literature';'2016-05-01';20),row('Adair,_Beegie';'2017-08-01';2),row('1980_Rugby_League_State_of_Origin_match';'2017-07-01';2),row('Column_of_Santa_Felicita,_Florence';'2017-06-01';14) diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/assets/data_small_headers.csv b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/assets/data_small_headers.csv deleted file mode 100644 index f01c7b864df..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/assets/data_small_headers.csv +++ /dev/null @@ -1,1001 +0,0 @@ -"path","month","hits" -"Akiba_Hebrew_Academy","2017-08-01",241 -"Aegithina_tiphia","2018-02-01",34 -"1971-72_Utah_Stars_season","2016-10-01",1 -"2015_UEFA_European_Under-21_Championship_qualification_Group_8","2015-12-01",73 -"2016_Greater_Western_Sydney_Giants_season","2017-05-01",86 -"AAA_Americas_Trios_Championship","2015-10-01",104 -"1420_in_literature","2016-05-01",20 -"Adair,_Beegie","2017-08-01",2 -"1980_Rugby_League_State_of_Origin_match","2017-07-01",2 -"Column_of_Santa_Felicita,_Florence","2017-06-01",14 -"2007_Copa_America","2016-07-01",178 -"Car_dealerships_in_the_USA","2015-07-01",11 -"Dihydromyricetin_reductase","2015-07-01",1 -"ATCvet_code_QB05BB01","2017-04-01",1 -"City_CarShare","2017-01-01",125 -"Heidenrod","2017-01-01",10 -"Arthur_Henrique","2016-11-01",12 -"Alan_Ebnother","2015-11-01",66 -"2013_UConn_football_season","2017-05-01",2 -"2008_American_League_Division_Series","2016-12-01",376 -"Antilipaemic","2017-09-01",12 -"Aberzombie","2016-12-01",28 
-"2008_Asian_Wrestling_Championships","2016-12-01",76 -"Federal_Correctional_Complex,_Pollock","2017-01-01",19 -"Central_body","2015-07-01",32 -"Binbrook,_Ontario","2015-07-01",446 -"Azerbaijan_at_the_2016_Judo_Grand_Prix_Samsun","2016-10-01",25 -"Ashford_Lake","2017-10-01",80 -"1942_Joint_Strike","2015-12-01",3 -"AFC_Youth_Championship_2012","2017-10-01",2 -"Akhira","2016-07-01",64 -"Arroniro_Arlieri","2016-10-01",1 -"Alesheim_Burgsalach","2015-05-01",2 -"2700_classic","2017-05-01",4 -"ARX-8_Laevatein","2015-06-01",14 -"1991_Newsweek_Champions_Cup_-_Singles","2017-06-01",3 -"Aphelandra_sinclairiana","2017-07-01",69 -"Asia_Kong","2015-10-01",2 -"2012_Internazionali_Tennis_Val_Gardena_Sudtirol","2016-02-01",1 -"24_Carat_Purple","2017-06-01",476 -"Acroliths","2017-12-01",9 -"Bundesautobahn_3","2016-04-01",264 -"ATC_code_S01AX21","2016-09-01",1 -"Allington,_Lincolnshire","2015-11-01",188 -"Acer_Aspire_One","2017-06-01",5169 -"ATC_code_L04AC","2015-06-01",1 -"1969_New_Year_Honours","2017-07-01",269 -"Antonio_Napolitano","2017-11-01",44 -"Amberfish","2017-10-01",11 -"1976_Cam_2_Motor_Oil_400","2018-03-01",45 -"April_25,_2017","2018-01-01",2 -"Akahori_Station","2016-06-01",11 -"Abducens_palsy","2016-05-01",28 -"Ancona_cathedral","2018-01-01",2 -"Ajou_Motor_College","2017-02-01",83 -"Brad_Skyes","2016-11-01",1 -"Alegro_PCS","2017-07-01",157 -"Franz_Dunshirn","2017-01-01",1 -"Arthur_Godfrey_Road","2016-11-01",3 -"Ab_Golman","2017-05-01",30 -"Art_in_early_modern_Scotland","2016-03-01",98 -"1968_World_Series","2016-02-01",1960 -"1828_in_the_UK","2017-08-01",3 -"Explorer-1_Prime_Unit_2","2016-11-01",11 -"2014_Desafio_Internacional_das_Estrelas","2017-12-01",31 -"Ambulyx_subocellata","2016-08-01",1 -"2008_Hamilton_Tiger-Cats_season","2015-11-01",153 -"Deuterogamist","2015-07-01",5 -"Art_Nouveau_furniture","2017-12-01",839 -"Allison,_Colorado","2015-10-01",85 -"2014_MLS_Re-Entry_Draft","2017-09-01",36 -"Amiot_353","2015-12-01",8 -"ACLU_of_Massachusetts","2015-11-01",106 -"Altable,_Spain","2016-10-01",1 -"Agnidra_scabiosa","2016-12-01",16 -"Dictyotremella_novoguineensis","2015-07-01",1 -"Compiler_Construction","2015-07-01",42 -"Aufheben","2016-11-01",1080 -"Avafauna","2017-06-01",17 -"Atheist_billboard","2017-01-01",19 -"2011_Indonesia_Super_League_All-Star_team","2015-11-01",15 -"BrahMos_II","2015-07-01",31 -"1707_in_art","2016-04-01",17 -"Aeromarine_Model_60","2016-06-01",34 -"Ayatollah-al-ozma","2015-06-01",12 -"Exanimus","2017-01-01",4 -"Anderby","2017-01-01",29 -"Ashgabat_indoor_tennis_arena","2017-07-01",27 -"1971_Rose_Bowl","2015-12-01",961 -"2004_HR56","2016-05-01",5 -"1886_in_South_Africa","2016-03-01",70 -"Bishop_of_Central_Newfoundland","2016-04-01",1 -"Alice_Rivlin","2016-09-01",1137 -"Arriba_en_la_Cordillera","2017-06-01",39 -"Adam_Lively","2016-06-01",77 -"Colasposoma_fairmairei_fairmairei","2017-06-01",5 -"Archie_Barton","2017-02-01",49 -"Aharon_wasserman","2016-01-01",7 -"Alabama_Educational_Television_Commission","2017-05-01",3 -"Advanced_Technology_Bomber","2016-02-01",67 -"1-krona","2017-01-01",4 -"Ahmadabad-e_Kalij-e_Sofla","2017-01-01",3 -"Bob_Dolman","2016-11-01",245 -"Bellevue,_French_Guiana","2017-01-01",5 -"Bison_Nickel","2017-01-01",2 -"Arthur_Drabble","2016-12-01",35 -"Edgewater_Borough,_New_Jersey","2016-11-01",3 -"Alberto_Cambrosio","2017-11-01",31 -"Amalia_Celia_Figueredo","2017-07-01",32 -"1989_-_1992_Rugby_League_World_Cup","2016-01-01",10 -"Admir_Seferagic","2016-06-01",7 -"Adriaan_Loosjes","2015-05-01",46 -"Alfred_Manuel_Martin","2015-06-01",3 
-"Academy_of_the_Arabic_Language","2015-08-01",67 -"Ankita_Shrivastav","2018-01-01",7430 -"Anarchism_in_asia","2017-11-01",1 -"Batiquitos_Lagoon_State_Marine_Conservation_Area","2015-07-01",18 -"Alstonia_calophylla","2017-12-01",2 -"4-Hydroxycyclohexanecarboxylate_dehydrogenase","2016-11-01",4 -"832_symmetry","2017-09-01",6 -"1931_Fuyun_earthquake","2016-07-01",64 -"1998_Masters_of_Formula_3","2016-01-01",60 -"2011_LG_Hockey_Games","2016-04-01",7 -"Generalized_pustular_psoriasis","2017-01-01",159 -"2013_European_Cup_Winter_Throwing","2016-07-01",56 -"2008_in_Argentina","2017-06-01",48 -"Apostrophized","2017-10-01",5 -"Algebraically_compact_module","2017-01-01",5 -"Askett","2015-10-01",79 -"2009_swine_flu_outbreak_timeline","2015-08-01",65 -"72704-01-9","2017-12-01",4 -"Alexandre-Charles-Albert-Joseph_Renard","2017-11-01",4 -"Acyl-CoA_oxidase","2016-09-01",250 -"2011_Budweiser_Shootout","2015-08-01",109 -"Augusta_Davies_Webster","2015-07-01",2 -"Association_theory","2017-07-01",112 -"Abemama_Airfield","2015-05-01",8 -"Archaeological_Museum_of_Heraklion","2015-10-01",14 -"Authorized_marches_of_the_Canadian_Armed_Forces","2016-11-01",241 -"1986_in_Portugal","2017-01-01",7 -"Antiziganism_in_Bulgaria","2017-12-01",13 -"Adriana_Martin","2015-09-01",21 -"2004_Green_Bay_Packers_season","2015-05-01",970 -"Agrippa_the_Sceptic","2017-11-01",95 -"Admiral_Island","2016-04-01",1 -"Auxiliary_sign_language","2015-06-01",31 -"2013_Food_City_500","2015-06-01",90 -"Andy_Roesch","2015-08-01",15 -"Alsoszentivan","2017-05-01",4 -"Architecture_of_Belgium","2015-05-01",199 -"1_South_African_Infantry","2017-06-01",5 -"1930_Auburn_Tigers_football_team","2016-12-01",39 -"1860_in_Canada","2017-05-01",269 -"Aldeaseca_de_la_Frontera","2018-03-01",21 -"Elijah_Fox_Cook","2015-07-01",13 -"2010_BCS_Bowl_Games","2016-03-01",1 -"2017_NPSL_season","2017-06-01",2806 -"Bank_of_New_South_Wales_v_Commonwealth","2016-12-01",173 -"American_Enterprise_Association","2016-02-01",4 -"26th_Kentucky_Derby","2018-03-01",1 -"Chaldean_Diocese_of_Amid","2016-11-01",18 -"Ajaran_language","2016-03-01",1 -"1992_Texas_Rangers_season","2017-06-01",113 -"26_SAS","2017-12-01",3 -"2015_Terengganu_FA_season","2016-01-01",537 -"Aagard,_Oregon","2017-03-01",3 -"Auberry,_CA","2017-05-01",13 -"American_Eskimo_spitz","2015-09-01",3 -"Antidiabetic","2016-11-01",75 -"Asinius","2017-11-01",26 -"Andrey_Vasilievich_Abramov","2016-10-01",1 -"Alan_Carrington","2018-03-01",91 -"Colebrook,_Ontario","2017-06-01",2 -"Abbasabad-e_Kheyrabad","2015-08-01",24 -"Arandjelovac_Municipality","2016-02-01",1 -"Aloysius_Valente","2017-12-01",11 -"Almondo_Curry","2016-03-01",86 -"4th_century_AD","2017-03-01",13 -"Askhat_Dilmukhamedov","2016-02-01",77 -"1147_AD","2017-05-01",1 -"1953_Torneo_di_Viareggio","2017-03-01",20 -"ATP_Schenectady","2015-12-01",30 -"Lakarian_City","2017-01-01",3 -"Adam_Ferency","2017-12-01",176 -"AugustDvorak","2016-07-01",5 -"97th_Light_Infantry_Division","2017-07-01",1 -"16th_Connecticut_Infantry_Regiment","2016-05-01",146 -"2011_Somalian_drought","2017-05-01",2 -"Anbargah","2017-12-01",8 -"1921_in_Paraguayan_football","2016-03-01",2 -"Cosmetic_dermatitis","2017-01-01",5 -"Annunciation_Greek_Orthodox_Cathedral,_Atlanta,_Georgia","2015-09-01",9 -"1300_AM","2016-07-01",106 -"A_Promising_Africa","2016-03-01",41 -"2015-16_Odense_Bulldogs_season","2016-10-01",1 -"Aral_AG","2017-12-01",1446 -"Angel_Vivar_Dorado","2015-12-01",6 -"1951_Australian_Championships","2018-03-01",32 -"DJMax_Portable_Hot_Tunes","2017-01-01",27 -"Allinge","2017-03-01",32 
-"1986_Buick_WCT_Finals","2016-11-01",14 -"Arimatsu,_Aichi","2015-06-01",112 -"Arthur_Berzinsh","2017-02-01",249 -"Apolima_Uta","2017-04-01",23 -"Capitol_Hill_Pride_Festival","2015-07-01",19 -"Kara-Murza","2017-01-01",5 -"Aigleville,_Alabama","2015-11-01",19 -"Abdullah_bin_al-Hussein","2017-02-01",1 -"2017-18_Inter_Milan_season","2018-03-01",26 -"African_Collared_Dove","2016-07-01",10 -"Achaea_dmoe","2016-11-01",3 -"Aurora,_Utah","2016-06-01",201 -"Architecture_in_Portland,_OR","2017-07-01",1 -"Charchala","2015-07-01",4 -"Around_the_Roses","2015-07-01",3 -"1965_in_music","2016-12-01",3394 -"Alojzije_Jankovic","2017-04-01",5 -"Arisu_Seno","2015-08-01",6 -"ALCO_T-6","2017-01-01",77 -"1998_Royal_Bank_Cup","2015-12-01",32 -"1956_Claxton_Shield","2016-11-01",9 -"Anita_Dube","2017-07-01",233 -"Anderson_Windows","2015-05-01",13 -"Annaquatucket_River","2018-03-01",38 -"Black_salve","2017-01-01",1496 -"Anna_Pendleton_Schenck","2017-02-01",11 -"Asghar_Nadeem_Syed","2017-07-01",146 -"Disarming","2016-11-01",5 -"Antarctic_ice_cap","2017-08-01",7 -"Antonio_Ottone","2017-05-01",11 -"Coralie_Larnack","2017-01-01",9 -"Budha_Subba_Gold_Cup","2016-11-01",24 -"Amphoe_Chaiya","2017-03-01",9 -"Anarcho-capitalism_in_Somalia","2016-10-01",7 -"Felix_Loch","2017-01-01",131 -"26508_Jimmylin","2017-12-01",3 -"Andrew_McMillen","2015-11-01",134 -"Dundee_Canal_Industrial_Historic_District","2017-01-01",2 -"Aula_Baratto","2015-12-01",140 -"Church_of_St_Mary,_Knowsley","2015-07-01",1 -"Aggelakis","2017-10-01",1 -"Al_Badiyah","2017-11-01",157 -"Assault_Gunboat","2016-03-01",21 -"Lachau","2017-01-01",4 -"2008_Pittsburgh_Steelers_season","2016-12-01",10018 -"Apolychrosis_candidus","2018-01-01",24 -"Andrei_Krylov","2017-02-01",192 -"Aldesh_Vadher","2018-02-01",7 -"Alwand","2017-02-01",7 -"Edward_Baker_Lincoln","2015-07-01",4347 -"Aermotor_Corporation","2017-11-01",4 -"Aischylos","2017-01-01",7 -"6th_Assault_Aviation_Corps","2017-07-01",100 -"Azygos_lobe","2016-10-01",1598 -"Demirciler,_Nazilli","2015-07-01",4 -"Akhlaq-e-Hindi","2016-11-01",13 -"Dragon_Crusaders","2016-04-01",122 -"25V_USB","2016-01-01",1 -"Calliophis_melanurus","2017-01-01",31 -"Antonionian","2016-10-01",15 -"Ashley_Richardson","2017-09-01",1216 -"1st_Observation_Group","2018-01-01",6 -"Andrzej_Bargiel","2015-05-01",97 -"2008_AFL_National_Under_16_Championships","2018-03-01",20 -"Ammon_Bundy","2016-09-01",11890 -"Benno_Wandolleck","2016-11-01",5 -"Aero-Kros_MP-02_Czajka","2016-03-01",136 -"A6005_road","2015-10-01",14 -"Eagle_Eye_Networks","2015-07-01",101 -"Aarberg","2017-12-01",277 -"Encyclopedia_of_anthropology","2015-07-01",1 -"Duncormick_railway_station","2016-11-01",7 -"Aiqing_huajiao_zhuanyi","2017-03-01",1 -"Crude_oil_washing","2016-04-01",466 -"2010_Indiana_Hoosiers_football_team","2017-06-01",90 -"Book_of_Bodley_Head_Verse","2015-07-01",18 -"Absence_seizure","2016-05-01",18152 -"Cayucupil","2016-04-01",3 -"Akanabee","2017-03-01",1 -"Grooved_consonant","2017-01-01",5 -"Dellamora_philippinensis","2015-07-01",7 -"Dejan_Blazevski","2017-01-01",1 -"Arabis_armena","2016-08-01",25 -"1988_Summer_Paralympics_medal_table","2016-12-01",90 -"2012-13_Basketball_Championship_of_Bosnia_and_Herzegovina","2017-04-01",2 -"1966_in_music","2017-10-01",3510 -"Antti_Tyrvainen","2015-12-01",2 -"African_desert","2016-06-01",262 -"Bruneau_mariposa_lily","2016-04-01",1 -"Bernie_Parmalee","2017-06-01",221 -"2015_South_American_Youth_Football_Championship_squads","2015-09-01",594 -"1985_IIHF_World_U20_Championship","2015-08-01",7 
-"18th_British_Academy_Film_Awards","2018-02-01",270 -"523_Ada","2016-04-01",35 -"Active_Pharmaceutical_Ingredients","2016-02-01",5 -"Burley,_ID_mSA","2015-07-01",2 -"CFRN-TV-10","2017-06-01",2 -"1982_Super_Bowl_of_Poker","2017-08-01",38 -"Australian_Journal_of_Educational_Technology","2017-01-01",1 -"2013_Super_League_Grand_Final","2016-06-01",212 -"2006_BCR_Open_Romania","2015-06-01",25 -"Charlestown_Townies","2016-04-01",319 -"1943_Polish_underground_raid_on_East_Prussia","2017-08-01",8 -"Anthony_Celestino","2018-02-01",182 -"Andrew_Beerwinkel","2018-02-01",73 -"Greigia_atrobrunnea","2017-01-01",1 -"Adrian_Beecham","2017-11-01",1 -"Implementation_of_mathematics_in_set_theory","2017-01-01",12 -"Annastacia_Palaszczuk","2015-05-01",6247 -"Egon_Zimmermann_II","2016-11-01",3 -"Air_aide-de-camp","2018-03-01",137 -"Albert_Murphy","2016-09-01",1 -"1924_Arkansas_Razorbacks_football_team","2016-02-01",28 -"Avondale_Mill","2016-10-01",68 -"Alexander_Volzhin","2015-12-01",25 -"Arek_Monthly","2017-08-01",31 -"Dinka_Blanche","2015-07-01",1 -"1921_Mercer_Baptists_football_team","2016-11-01",10 -"Afro-Antiguan_and_Barbudan","2016-06-01",252 -"American_southern_literature","2016-10-01",3 -"1947_Swiss_Grand_Prix","2016-11-01",32 -"99p_Stores","2017-12-01",3028 -"Artem_Radkov","2018-03-01",21 -"Arctic_brome","2016-12-01",19 -"Battle_Of_Moskova","2015-06-01",6 -"Airdrieonians","2016-06-01",32 -"Advanced_transportation_controller","2018-03-01",79 -"BC_government","2016-12-01",18 -"Antonio_Maura","2017-03-01",457 -"Anjuman,_Afghanistan","2017-09-01",62 -"Deodato_Guinaccia","2015-07-01",13 -"Blowjob_Betty","2016-11-01",28 -"453d_Flying_Training_Squadron","2017-08-01",3 -"1990_Africa_Cup_of_Nations","2016-04-01",22 -"Agenville","2016-08-01",100 -"1202_in_Scotland","2018-01-01",82 -"Calytrix_desolata","2017-06-01",10 -"1957_in_Chile","2016-04-01",13 -"Anglican_Bishop_of_Torres_Strait_people","2017-08-01",1 -"2015_Mexican_Grand_Prix","2015-06-01",528 -"Catalan_parliament","2017-01-01",14 -"Cult_Shaker","2017-01-01",32 -"Ander_Gayoso","2016-11-01",34 -"Ageneiosus_ucayalensis","2017-12-01",20 -"Club_de_Berne","2015-07-01",194 -"Adecco","2016-03-01",9863 -"Anti-unionism","2018-01-01",11 -"Auchindoun_Castle","2017-01-01",102 -"557_in_poetry","2016-07-01",1 -"Abu_ol_Verdi_Rural_District","2017-01-01",1 -"Centro_73","2016-04-01",23 -"Dagger_compact_category","2016-04-01",97 -"Alan_Nunn_May","2017-11-01",770 -"Basal_clade","2015-07-01",44 -"Aizu_Line","2015-08-01",26 -"Edward_Kernan_Campbell","2016-04-01",5 -"865_area_code","2016-12-01",9 -"Bahamas_at_the_1984_Summer_Olympics","2017-06-01",35 -"Gardan_Kalat","2017-01-01",1 -"American_Samoa_national_under-19_football_team","2017-12-01",4 -"Kayah_National_United_League","2017-01-01",14 -"2007_Nordea_Nordic_Light_Open_-_Singles","2016-10-01",2 -"Avondale_Estate","2016-11-01",2 -"Acalolepta_variolaris","2017-02-01",3 -"Anantapur,_Andhra_Pradesh","2017-05-01",1032 -"Amenable_Banach_algebra","2015-08-01",59 -"300_metres","2017-01-01",61 -"Black_Bottom,_Kentucky","2016-04-01",8 -"100_Players_Who_Shook_The_Kop","2018-01-01",1133 -"Adventure_story","2015-07-01",29 -"Anacampsis_lignaria","2017-05-01",5 -"2007_American_Indoor_Football_Association_season","2015-09-01",89 -"Dmitry_Kardovsky","2016-04-01",33 -"A10_autoroute","2015-11-01",27 -"1995_Sydney_Bulldogs_season","2017-04-01",40 -"Ilex_jelskii","2017-01-01",2 -"Adrian_Jose_Hernandez","2016-10-01",2 -"CallAir_A-5","2016-11-01",4 -"22nd_meridian_west","2015-07-01",45 -"Anglican_Diocese_of_Antananarivo","2015-08-01",2 
-"Andrew_Kelsey","2016-11-01",14 -"Brownhill_Creek","2017-06-01",4 -"Abunai_Deka","2015-06-01",269 -"Aisha_Jefferson","2017-04-01",115 -"Alonso_Lopez","2017-03-01",7 -"Aeroparque_Ciudad_de_Mendoza","2016-01-01",1 -"Arthur_Ashley_Sykes","2017-12-01",45 -"Holy_Face_Medal","2017-01-01",20 -"1Chronicles","2018-02-01",1 -"2014_CFU_Club_Championship","2017-12-01",108 -"Aetna_class_ironclad_floating_battery","2015-06-01",37 -"Antoine_Delrio","2015-07-01",2 -"Chislet_Windmill","2015-07-01",38 -"Aerojet_SD-2","2017-07-01",59 -"Age_role_play","2015-09-01",2 -"50687_Paultemple","2018-03-01",8 -"1997-98_Cuban_National_Series","2017-02-01",1 -"Aleksandr_Borisovich_Belyavskiy","2017-10-01",42 -"Carol_MacReady","2017-01-01",111 -"18th_Chess_Olympiad","2015-06-01",134 -"Clara_Schonfeld","2015-07-01",1 -"Apollonius_of_Athens","2017-02-01",35 -"ABC_80","2018-03-01",603 -"Apatelodes_damora","2015-08-01",22 -"Ernest_Walbourn","2016-04-01",30 -"428_BCE","2017-04-01",2 -"72nd_Seaforth_Highlanders","2017-12-01",29 -"Broughton_Hackett","2015-07-01",38 -"A_Fazenda_2","2016-12-01",56 -"ATCvet_code_QJ01MQ","2017-05-01",2 -"Abura,_Iran","2017-03-01",3 -"DeLeon_Independent_School_District","2015-07-01",1 -"Abby_aldrich","2016-09-01",1 -"Cinema_One_Originals","2016-11-01",359 -"2013_European_Short_Course_Swimming_Championships","2017-09-01",124 -"Ars_technica","2015-11-01",442 -"AMS_Production_Company","2016-02-01",1 -"Joao_Soares","2017-01-01",1 -"Cervical_vertebra_6","2017-06-01",45 -"Kevin_Pugh","2017-01-01",2 -"Alpha-1_antitrypsin","2015-11-01",11845 -"Assyrians_in_iran","2017-07-01",53 -"Boophis_ankarafensis","2016-11-01",2 -"A_View_To_a_Kill","2018-01-01",4 -"Charles_Edouard_Brown-Sequard","2015-07-01",7 -"1919_in_Ireland","2017-04-01",239 -"74th_Foot","2015-06-01",3 -"9275_Persson","2016-07-01",22 -"Dalcerides_mesoa","2015-07-01",11 -"A_Summer_Bird-Cage","2016-03-01",248 -"2011_NAB_Cup","2017-10-01",127 -"13th_Parliament_of_Lower_Canada","2015-08-01",41 -"2011_Players_Championship_Finals","2015-07-01",25 -"Flag_of_Tenerife","2017-01-01",128 -"Hypopta_corrientina","2017-01-01",1 -"Jalatarangam","2017-01-01",16 -"Adjoint_endomorphism","2018-01-01",330 -"Anime_conventions","2015-06-01",18 -"2004_Grammy_Award","2015-06-01",13 -"American_war","2015-07-01",80 -"Beynes,_Yvelines","2016-11-01",32 -"Agriculture_Department","2016-06-01",16 -"Andrey_Chisty","2015-10-01",58 -"Ait_Yahia_Moussa","2017-08-01",7 -"Alfred_Blau","2017-03-01",57 -"1869_in_sports","2017-08-01",73 -"Ambolodia_Sud","2016-04-01",6 -"Animal_slaughter","2017-06-01",6423 -"Adamowka_Commune","2018-01-01",2 -"Arsenic_pentachloride","2016-03-01",467 -"220_BCE","2016-01-01",3 -"863d_Engineer_Battalion","2015-11-01",160 -"Amer_Abu-Hudaib","2017-04-01",31 -"Aaina_tv","2017-08-01",3 -"Arnhem,_Netherlands","2015-08-01",67 -"Antoine_de_sartine","2015-08-01",4 -"ATC_code_A16","2016-01-01",155 -"Eastern_Front","2017-01-01",70 -"Ashy-headed_tyrannulet","2016-12-01",44 -"Aoheng_language","2015-08-01",64 -"1996_World_Junior_Canoe_Slalom_Championships","2017-11-01",15 -"Agriophara_nodigera","2017-11-01",12 -"Amsterdam_Island_cattle","2015-12-01",675 -"Aliyah_from_the_Soviet_Union_in_the_1990s","2017-08-01",54 -"Abandoned_and_Little_Known_Airfields","2018-01-01",2 -"Church_numerals","2015-07-01",57 -"Ankeny_Christian_Academy","2015-09-01",74 -"2010_FIFA_World_Cup_qualification_-_AFC_First_Round","2017-06-01",58 -"1ESS_switch","2015-07-01",514 -"Chelys_boulengerii","2016-04-01",1 -"Bivalent_logic","2016-11-01",25 -"Ivan_Skavinsky_Skavar","2017-01-01",1 
-"Fergus_Sings_the_Blues","2016-04-01",62 -"2015-16_Libyan_Premier_League","2017-02-01",4 -"Dutch_Chess_Championship","2017-01-01",35 -"Every_Man_in_His_Humor","2016-11-01",1 -"2008_Allstate_BCS_National_Championship_Game","2015-08-01",11 -"Aq_Tappeh,_Hamadan","2015-09-01",25 -"Agrotractor","2016-02-01",1 -"Alexander_of_Pfalz-Zweibrucken","2017-12-01",2 -"2003_Mistral_World_Championships","2016-04-01",6 -"146th_Fighter-Interceptor_Wing","2015-11-01",49 -"Al-Qahir","2016-04-01",328 -"25604_Karlin","2015-05-01",20 -"Allen_taflove","2017-12-01",3 -"Aretha_Thurmond","2017-05-01",109 -"Atlanta_and_lagrange_rail_road","2015-07-01",1 -"ACSI_College_Iloilo","2015-10-01",1 -"Alan_Sacks","2015-07-01",150 -"African_Desert_Warbler","2017-02-01",11 -"A_Man_and_His_Soul","2018-02-01",89 -"ASCII_ART","2015-05-01",9 -"1992-93_VMI_Keydets_basketball_team","2016-10-01",1 -"George_and_the_Dragon","2017-01-01",18 -"2012_NAB_Cup","2016-12-01",99 -"1965_Indy_500","2016-05-01",51 -"Forest_Glen,_Nova_Scotia","2016-04-01",9 -"A_Critical_Dictionary_of_English_Literature","2016-08-01",4 -"Aquion_Energy","2015-08-01",1077 -"Alibeyce,_Emirdag","2017-09-01",1 -"Blauhu00F6hle","2015-07-01",1 -"Ian_Sommerville","2017-01-01",1 -"Air_propulsion","2017-07-01",474 -"2016_12_Hours_of_Sebring","2016-10-01",187 -"Asites","2017-07-01",4 -"Al-Kini","2017-03-01",1 -"Austin_Aztex_2009_season","2016-03-01",10 -"Alto_Vista_Chapel","2015-12-01",833 -"Abecedaria","2017-04-01",22 -"Farm_to_Market_Road_2503","2016-11-01",3 -"Anglican_Bishop_of_The_Leeward_Islands","2015-09-01",2 -"Basketball_at_the_2011_Pan_American_Games","2017-06-01",120 -"Angela_Peel","2016-08-01",7 -"Amber_Frey","2018-02-01",728 -"Afraid_to_Sleep","2017-06-01",51 -"ATC_code_A02BA","2018-02-01",7 -"Apateon_pedestris","2015-11-01",5 -"Alois_Estermann","2015-12-01",1155 -"1752_in_science","2016-01-01",78 -"Baldassin","2017-06-01",3 -"Camilla_Hildegarde_Wedgwood","2017-01-01",1 -"B-A-C-H_motive","2016-10-01",3 -"AI_Velorum_star","2016-09-01",1 -"Ali_Zayn_al-Abidin","2017-04-01",71 -"Ailurarctos_lufengensis","2015-07-01",1 -"Clearview,_Philadelphia","2017-06-01",67 -"Adam_Sender","2016-08-01",759 -"Apriona_paucigranula","2018-02-01",7 -"Dark_at_the_Top_of_the_Stairs","2015-07-01",10 -"Acanthio","2017-12-01",11 -"1980_Labatt_Brier","2018-01-01",111 -"2016-17_New_York_Knicks_season","2017-10-01",21 -"1995_CAF_Cup","2015-10-01",48 -"Boiled_linseed_oil","2016-04-01",79 -"2015_Kumanovo_clashes","2016-07-01",6 -"David_Jamieson","2017-01-01",3 -"1915_Florida_Gators_football_team","2015-08-01",32 -"2010-11_New_Zealand_Football_Championship","2017-03-01",1 -"Ashley_Church","2015-08-01",27 -"Acanthoxylini","2017-06-01",27 -"American_Hindu","2016-10-01",33 -"Amylosporomyces","2015-12-01",20 -"2007_Southeast_Asia_Basketball_Association_Championship","2018-01-01",1 -"Aethelred_I","2017-08-01",1 -"2-methyl-GPP_synthase","2018-02-01",1 -"Dave_Aspin","2016-11-01",6 -"Descent_of_the_Nine","2016-04-01",1 -"2010_Kleen_Energy_Systems_disaster","2017-08-01",3 -"1978_in_Japanese_television","2017-08-01",70 -"Alexandros_Falekas","2018-01-01",1 -"1910_in_Afghanistan","2016-02-01",32 -"Abd-ru-shin","2017-09-01",681 -"610_in_poetry","2017-05-01",3 -"2015_arrests_of_FIFA_officials","2017-12-01",46 -"ATmega328P","2017-09-01",26 -"A_G_Mathews","2017-12-01",3 -"Attack_on_Mers-el-Kebir","2016-12-01",511 -"2016_in_Estonia","2016-05-01",89 -"Adidas-Salomon","2015-09-01",574 -"Education_and_Skills_Act_2008","2016-11-01",141 -"1789_in_the_United_States","2015-07-01",845 
-"Apple_Computer_advertising","2015-09-01",7 -"9th_US_Army","2016-12-01",17 -"Ad_Rotas","2016-02-01",16 -"Agios_Ioannis,_Paphos","2018-03-01",97 -"Arabian_toad","2017-12-01",100 -"Anterior_pituitary_acidophil","2016-06-01",47 -"Arguello,_Christine","2017-12-01",3 -"Amilkar_Ariza","2017-03-01",67 -"Charles_Grierson","2016-11-01",14 -"Achi,_Bolivar","2017-11-01",1 -"Exonym_and_endonym","2017-01-01",1712 -"Abdul_Maroof_Gullestani","2017-12-01",20 -"Fairlawne_Handicap_Chase","2016-04-01",11 -"1963_Virginia_Tech_Hokies_football_team","2016-07-01",6 -"AE_Clarke","2017-12-01",3 -"ALFA-PROJ_Model_3563_sport","2017-10-01",2 -"Aleks_Vanderpool-Wallace","2018-02-01",32 -"Antioxident","2017-05-01",16 -"Calliope_Project","2015-07-01",3 -"Anderson_World","2017-10-01",5 -"Amydria_selvae","2017-11-01",6 -"Antoni_Katski","2016-09-01",1 -"Bera_District","2017-06-01",85 -"80_South_Street_New_Design","2016-07-01",86 -"Askizsky","2015-08-01",2 -"Amausi_metro_station","2015-11-01",44 -"9486_Utemorrah","2017-04-01",5 -"Army_CIS","2018-01-01",2 -"1851_Chilean_Revolution","2017-06-01",255 -"Jens_Robert_Dahlqvist","2017-01-01",6 -"1966-67_Tercera_Division","2017-05-01",1 -"Chanel_Iman","2017-06-01",9434 -"Astydamia","2017-06-01",34 -"1944_in_Belgium","2016-09-01",27 -"Acton_Baronets,_of_Aldenham","2017-01-01",1 -"2014_FBS_season","2016-12-01",5 -"2016_Winter_Youth_Olympics","2017-09-01",2090 -"1903_Clemson_Tigers_football_team","2017-06-01",50 -"2014_Taca_da_Liga_Final","2017-04-01",2 -"10th_Alberta_general_election","2016-11-01",4 -"Edertalschule_Frankenberg","2016-04-01",16 -"4th_Punjab_Infantry_Regiment","2017-09-01",136 -"America_Air_Linhas_Aereas","2018-02-01",1 -"Australian_Liberal_Party","2015-06-01",146 -"American_licorice","2017-05-01",15 -"2013_NASCAR_Cup_Series","2015-10-01",49 -"Anja_Lundqvist","2016-03-01",93 -"Amauris_dannfelti","2016-01-01",12 -"Abandoned_shipwrecks_act","2015-06-01",3 -"11086_Nagatayuji","2017-02-01",3 -"Advertising_tissues","2017-06-01",1 -"Anti_corn-law_league","2016-10-01",1 -"Always_Guaranteed","2017-09-01",445 -"Alfredo_Palacio_Moreno","2018-01-01",48 -"Antonio_Puche_Vicente","2015-06-01",1 -"Elazig_Province","2017-01-01",1 -"ATC_code_C02AC01","2017-05-01",1 -"Alexander_Mattock_Thompson","2016-08-01",2 -"Cocos_Islands_Malay","2017-06-01",63 -"Aftonbladet_antisemitism_controversy","2016-10-01",1 -"Azad_Kashmir,_Pakistan","2015-07-01",14 -"1852_English_cricket_season","2016-10-01",24 -"Birmingham_Pride","2015-07-01",129 -"Air-pollution_controls","2015-08-01",4 -"James_Southerton","2017-01-01",20 -"Architecture_of_Chiswick_House","2015-06-01",240 -"Alexander,_Colin","2015-12-01",1 -"Al-Mansooreh","2016-10-01",1 -"Arielle_Gastineau_Ashton","2017-12-01",18 -"Blue_Ben","2017-06-01",240 -"1911_Michigan_State_Normal_Normalites_football_season","2017-11-01",1 -"Arctictis_binturong","2017-04-01",334 -"Fornaldarsaga","2016-04-01",18 -"Bibasis_gomata","2017-06-01",35 -"Anna_Schchian","2017-06-01",19 -"2005_in_Rwanda","2016-08-01",69 -"Archaeology_in_ethiopia","2016-01-01",1 -"23277_Benhughes","2016-12-01",2 -"Bahrain_-_USA_relations","2017-06-01",1 -"Dieter_Korn","2015-07-01",13 -"Antidynamo_theorem","2016-10-01",222 -"An_Jae-Won","2016-12-01",1 -"Bruray","2015-07-01",82 -"Gosport_Council_election,_2004","2017-01-01",2 -"1856_in_South_Africa","2017-03-01",60 -"Dialakoro,_Guinea","2017-01-01",1 -"05-CV-1678","2016-02-01",1 -"Allison,_Henry","2016-12-01",5 -"Animal_house","2016-06-01",1399 -"Alexander_Tabarrok","2017-03-01",5 -"Chung-Ho_Memorial_Hospital","2017-06-01",50 
-"2013_Internazionali_Trofeo_Lame_Perrel-Faip_-_Doubles","2016-03-01",4 -"1965_Speedway_World_Team_Cup","2017-11-01",13 -"Alexander_Ollongren","2017-11-01",788 -"Amore_traditore,_BWV_203","2016-06-01",83 -"Arthur_William_Rogers","2015-10-01",31 -"Ashoka_pillar","2017-02-01",265 -"1_62_honeycomb","2018-02-01",10 -"1926_Australasian_Championships","2016-05-01",47 -"Export_award","2016-04-01",3 -"5000_Days_Project","2016-07-01",75 -"2012_UCI_Europe_Tour","2017-03-01",65 -"1985_Toronto_Indoor_-_Singles","2015-08-01",4 -"Cedar_Grove,_North_Carolina","2017-06-01",18 -"Battle_of_The_Afsluitdijk","2016-04-01",15 -"Arishtanemi","2017-03-01",7 -"Alfalfa_bill_murray","2016-12-01",7 -"Elisha_Jay_Edwards","2015-07-01",28 -"Arturas_Paulauskas","2016-01-01",10 -"Abdelrahman_Hamad","2015-09-01",2 -"1948_in_Northern_Ireland","2015-07-01",29 -"1988_in_philosophy","2015-05-01",70 -"5-Hydroxytryptaminen","2016-01-01",4 -"2017_FBS_season","2017-10-01",124 -"Areeiro","2016-04-01",2 -"Alemonides","2016-03-01",6 -"Abrochia_caurensis","2016-10-01",1 -"Anafylaxia","2018-01-01",2 -"1938_Grand_National","2018-02-01",80 -"China-Korea_Champions_League","2015-07-01",4 -"Acetyl_bromide","2017-11-01",448 -"24_hours_of_lemans","2015-05-01",37 -"Albright_hereditary_osteodystrophy","2017-02-01",153 -"Ashland_Bus_System","2015-08-01",115 -"1,8-Cineole_2-endo-monooxygenase","2016-10-01",8 -"2005-2006_NHL_Season","2015-11-01",6 -"Cammie_Dunaway","2015-07-01",344 -"D-Fish","2016-11-01",2 -"4_sister_vineyard","2015-09-01",1 -"Alessia_Cara_discography","2017-03-01",100 -"Alexander_Berg","2017-08-01",63 -"4822_Karge","2018-02-01",32 -"Emile_Francis_Trophy","2017-01-01",8 -"Amin_Ghaseminejad","2017-06-01",45 -"Artichia","2017-09-01",19 -"Cividale","2016-11-01",41 -"2007_Orissa_Violence","2016-05-01",1 -"Australian_Saltbush","2016-12-01",5 -"Asian_Food_Channel","2016-09-01",727 -"Camp_iawah","2015-07-01",1 -"ATC_code_J01MA04","2017-11-01",1 -"Arpad_Balazs","2017-10-01",2 -"Angel_of_Music,_or_The_Private_Life_of_Giselle","2018-02-01",56 -"1983_Torneo_di_Viareggio","2016-03-01",22 -"Arellano_University","2017-09-01",1699 -"ATC_code_B03AA","2017-11-01",1 -"FS5000","2016-11-01",1 -"Abd-Allah_ibn_Zubayr","2017-05-01",2 -"1889_SAFA_season","2016-04-01",28 -"Aloha_bowl_broadcasters","2015-05-01",2 -"1994_All_England_Open_Badminton_Championships","2016-07-01",75 -"Are_We_Not_Horses","2015-07-01",79 -"Angiolo_Torchi","2018-02-01",5 -"Chimanimani_National_Park","2017-06-01",37 -"Art_manifesto","2017-09-01",2619 -"Adrian_Apostol","2016-10-01",62 -"Adventure_book","2015-10-01",14 -"Albemarle_Bertie","2016-06-01",20 -"Adam_Deibert","2017-08-01",611 -"Alberta_association_of_architects","2017-10-01",2 -"Alloschmidia","2017-11-01",15 -"Administrative_department_of_security","2016-05-01",1 -"Archdeaconry_of_Dudley","2017-07-01",19 -"Ammayenna_Sthree","2015-12-01",38 -"Aaron_Spelling","2016-05-01",25128 -"Anatolian_hieroglyph","2016-07-01",308 -"Central_University_of_Rajasthan","2016-11-01",323 -"Annamanum_touzalini","2017-08-01",7 -"Acleris_hispidana","2016-11-01",2 -"Frisco_kid","2016-04-01",15 -"Allerheiligenberg_monastery","2017-12-01",2 -"Arctic_comb_jelly","2017-03-01",3 -"279377_Lechmankiewicz","2016-06-01",1 -"AEGON_Pro-Series_Loughborough","2018-02-01",7 -"Firefly_Space_Systems","2017-01-01",235 -"2000-01_Hong_Kong_League_Cup","2017-12-01",6 -"British_supermarkets","2017-01-01",2 -"A_description_of_New_England","2016-10-01",13 -"Artificial_Flavoring","2016-06-01",2 -"Anglican_bishop_of_the_Torres_people","2018-02-01",1 
-"Antonio_Diaz_Cardoso","2018-02-01",1 -"Johan_Patriksson","2017-01-01",3 -"Ashutosh_Morya","2017-07-01",1 -"Iron_ore","2017-01-01",3682 -"AT-16_Scallion","2015-08-01",594 -"Data_analyst","2015-07-01",134 -"Cabbageball","2016-04-01",3 -"Acanthonyx_seriopuncta","2017-04-01",2 -"Aegeria_ruficauda","2017-10-01",1 -"Archibald_Douglas,_1st_Earl_of_Ormond","2016-06-01",100 -"2014_European_Championships_in_Athletics","2017-01-01",3 -"1Co-Co1","2017-08-01",77 -"Arthur_Abba_Goldberg","2015-10-01",2 -"Ameri-Cana_Ultralights","2015-05-01",33 -"1979_British_Formula_One_season","2015-12-01",218 -"American_colonial_history","2016-06-01",6 -"Arcadia_Martin_Wesay_Toe","2015-06-01",73 -"Adam_Ornstein","2017-08-01",2 -"Archive_of_Modern_Conflict","2016-12-01",307 -"Ciro_Urriola","2015-07-01",12 -"Acanthosyris","2015-12-01",53 -"Eriopyga_jamaicensis","2015-07-01",1 -"10th_parallel_north","2016-06-01",1412 -"Derek_Almond","2017-01-01",2 -"Jaimanglapur","2017-01-01",4 -"Aphroditeola_olida","2018-02-01",6 -"18th_dynasty_of_egypt","2017-06-01",2 -"Ali_ben_Ahmed","2016-08-01",62 -"Ashkur_Mahalleh","2018-02-01",8 -"Adolf_Mosengel","2017-02-01",54 -"1838_Safed_pogrom","2016-02-01",1 -"1829_in_architecture","2017-05-01",24 -"Arcones,_Segovia","2016-05-01",3 -"Albert_Smith_Medal","2018-02-01",30 -"Arqanqergen_mass_murder","2015-10-01",60 -"Jaan_Usin","2017-01-01",4 -"2009_Bangladesh_Rifles_revolt","2016-03-01",269 -"-coltore","2015-11-01",9 -"Ernest_Makins","2017-01-01",10 -"Amsterdam_Bijlmer_Arena","2016-07-01",87 -"Apostolic_assemblies_of_christ","2018-01-01",1 -"Abirabad,_Razavi_Khorasan","2015-08-01",26 -"2016_All-Ireland_Senior_Football_Championship","2015-10-01",883 -"Asylum_seeking","2016-06-01",36 -"56th_parallel","2015-07-01",12 -"Junior_roller_derby","2017-01-01",19 -"Ana_Goncalves","2016-03-01",2 -"Alekseevskiy_Raion","2017-11-01",1 -"2009_Vietnam_national_football_team_results","2017-07-01",15 -"Chicago,_Burlington_and_Quincy_Railroad_Depot","2017-01-01",2 -"Fox_Valley_Conference","2016-04-01",84 -"Brachioplasty","2017-06-01",304 -"Arnold_Doren","2017-06-01",11 -"All_Ireland_mandolin_Champion","2015-07-01",2 -"Deborah_Rennard","2016-04-01",814 -"Anthony_Macdonnell","2016-02-01",2 -"Azerbaijan_Pakistan_relations","2017-01-01",1 -"A_Girl_Named_Zippy","2018-03-01",346 -"Academic_OneFile","2018-02-01",109 -"East_Point_Academy","2017-01-01",48 -"2011_Italian_Figure_Skating_Championships","2017-03-01",47 -"Chen_Qiao_En","2016-04-01",52 -"Canobie_lake","2016-04-01",1 -"Andrei_Arlashin","2017-11-01",13 -"Again_Into_Eyes","2017-12-01",54 -"Andropogon_curtipendulus","2018-02-01",1 -"Abbath","2016-05-01",927 -"Alien_Opponent","2016-05-01",160 -"Art_of_Love","2016-02-01",3 -"Ariana_Huffington","2017-05-01",84 -"Amy_Poehler","2016-04-01",62732 -"Cherven,_Rousse_Province","2015-07-01",2 -"1_Month_2_Live","2018-03-01",306 -"Country_Day_School_of_the_Sacred_Heart","2017-06-01",132 -"Cooperative_institute_for_arctic_research","2015-07-01",2 -"Depression_symptoms","2017-01-01",7 -"Brent_Skoda","2016-04-01",31 -"American_Christians","2016-12-01",10 -"Counterbleed","2017-01-01",1 -"Abarka","2016-05-01",325 -"Aleksander_Povetkin","2017-02-01",89 -"Austin_TX","2016-03-01",119 -"Aleksandr_Tretyakov","2017-01-01",40 -"Connecticut_congressional_districts","2016-11-01",3 -"Alessio_de_Marchis","2015-10-01",66 -"Capel_Salem,_Pwllheli","2016-04-01",6 -"5-alpha_reductase_deficiency","2016-10-01",30 -"Annabelle_Croft","2016-01-01",32 -"Aeronca_Aircraft_Corporation","2017-05-01",9 -"1597_in_Scotland","2016-07-01",18 
-"Alf_Somerfield","2017-11-01",10 -"Agapanthia_villosoviridescens","2018-02-01",53 -"Adam_Goldberg","2015-12-01",42338 -"1961_Paris_massacre","2017-01-01",52 -"2007_in_radio","2017-04-01",131 -"Arthur_French,_5th_Baron_de_Freyne","2015-12-01",44 -"AMD_Socket_G3","2017-04-01",121 -"Albert_geouffre_de_lapradelle","2016-02-01",1 -"Collaborations_between_ex-Beatles","2015-07-01",1279 -"Betty_Ireland","2016-04-01",40 -"Domingo_Tirado_Benedi","2015-07-01",1 -"Bac_Ly","2016-04-01",1 -"All_gas-phase_iodine_laser","2015-07-01",136 -"Andre_Salifou","2017-01-01",1 -"1,3-b-D-glucan","2017-05-01",2 -"Joseph_Johnston_Muir","2017-01-01",3 -"17th_of_Shahrivar_league","2016-05-01",63 -"2001_in_art","2018-03-01",131 -"Abiji_language","2017-10-01",6 -"Ahliah_school","2018-03-01",133 -"1605_in_India","2017-12-01",83 -"Dr_Jeom_Kee_Paik","2015-07-01",1 -"1954_Texas_Longhorns_football_team","2018-01-01",69 -"1985_Little_League_World_Series","2016-07-01",226 -"Eleanor_de_bohun","2015-07-01",1 -"Adrenaline_strength","2016-03-01",8 -"434_BC","2018-02-01",97 -"8x60mm_S","2015-06-01",61 -"2016-17_South_Pacific_cyclone_season","2017-09-01",101 -"Beth_Aala","2017-06-01",15 -"Al_Shaver","2017-07-01",138 -"Adelphoi_Zangaki","2018-01-01",89 -"Cyclopropyl_group","2016-11-01",167 -"216_Sqn","2017-08-01",11 -"20469_Dudleymoore","2017-05-01",5 -"Attila_Hildmann","2017-06-01",103 -"1970_Arkansas_Razorbacks_football_team","2016-11-01",66 -"Anthony_Fairfax","2017-08-01",24 -"Fort_Point,_Boston","2016-04-01",384 -"Epsilon_numbers","2016-04-01",3 -"2013_Recopa_Sudamericana","2016-05-01",202 -"Italo_Disco","2017-01-01",27 -"Andersen_Press","2015-09-01",228 -"Amasa_Walker","2017-09-01",146 -"2010_in_Israeli_film","2015-09-01",234 -"A-25_Shrike","2017-12-01",90 -"2009_Winnipeg_Blue_Bombers_season","2017-06-01",66 -"Ashland_County,_Ohio","2016-10-01",1298 -"Dusky_Turtle_Dove","2017-01-01",3 -"Antonov_148","2017-02-01",129 -"Abdul_Hamid_Lahori","2017-08-01",458 -"Amadeo_of_Spain","2015-11-01",1701 -"2015_Novak_Djokovic_tennis_season","2017-07-01",2484 -"Dhabawallah","2016-04-01",4 -"Afshar_Beylik","2017-06-01",4 -"1998_ATP_Tour_World_Championships_-_Singles","2017-03-01",20 -"Beach_Haven_Terrace,_New_Jersey","2016-11-01",4 -"Aix-la_Chapelle","2018-03-01",66 -"Ackerman,_Val","2017-05-01",2 -"47th_Ohio_Infantry","2016-12-01",59 -"100_People,_100_Songs","2017-11-01",517 -"2007_Masters_of_Formula_3","2016-01-01",63 -"1832_US_presidential_election","2016-05-01",6 -"Aaron_Baker","2016-05-01",113 -"2015-16_FIBA_Europe_Club_Competition","2017-11-01",2 -"Alebra","2018-02-01",27 -"Asilus_crabroniformis","2016-11-01",4 -"Earth_and_Air_and_Rain","2016-11-01",31 -"2014_Stade_Tata_Raphael_disaster","2018-02-01",1 -"Alexander_Izvolski","2017-01-01",7 -"Fabric_17","2017-01-01",13 -"1925_Campeonato_de_Portugal_Final","2018-01-01",37 -"1948_Ashes_series","2017-01-01",121 -"Abraham_ben_david","2016-09-01",4 -"2006_Acropolis_Rally","2017-01-01",12 -"Alottment","2017-03-01",6 -"Angolanness","2015-07-01",11 -"2002_in_NASCAR_Craftsman_Truck_Series","2016-01-01",12 -"Aces_of_ANSI_Art","2015-08-01",77 -"Alan_Tskhovrebov","2015-08-01",13 -"Aegis_Security","2015-10-01",1 -"Alec_the_Great","2015-05-01",69 -"Corel_SnapFire","2016-11-01",9 -"AbdulMagid_Breish","2016-03-01",276 -"A_Night_in_NYC","2015-10-01",232 -"79th_parallel_south","2016-11-01",17 -"Alphonse_Crespo","2016-06-01",50 -"Acacia_petite_feuille","2016-05-01",1 -"Amstrad_464","2017-12-01",18 -"Charles_County,_Maryland","2017-06-01",2079 -"1972_outbreak_of_smallpox_in_Yugoslavia","2018-03-01",375 
-"Alungili","2017-09-01",37 -"Brontispalaelaps_froggatti","2016-04-01",1 -"Alison_Lacey","2016-12-01",94 -"Alessandro_Capra","2017-07-01",21 -"2012_UCF_Knights_baseball_team","2016-08-01",46 -"16_Candles_Down_the_Drain","2017-05-01",2 -"Anandra_strandi","2015-08-01",11 -"Brigitte_Rohde","2017-01-01",9 -"Agenda_VR3","2015-09-01",93 -"1641_in_architecture","2015-11-01",32 -"ALF_Tales","2016-04-01",280 -"A_Woman_Scorned","2015-07-01",164 -"Air-free_techniques","2016-04-01",5 -"1973_in_British_television","2016-04-01",96 -"All_Saints_Cemetery","2017-04-01",345 -"1981_in_Swedish_football","2016-06-01",21 -"Apple_Dictionary","2016-10-01",19 -"2015_PBZ_Zagreb_Indoors","2016-08-01",121 -"16th_IIFA_Awards","2017-02-01",1194 -"Duki,_Pakistan","2016-04-01",14 -"Administration_of_Borderchek_points,_Population_and_Immigration","2015-09-01",2 -"Alonia,_Zante","2017-10-01",1 -"African_United_Club","2017-10-01",50 -"Burjanadze-Democrats","2016-04-01",19 -"Application_software_development","2015-06-01",27 -"Almonacid_de_la_Sierra,_Zaragoza","2015-06-01",1 -"Baissour","2016-12-01",100 -"Coti_Sorokin","2016-04-01",46 -"Alberta_and_Great_Waterways_Railway_scandal","2017-05-01",70 -"1942_Alabama_Crimson_Tide_football_team","2015-09-01",144 -"Adam_Art_Gallery","2016-08-01",80 -"Akshinski_Raion","2016-09-01",1 -"Edwin_of_Deira","2015-07-01",34 -"Altaf_Mahmud","2015-10-01",245 -"Astana_cycling_team","2017-12-01",7 -"1982_CART_World_Series_season","2015-12-01",3 -"3_Rotaxane","2017-03-01",1 -"1924_Eastern_Suburbs_season","2015-08-01",32 -"Downtown_Science","2016-11-01",6 -"1993-94_Slovak_Cup","2017-04-01",1 -"Brandon_Wayne_Hedrick","2016-04-01",32 -"2015_Brasil_Open","2016-01-01",403 -"Aung_Pinle_Hsinbyushin","2016-02-01",69 -"An_Numaniyah","2016-06-01",185 -"24th_Arkansas_Infantry_Regiment","2016-03-01",64 -"Adimchinobe_Echemandu","2017-05-01",90 -"August_Belmont,_Jr","2017-06-01",8 -"Empacher","2016-11-01",102 -"Abdulkadir_Sheikh_Dini","2017-01-01",70 -"Alvaro_Quiros","2017-08-01",12 -"Algernon_May","2017-11-01",35 -"Athol_Shmith","2016-02-01",188 -"2004_Indesit_ATP_Milan_Indoor_-_Doubles","2015-09-01",1 -"Alfred_Dennis","2016-11-01",9 -"2nd_Medical_Battalion","2017-05-01",380 -"Atom_clocks","2016-03-01",12 -"368th_Expeditionary_Air_Support_Operations_Group","2015-06-01",48 -"1911_Washington_Senators_season","2017-06-01",46 -"1963_Night_Series_Cup","2015-07-01",26 -"Aromobates_capurinensis","2017-12-01",21 -"2013-14_Super_Lig","2017-05-01",14 -"Al_taglio","2016-09-01",2 -"2015_RBC_Tennis_Championships_of_Dallas","2016-04-01",18 -"2011_Mirabella_Cup","2017-11-01",15 -"1996_NHL_Western_Conference_Final","2015-06-01",1 -"2009_Formula_Nippon_Championship","2016-11-01",44 -"Information_security_awareness","2017-01-01",56 -"A_Noiseless_Patient_Spider","2018-03-01",757 -"Aggregate_field_theory","2017-06-01",3 -"Armenians_in_Central_Asia","2015-10-01",351 -"Acona,_Mississippi","2017-10-01",33 -"Apozomus","2017-12-01",19 -"Antwun_Echols","2016-11-01",87 -"1949_Albanian_Cup","2016-11-01",11 -"Aesychlus","2016-10-01",4 -"1961_Pulitzer_Prize","2015-09-01",879 -"East_Midlands_Conference_Centre","2016-04-01",13 -"Blumen","2016-11-01",11 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/assets/dump.sql b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/assets/dump.sql deleted file mode 100644 index fcbf558352a..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/assets/dump.sql +++ 
/dev/null @@ -1 +0,0 @@ -INSERT INTO some_table (`path`, `month`, `hits`) VALUES ('Bangor_City_Forest', '2015-07-01', 34), ('Alireza_Afzal', '2017-02-01', 24), ('Akhaura-Laksam-Chittagong_Line', '2015-09-01', 30), ('1973_National_500', '2017-10-01', 80), ('Attachment', '2017-09-01', 1356), ('Kellett_Strait', '2017-01-01', 5), ('Ajarani_River', '2018-01-01', 30), ('Akbarabad,_Khomeyn', '2017-03-01', 8), ('Adriaan_Theodoor_Peperzak', '2018-02-01', 88), ('Alucita_dryogramma', '2015-09-01', 1), ('Brit_Med_J', '2015-07-01', 1), ('4th_Metro_Manila_Film_Festival', '2015-09-01', 80), ('Alialujah_Choir', '2018-03-01', 221), ('1953-54_SM-sarja_season', '2016-09-01', 1), ('Air_Force_Song', '2018-02-01', 19), ('4-6_duoprism', '2016-03-01', 30), ('Ashley_Spurlin', '2017-06-01', 94), ('Asfaq_Kayani', '2017-10-01', 1), ('1607_in_architecture', '2016-06-01', 7), ('4-way_speakers', '2015-10-01', 2), ('Blue_Heeler', '2015-07-01', 149), ('5_Euro', '2017-04-01', 16), ('2009_Spa-Francorchamps_GP2_Series_round', '2016-04-01', 12), ('2015_Guru_Granth_Sahib_desecration', '2015-11-01', 6821), ('Agriculture_Marketing_Service', '2016-07-01', 2), ('2006_Football_League_Cup_Final', '2015-11-01', 1711), ('2008_Uber_Cup_group_stage', '2016-02-01', 40), ('1923_PGA_Championship', '2016-08-01', 97), ('Fannie_Bay', '2016-04-01', 6), ('AlchemyAPI', '2016-04-01', 344), ('Cinema_of_Italy', '2017-01-01', 1217), ('Arodes', '2016-11-01', 36), ('Damien_Marley', '2015-07-01', 168), ('Al_Jumayl_Baladiyat', '2015-08-01', 5), ('2015_Alabama_State_Hornets_football_team', '2017-06-01', 32), ('Aglossa_tanya', '2016-03-01', 1), ('73rd_Pennsylvania_Infantry', '2017-01-01', 12), ('2015_European_Junior_and_U23_Canoe_Slalom_Championships', '2018-02-01', 31), ('African_Leopard', '2016-08-01', 64), ('Faverolles,_Orne', '2017-01-01', 5), ('Aaron_Fukuhara', '2015-11-01', 17), ('Annular_ligaments_of_trachea', '2017-01-01', 31), ('2014_US_Open_Series', '2016-11-01', 35), ('A_Better_Mousetrap', '2018-02-01', 4), ('Dibaklu', '2016-11-01', 1), ('At_Samat_District', '2015-06-01', 35), ('Aaron_Peasley', '2017-05-01', 32), ('Apistomology', '2015-12-01', 2), ('Buyat_Bay', '2015-07-01', 54), ('1942_Estonian_Football_Championship', '2017-05-01', 22), ('Action_for_Autism', '2016-06-01', 346), ('100_Hz', '2015-06-01', 72), ('2003_Arizona_State_Sun_Devils_football_team', '2017-05-01', 82), ('Antona_obscura', '2016-09-01', 1), ('Akiko_Sugiyama', '2015-12-01', 32), ('Elysburg', '2016-11-01', 8), ('2017_New_South_Wales_Cup', '2017-09-01', 38), ('2011-12_Gold_Coast_United_FC_season', '2017-06-01', 1), ('Agency_for_the_Prohibition_of_Nuclear_Weapons_in_Latin_America_and_the_Caribbean', '2016-04-01', 15), ('Albert_Dunn', '2017-08-01', 87), ('Hahamakin_ang_Lahat', '2017-01-01', 984), ('2013_Spuyten_Duyvil_derailment', '2017-11-01', 5), ('Ayling', '2017-01-01', 5), ('Anti-Establishment', '2016-10-01', 1), ('1951_Spanish_motorcycle_Grand_Prix', '2018-01-01', 48), ('2009-10_Brunei_Premier_League', '2017-08-01', 4), ('23_Ursae_Majoris', '2016-08-01', 90), ('1927-28_Austrian_football_championship', '2017-08-01', 4), ('Andrew_McKeever', '2017-10-01', 3), ('Clinocottus', '2017-06-01', 23), ('2006_State_of_Origin', '2015-11-01', 7), ('2013-14_Los_Angeles_Clippers_season', '2015-07-01', 8), ('Cor_Jesu', '2017-01-01', 1), ('Besseringen_B-Werk', '2017-06-01', 158), ('Amy_Hempel', '2017-07-01', 1091), ('Franc-Comtois', '2016-04-01', 2), ('Allium_giganteum', '2017-07-01', 1103), ('Abishai', '2016-08-01', 56), ('Abraham_Clark_High_School', '2016-04-01', 88), ('Baku_chronology', 
'2015-06-01', 1), ('22nd_MEU', '2015-10-01', 39), ('2015_Open_Engie_de_Touraine', '2015-10-01', 195), ('Churchill_Bowl', '2017-06-01', 30), ('AGMARK', '2017-08-01', 117), ('American_standard_wire_gauge', '2017-12-01', 3), ('Araby,_LA', '2015-05-01', 2), ('217_BC', '2016-12-01', 202), ('2008_Trinidad_and_Tobago_League_Cup', '2016-02-01', 6), ('Alazan_Bay', '2015-12-01', 22), ('Aluminum_fencing', '2015-11-01', 48), ('Achilles_tendinitis', '2016-10-01', 5884), ('AFP_Peacekeeping_Operations_Center', '2017-01-01', 64), ('2013_Xinjiang_clashes', '2016-01-01', 1), ('Arborea_Giudicato_of_Arborea', '2015-09-01', 3), ('1941_Cleveland_Rams_season', '2017-06-01', 40), ('Ju_Posht,_Rasht', '2017-01-01', 3), ('Ascalenia', '2016-07-01', 10), ('Aplectoides', '2018-02-01', 4), ('European_Cup_1969-70', '2016-11-01', 14), ('Armen_Mkertchian', '2016-05-01', 9), ('2015_Aspria_Tennis_Cup_-_Singles', '2018-02-01', 1), ('14_August_1947', '2017-11-01', 6), ('Adobe_Creative_Suite_1', '2015-05-01', 1), ('IC_chips', '2017-01-01', 2), ('Austo_AE300', '2016-07-01', 4), ('Date_palms', '2015-07-01', 79), ('BCS_bowl_game', '2017-06-01', 13), ('AR_Border', '2017-06-01', 1), ('Aranda_de_Duero', '2016-04-01', 256), ('1919_Wake_Forest_Demon_Deacons_football_team', '2016-01-01', 16), ('All_The_Wrong_Clues_For_The_Right_Solution', '2017-10-01', 9), ('Allan_Campbell_McLean', '2015-06-01', 131), ('Bradford_Council_election,_2011', '2017-06-01', 5), ('Astronomy_and_astrophysics', '2015-09-01', 62), ('Dutch_Antillean_people', '2015-07-01', 57), ('Army_Radio', '2018-03-01', 711), ('BBVA_Bancomer', '2016-11-01', 709), ('Lake_Aloha', '2017-01-01', 30), ('Andy_Bean', '2018-02-01', 3092), ('1941_Pittsburgh_Steelers_season', '2016-05-01', 147), ('Aniopi_Melidoni', '2016-06-01', 4), ('Aglossosia_fusca', '2017-09-01', 3), ('Art_books', '2017-04-01', 36), ('1929_Washington_Senators_season', '2017-04-01', 47), ('Antaeotricha_congelata', '2016-12-01', 10), ('Douglas_C-54G-5-DO_Skymaster', '2017-01-01', 1), ('Chris_Jamison', '2016-11-01', 827), ('Ace_Blackwell', '2015-11-01', 9), ('Abdul_Qadir_Fitrat', '2018-02-01', 32), ('Arnoldo_Vizcaino', '2017-10-01', 1), ('2012_Open_EuroEnergie_de_Quimper_-_Doubles', '2017-12-01', 3), ('Dale_Rominski', '2017-01-01', 7), ('ADHD_coaching', '2015-06-01', 50), ('Claire_Yiu', '2016-11-01', 209), ('Applicant', '2015-10-01', 253), ('Apache_OpenOffice', '2017-06-01', 6031), ('Abel_Kiprop_Mutai', '2015-09-01', 22), ('Airdrome_Taube', '2017-04-01', 46), ('Andrey_Viktorovich', '2016-06-01', 1), ('American_Idol_controversy', '2016-03-01', 36), ('Anthrenocerus_confertus', '2018-01-01', 17), ('Appraisal_Subcommittee', '2018-03-01', 17), ('Babusa', '2015-07-01', 3), ('500_homeruns', '2016-06-01', 1), ('Argentina_national_volleyball_team', '2016-08-01', 64), ('Chief_prosecutor_of_Russia', '2015-07-01', 1), ('Absolution_DVD', '2015-06-01', 1), ('1,3-Beta-glucan_synthase', '2017-05-01', 440), ('Dave_Sinardet', '2016-04-01', 26), ('Adeline_Whitney', '2018-03-01', 10), ('Allon_shvut', '2016-07-01', 3), ('2012_Penn_State_Nittany_Lions_football_season', '2017-12-01', 3), ('Coleman-Franklin-Cannon_Mill', '2017-01-01', 4), ('Action_director', '2015-05-01', 93), ('AD_547', '2016-01-01', 1), ('Acta_germanica', '2017-09-01', 1), ('Abu_Dhabi_Global_Market_Square', '2017-01-01', 35), ('Kozo_Shioya', '2017-01-01', 7), ('China_Investment_Corp', '2017-01-01', 2), ('Dmitri_Zakharovich_Protopopov', '2016-04-01', 129), ('Anatra_Anadis', '2017-10-01', 208), ('Archaikum', '2017-11-01', 5), ('2000_Webby_Awards', '2017-04-01', 360), 
('2003_BCR_Open_Romania_-_Singles', '2016-08-01', 2), ('Abacetus_bisignatus', '2016-09-01', 79), ('American_school_of_kinshasa', '2016-01-01', 1), ('Anna,_7th_Duchess_of_Bedford', '2016-08-01', 8), ('Black_majority_district', '2016-11-01', 3), ('Dagma_Lahlum', '2015-07-01', 1), ('Credit_Saison', '2015-07-01', 517), ('Ariyankuppam_firka', '2016-02-01', 19), ('Annette_Fuentes', '2016-06-01', 17), ('Angerstein,_John', '2015-12-01', 2), ('Annenkov_Island', '2016-03-01', 280), ('Anne_Frank_museum', '2016-06-01', 67), ('Annales_sancti_Amandi', '2017-06-01', 22), ('L-FABP', '2017-01-01', 1), ('Alvord,_TX', '2017-06-01', 12), ('2006_World_Team_Table_Tennis_Championships', '2016-05-01', 119), ('Angriffen', '2015-12-01', 9), ('Anthony_Oppenheimer', '2017-03-01', 452), ('Absamat_Masaliyevich_Masaliyev', '2016-09-01', 1), ('Airborne_Museum_at_Aldershot', '2016-03-01', 41), ('Aktiubinsk_Oblast', '2015-08-01', 7), ('100_East_Wisconsin', '2015-05-01', 782), ('7th_Bangladesh_National_Film_Awards', '2017-08-01', 91), ('Alejandro_Reyes', '2017-12-01', 35), ('Applied_philosophy', '2018-03-01', 539), ('Adhemar_Pimenta', '2016-06-01', 146), ('Break_the_fourth_wall', '2016-04-01', 66), ('Annoushka_Ducas', '2017-10-01', 411), ('ATC_code_J01CA01', '2015-06-01', 1), ('Evelyn_County,_New_South_Wales', '2016-11-01', 7), ('Elastic_scattering', '2016-11-01', 1374), ('1032_Pafuri', '2015-07-01', 35), ('Andrew_Bromwich', '2015-08-01', 26), ('Ishita_Arun', '2017-01-01', 249), ('Aspergics', '2016-07-01', 1), ('1857_in_Chile', '2018-03-01', 22), ('Breffni', '2015-07-01', 38), ('845_in_poetry', '2017-08-01', 2), ('20321_Lightdonovan', '2015-10-01', 12), ('Arthur_Chandler', '2017-12-01', 27), ('CsISOLatin2', '2017-06-01', 1), ('1900_Grand_National', '2016-06-01', 69), ('Aeritalia_AMX', '2017-03-01', 3), ('B_Sharps', '2015-06-01', 11), ('544_area_code', '2015-09-01', 2), ('30th_Guldbagge_Awards', '2015-06-01', 37), ('Agrippina', '2017-08-01', 315), ('Ardmore', '2016-02-01', 433), ('Amplypterus_panopus', '2016-03-01', 23), ('Alexander_Bukharov', '2017-09-01', 5), ('Alaska_Raceway_Park', '2017-01-01', 46), ('Albanian_National_Road_Race_Championships', '2017-03-01', 31), ('1968_Democratic_National_Convention_protest_activity', '2017-10-01', 2802), ('2012_Birthday_Honours', '2017-10-01', 427), ('2000_NHL_expansion_draft', '2017-06-01', 1), ('A_Town_Where_You_Live', '2016-11-01', 2920), ('Ahmed_Shahzad', '2018-03-01', 25), ('Elisabeth_Svendsen', '2016-11-01', 39), ('2002_FINA_Synchronised_Swimming_World_Cup', '2016-08-01', 30), ('Akatek', '2017-04-01', 10), ('Animation_with_DAZ_Studio', '2018-02-01', 78), ('Fergus_Craig', '2016-11-01', 119), ('Ancel_Nalau', '2015-11-01', 5), ('5171_Augustesen', '2017-04-01', 20), ('Anne_McGuire', '2017-11-01', 329), ('Australian_Photoplay_Company', '2015-12-01', 6), ('1913_in_Canada', '2017-04-01', 137), ('Arhopala_allata', '2015-05-01', 26), ('Il_Paradiso_delle_Signore', '2017-01-01', 31), ('Geri_Palast', '2017-01-01', 38), ('Alan_Abela_Wadge', '2017-03-01', 77), ('22nd_Tactical_Air_Support_Squadron', '2017-10-01', 7), ('Avant_Stellar', '2017-06-01', 22), ('Black_phantom_tetra', '2016-11-01', 205), ('Billy_McCaffrey', '2017-01-01', 314), ('Annie_Furuhjelm', '2017-11-01', 97), ('1992_PGA_Tour', '2017-12-01', 307), ('2008_Chilean_pork_crisis', '2016-01-01', 55), ('2012_Currie_Cup_First_Division', '2018-02-01', 32), ('Aleksei_Fomkin', '2015-05-01', 144), ('Alexander_Krausnick-Groh', '2016-05-01', 101), ('Adam_Richard_Wiles', '2017-08-01', 5), ('ATCvet_code_QA01AD01', '2015-09-01', 2), 
('Abu_Bakr_Ibn_Bajja', '2017-03-01', 5), ('Architecture-Studio', '2016-04-01', 94), ('950s_BC', '2016-02-01', 257), ('Abschwunges', '2017-07-01', 1), ('Adonis_Geroskipou', '2017-06-01', 15), ('2008-09_SV_Werder_Bremen_season', '2016-03-01', 3), ('Closed_loops', '2016-04-01', 1), ('AFC_Youth_Championship_1982', '2015-12-01', 10), ('Aquila_Shoes', '2015-08-01', 209), ('9842_Funakoshi', '2017-12-01', 11), ('Educational_quotient', '2016-04-01', 21), ('Antoni_Julian_Nowowiejski', '2018-01-01', 211), ('Adi_Oka_Idhile', '2017-11-01', 16), ('DEXIA-BIL_Luxembourg_Open', '2016-11-01', 3), ('Andrew_James_Simpson', '2016-03-01', 43), ('Alexander_Boksenberg', '2017-12-01', 61), ('1827_in_Denmark', '2017-03-01', 39), ('Afternoon_tea_with_suggs', '2017-11-01', 3), ('Alpha,_MN', '2017-06-01', 6), ('Ari_Onasis', '2015-06-01', 4), ('1961-62_Football_League_First_Division', '2015-11-01', 1), ('Andi_Lila', '2015-06-01', 2847), ('A_Gathering_Of_Old_Men', '2018-02-01', 1), ('Abul_Fazl_al-Abbas', '2017-01-01', 1), ('Asgill,_Charles', '2017-08-01', 1), ('Alexander_Arkhangelsky', '2015-07-01', 12), ('1947-48_Portuguese_Liga', '2015-06-01', 1), ('3rd_MMC_-_Varna', '2016-07-01', 3), ('Alberts,_Wayne', '2017-05-01', 3), ('Alois_Schickelgruber', '2018-02-01', 9), ('Hefner_Stadium', '2017-01-01', 2), ('410912_Lisakaroline', '2018-02-01', 26), ('Academy_at_Mountain_State', '2018-03-01', 1), ('617_Squadron', '2016-05-01', 489), ('Al_Silm_Haji_Hajjaj_Awwad_Al_Hajjaji', '2015-07-01', 5), ('Arturo_Merino_Benitez_Airport', '2017-10-01', 13), ('AEK_Athens_Futsal', '2015-06-01', 10), ('Aggaeus', '2018-02-01', 2), ('Association_for_Retarded_Citizens_of_the_United_States', '2017-08-01', 3), ('Kielce_pogrom', '2017-01-01', 335), ('1351_in_poetry', '2016-01-01', 17), ('1923_Princeton_Tigers_football_team', '2017-11-01', 41), ('Auzata_semipavonaria', '2017-01-01', 2), ('892_in_poetry', '2016-01-01', 6), ('Anton_Krotiak', '2017-12-01', 2), ('Arthur_Shelley', '2017-12-01', 23), ('2003_Kyoto_Purple_Sanga_season', '2018-02-01', 9), ('Frederic_Bowker_Terrington_Carter', '2016-04-01', 6), ('2-orthoplex', '2016-03-01', 1), ('Acacia_australiana', '2015-09-01', 4), ('2012_Newcastle_Knights_season', '2016-06-01', 103), ('Ann_Wrights_Corner,_Virginia', '2017-07-01', 19), ('12557_Caracol', '2017-03-01', 5), ('2001_African_Footballer_of_the_Year', '2017-05-01', 1), ('Bass_Pyramid', '2017-01-01', 22), ('A_noodle', '2015-05-01', 5), ('Aed_Bennan', '2018-02-01', 2), ('1886_Yale_Bulldogs_football_team', '2017-10-01', 58), ('2002_Players_Championship', '2016-06-01', 54), ('African_Skimmer', '2017-07-01', 2), ('3rd_Guldbagge_Awards', '2016-12-01', 39), ('Arrows_A19B', '2015-10-01', 1), ('Archduchess_Elisabetta_of_Austria-Este', '2017-08-01', 1526), ('America_Islands', '2015-11-01', 1), ('1932_Olympic_Games', '2016-01-01', 9), ('2011_Chinese_pro-democracy_protests', '2015-11-01', 2044), ('Bank_walkaway', '2016-04-01', 113), ('594_in_Ireland', '2017-04-01', 1), ('Association_of_Municipal_Corporations', '2016-12-01', 5), ('Andreas_Brantelid', '2015-09-01', 167), ('Amarthal_urf_Unchagaon', '2017-05-01', 82), ('3-methoxymorphinan', '2017-04-01', 146), ('2382_BC', '2016-07-01', 10), ('1763_in_science', '2016-07-01', 28), ('Arvert', '2017-04-01', 77), ('Ale_yeast', '2017-12-01', 19), ('A_Man_Without_a_Soul', '2018-03-01', 17), ('Air_Force_Base_Louis_Trichardt', '2017-09-01', 1), ('Athirson_Mazzoli_de_Oliveira', '2017-06-01', 3), ('Anthony_Chan_Yau', '2017-07-01', 181), ('Basic_Enlisted_Submarine_School', '2017-06-01', 392), 
('Aboriginal_Lands_of_Hawaiian_Ancestry', '2015-09-01', 11), ('Fondren_Southwest,_Houston', '2017-01-01', 4), ('3_World_Financial_Center', '2017-07-01', 64), ('1971_IIHF_European_U19_Championship', '2017-09-01', 9), ('1937-38_Allsvenskan', '2015-12-01', 6), ('Christopher_Ashton_Kutcher', '2017-06-01', 2), ('Australian_rules_football_in_south_australia', '2016-12-01', 1), ('Amicable_pair', '2018-01-01', 7), ('Alan_Tomes', '2015-11-01', 82), ('Alexei_Petrovich,_Tsarevich_of_Russia', '2015-12-01', 3887), ('Alexis_Damour', '2015-10-01', 66), ('Bankruptcy_Act_of_1938', '2017-06-01', 76), ('Amphiphyllum', '2016-06-01', 14), ('Conway_High_School_West', '2016-04-01', 1), ('5447_Lallement', '2015-11-01', 10), ('Gabriel_Iddan', '2017-01-01', 1), ('1879-80_Scottish_Cup', '2017-04-01', 3), ('2011_Eneco_Tour', '2016-10-01', 31), ('1471_in_England', '2015-11-01', 94), ('Ashland_Town_Hall', '2017-01-01', 5), ('Archduke_John', '2015-05-01', 20), ('2000_Cameroonian_Premier_League', '2017-09-01', 18), ('1997_flood', '2017-11-01', 5), ('Agile_management', '2015-09-01', 26677), ('Am_841', '2017-12-01', 3), ('Apprentice_Mason', '2018-01-01', 4), ('Hales-Jewett_theorem', '2017-01-01', 2), ('Alien_Abductions', '2017-10-01', 14), ('Arjun_Menon', '2016-02-01', 370), ('Anthokyan', '2016-01-01', 4), ('Automobili_Lamborghini', '2016-02-01', 1110), ('Alain_Prost', '2017-04-01', 25196), ('Fartein_Valen', '2016-04-01', 90), ('Antonio_Galli_da_Bibiena', '2016-05-01', 5), ('Al_Jawf,_Libya', '2017-03-01', 600), ('AD_695', '2018-02-01', 1), ('Amir_chand', '2015-11-01', 1), ('Alcis_obliquisigna', '2017-08-01', 1), ('Chandra_Talpade_Mohanty', '2017-01-01', 306), ('Algerian_safe_house,_Jalalabad', '2015-06-01', 3), ('Jake_Milner', '2017-01-01', 1), ('Alternate_Communications_Center', '2017-10-01', 1), ('In_the_Bleachers', '2017-01-01', 42), ('Alex_Puodziukas', '2016-04-01', 7), ('Altarpiece_of_Pilgrim_II', '2018-02-01', 2), ('Cybernetical_Physics', '2017-01-01', 3), ('Christopher_Unthank', '2017-06-01', 2), ('1982_Independence_Bowl', '2015-06-01', 102), ('Ascoli_Calcio_1898', '2018-03-01', 1115), ('Briggs-Rauscher_reactions', '2017-06-01', 1), ('Adjadja', '2018-02-01', 45), ('Afghanistan_from_Ahmad_Shah_until_Dost_Mohammed', '2016-06-01', 3), ('Catholic_social_doctrine', '2017-01-01', 6), ('2833_BC', '2016-11-01', 1), ('Bethy_Woodward', '2016-04-01', 38), ('Bateman_polynomials', '2017-06-01', 22), ('1966_Buenos_Aires_Grand_Prix', '2015-10-01', 19), ('A_River_Somewhere', '2015-10-01', 353), ('2016-17_BVIFA_National_Football_League', '2017-04-01', 2), ('1909_Major_League_Baseball_season', '2015-10-01', 362), ('1988_Oklahoma_Sooners_football', '2017-11-01', 2), ('2010s_in_Chechen_fashion', '2016-10-01', 1), ('Accademia_Olimpica', '2017-08-01', 17), ('Air_cooling', '2015-07-01', 2010), ('Amir_Saoud', '2016-11-01', 22), ('Alex_Auburn', '2015-05-01', 52), ('Apamea_impulsa', '2016-11-01', 6), ('Australian_federal_election,_2007', '2015-07-01', 1794), ('Ain_Sakhri', '2017-10-01', 76), ('Belosaepiidae', '2015-07-01', 68), ('Acts_of_Parliament_in_the_United_Kingdom', '2017-10-01', 4070), ('Equity_Office', '2016-11-01', 202), ('David_Bintley', '2017-01-01', 51), ('Aksel_Schiotz', '2018-03-01', 3), ('Appropriation_Act_2000', '2017-05-01', 12), ('Edward_Johnson_III', '2016-11-01', 491), ('2006_Ohio_State_Buckeyes_football_team', '2016-03-01', 1452), ('Battle_of_Fort_Beausejour', '2015-07-01', 97), ('Abel_Foullon', '2015-12-01', 82), ('Apollo_VIII', '2015-10-01', 19), ('Carry_on_up_the_jungle', '2015-07-01', 8), ('Armour_villa', 
'2017-05-01', 4), ('201_Poplar', '2015-08-01', 265), ('Arta_prefecture', '2016-08-01', 1), ('2015-16_Ekstraklasa', '2018-02-01', 13), ('Alport,_Ontario', '2018-02-01', 2), ('Bongoland', '2017-06-01', 62), ('Alfred_Charles_Post', '2016-11-01', 11), ('Aam_Aadmi_Party_crisis', '2016-10-01', 1), ('Andrea_Moda', '2015-07-01', 143), ('Abdul_Halim_Sharar', '2017-08-01', 545), ('Apostolic_Vicariate_of_Yunnan', '2016-12-01', 1), ('Catherine_Steadman', '2016-11-01', 5218), ('Agastachys_odorata', '2015-10-01', 38), ('9783_Tensho-kan', '2016-03-01', 2), ('AFL_Cairns', '2017-10-01', 337), ('Abomey', '2015-06-01', 1062), ('Anne_Crofton,_1st_Baroness_Crofton', '2015-12-01', 42), ('Cash-flow_return_on_investment', '2017-01-01', 137), ('Alberto_Arvelo_Torrealba_Municipality', '2015-08-01', 56), ('Abyssinian_Shorthorned_Zebu', '2017-09-01', 124), ('Albanian_hip_hop', '2016-01-01', 1812), ('Alphonso_IV_of_Portugal', '2016-02-01', 12), ('19th_The_Alberta_Mounted_Rifles', '2016-10-01', 1), ('Chinese_shadow_theatre', '2016-04-01', 1), ('American_Committee_of_the_Fourth_International', '2017-08-01', 4), ('2014_Bahrain_GP2_Series_round', '2016-03-01', 80), ('Alexandrian_orthodox', '2017-09-01', 2), ('2010_Hurricane_Season', '2015-05-01', 18), ('1938_All-Ireland_Senior_Camogie_Championship_Final', '2017-01-01', 1), ('ATC_code_D01', '2018-01-01', 203), ('Albedo', '2015-08-01', 23484), ('Chavigny,_Meurthe-et-Moselle', '2017-01-01', 12), ('Becky_Essex', '2015-07-01', 51), ('Archaeological_Museum_Padre_Le_Paige', '2018-02-01', 2), ('Abu_Bakar_Sillah', '2017-01-01', 5), ('Back_chat', '2017-01-01', 2), ('Anchylobela_dyseimata', '2015-12-01', 11), ('Anthony_Overton', '2017-03-01', 261), ('Bear_maul', '2016-04-01', 3), ('Ambarawa,_Central_Java', '2016-01-01', 1), ('Amber_lager', '2016-11-01', 87), ('2nd_LAAD', '2017-09-01', 8), ('Ashiya,_Hyogo', '2018-03-01', 24), ('Angels_at_Risk', '2018-02-01', 74), ('Audrey_Marie_Munson', '2016-03-01', 17), ('1984_Australian_Football_Championships', '2017-01-01', 27), ('Ammonia_fountain', '2016-06-01', 434), ('Allister_Bentley', '2018-03-01', 11), ('Alsager_Hay_Hill', '2016-10-01', 72), ('1753_English_cricket_season', '2015-05-01', 51), ('2009-10_New_Jersey_Devils_season', '2016-10-01', 1), ('An_Untamed_State', '2016-05-01', 1109), ('Beatrice_Carmichael', '2016-11-01', 5), ('Abdul_Ghani_Ahmad', '2017-12-01', 115), ('Arteria_suralis', '2017-02-01', 3), ('Berzasca_River', '2017-01-01', 1), ('Angel_Attack', '2015-09-01', 98), ('1969_San_Francisco_49ers_football_team', '2017-11-01', 1), ('Anthony_Beilenson', '2017-09-01', 114), ('Crystalline_Entity', '2016-04-01', 180), ('Granice', '2017-01-01', 2), ('203rd_General_Hospital', '2017-07-01', 44), ('Acrocercops_rhombiferellum', '2017-12-01', 20), ('Ampliglossum_blanchetii', '2017-05-01', 1), ('11553_Scheria', '2017-03-01', 2), ('Ashkenozi', '2017-02-01', 1), ('2010_Calder_Cup_Playoffs', '2018-01-01', 9), ('Alice_Caymmi', '2016-01-01', 121), ('Alfredo_Alvar', '2017-04-01', 44), ('2006_Legends_Tour', '2017-07-01', 30), ('Albano_Albanese', '2015-10-01', 53), ('1943_Frankford_Junction_train_wreck', '2016-08-01', 510), ('Evans_Court_Apartment_Building', '2016-04-01', 4), ('Abu_al-Rayhan_Muhammad_ibn_al-Biruni', '2017-11-01', 1), ('Abubakar_Muhammad_Rimi', '2015-05-01', 4), ('Dostpur', '2016-11-01', 26), ('Accessories_Council_Excellence_Awards', '2016-03-01', 14), ('2006_North_American_heat_wave', '2015-06-01', 1161), ('Amstelodamum', '2017-09-01', 12), ('A_Very_Peculiar_Practice', '2016-08-01', 1860), ('Allegorie_der_Liebe', '2015-09-01', 1), 
('Alex_Mackie', '2017-02-01', 95), ('1812_Homestead_Farm_and_Museum', '2017-09-01', 29), ('Argus_distribution', '2016-03-01', 8), ('Anthony_Thomas_Stover', '2017-02-01', 1), ('Arthur_Shallcross', '2016-11-01', 20), ('Antoine_Francois_Fourcroy', '2018-01-01', 1), ('Abbas_Halim', '2016-11-01', 21), ('Akiva_Baer_ben_Joseph', '2017-08-01', 1), ('Balatonfuered', '2016-11-01', 3), ('Antemnae', '2017-11-01', 204), ('Cling_Cling', '2017-06-01', 93), ('B_flat_major', '2017-01-01', 28), ('AirExplore', '2017-12-01', 930), ('Auckland_Super_Sprint', '2015-11-01', 120), ('Alfredo_De_Gasperis', '2017-12-01', 793), ('Geoffrey_I_of_Vianden', '2017-01-01', 5), ('Copa_de_Zaachila', '2016-04-01', 6), ('Alboacen', '2017-09-01', 1), ('BNH_Hospital_Bangkok', '2017-06-01', 2), ('Agricultural_health_and_safety', '2016-09-01', 1), ('Chiasms', '2017-06-01', 2), ('Al_Karaana', '2016-05-01', 58), ('Alberta_Highway_872', '2016-11-01', 1), ('Among_the_mourners', '2016-03-01', 1), ('Achema_Power_Plant', '2015-06-01', 55), ('ATSE_Graz', '2017-10-01', 65), ('Arthroscopy', '2017-02-01', 11721), ('2010-2012_European_Nations_Cup_Second_Division', '2018-01-01', 7), ('1967_Cincinnati_Reds', '2015-08-01', 4), ('24th_Golden_Disc_Awards', '2017-05-01', 71), ('Johnny_Floyd', '2017-01-01', 17), ('Arthur_Rupin', '2016-02-01', 5), ('Alpine_skiing_at_the_2011_Canada_Winter_Games', '2015-09-01', 38), ('College_Press_Service', '2017-01-01', 8), ('American_Psycho', '2015-08-01', 55567), ('CBC_Winnipeg', '2017-06-01', 17), ('Burning_the_process', '2016-04-01', 1), ('2011_Stanley_Cup_playoffs', '2017-01-01', 1036), ('Andrew_Mumford', '2017-01-01', 6), ('1925_in_fine_arts_of_the_Soviet_Union', '2018-02-01', 28), ('Aragvi_river', '2017-02-01', 2), ('Andrew_Adamson', '2018-03-01', 16269), ('Arcides_fulvohirta', '2016-10-01', 1), ('Araya_Selassie_Yohannes', '2015-11-01', 423), ('Apartment_house', '2016-09-01', 85), ('Advanced_Art', '2015-12-01', 171), ('1028_Lydina', '2015-06-01', 53), ('2005_July_6_United_Nations_assault_on_Cite_Soleil,_Haiti', '2017-04-01', 2), ('Adolph_Weiss', '2015-06-01', 98), ('Adam_Jerzy_Czartoryski', '2015-09-01', 1237), ('1980_United_States_presidential_election', '2017-05-01', 56), ('1956_Oscars', '2016-08-01', 10), ('Burundian_Senate_election,_2005', '2016-04-01', 1), ('Amarolea_floridana', '2015-07-01', 3), ('August_Bier', '2015-12-01', 514), ('Arbelodes_sebelensis', '2018-03-01', 6), ('Abiah_Brown', '2018-02-01', 1), ('A_Maceo_Smith_High_School', '2016-10-01', 2), ('1488_in_architecture', '2017-12-01', 6), ('2009_AMP_Energy_500', '2016-04-01', 45), ('1921_Baylor_Bears_football_team', '2017-03-01', 21), ('Dmitry_Akhba', '2015-07-01', 43), ('2004_Big_12_Conference_Baseball_Tournament', '2016-07-01', 37), ('Abdisalam_Omer', '2018-02-01', 116), ('Alma,_son_of_Alma', '2015-08-01', 53), ('An_Phoblacht', '2016-10-01', 962), ('2009_Turner_Prize', '2016-01-01', 75), ('Jack_Zajac', '2017-01-01', 24), ('1906_Wimbledon_Championships', '2016-04-01', 22), ('Chuckwalla_Valley', '2017-06-01', 22), ('Alien_Quadrology', '2016-02-01', 1), ('Chalcidoptera_contraria', '2016-04-01', 1), ('Alaska_Republican_Gubernatorial_Primary_Election,_2006', '2016-02-01', 1), ('333639_Yaima', '2018-02-01', 7), ('Aquila_hastata', '2015-11-01', 28), ('Al-Fua', '2017-07-01', 1), ('Anihilation', '2015-07-01', 28), ('International_Toy_Fair', '2017-01-01', 1), ('38th_Regiment_Indiana_Infantry', '2017-01-01', 10), ('Andrea_Stella', '2017-07-01', 75), ('Anselmo_de_Moraes', '2015-09-01', 562), ('Applemore', '2016-05-01', 3), ('Akpinar,_Kirsehir', 
'2015-06-01', 3), ('Ant_nest', '2016-05-01', 53), ('Catherine_of_Siena', '2016-11-01', 8806), ('Barbos', '2015-06-01', 12), ('Amlaib_mac_Iduilb', '2017-08-01', 2), ('Alice_Janowski', '2018-03-01', 17), ('Acacia_leptocarpa', '2017-03-01', 48), ('Al-Hadi_Yahya', '2016-01-01', 39), ('2015_British_Figure_Skating_Championships', '2017-07-01', 38), ('Avenues_Television', '2016-03-01', 214), ('Dendropsophus_sartori', '2015-07-01', 11), ('1952_in_Germany', '2015-05-01', 63), ('Armuchee_High_School', '2016-04-01', 27), ('April_1_RFC', '2017-11-01', 2), ('Caroline_Bliss', '2016-11-01', 972), ('66th_Rice_Bowl', '2016-06-01', 17), ('Alec_Smight', '2017-02-01', 173), ('Alexei_Panin', '2017-09-01', 3), ('Codeword', '2016-04-01', 84), ('Dormice', '2015-07-01', 63), ('2105_BC', '2017-11-01', 6), ('5th_National_Congress_of_Kuomintang', '2016-06-01', 5), ('Caminho_das_Indias', '2017-01-01', 5), ('Agerbo', '2017-11-01', 2), ('Abe_Anellis', '2018-01-01', 86), ('Aceh_Medal', '2015-07-01', 33), ('Alltech_Arena', '2016-10-01', 144), ('Aly_Oury', '2016-06-01', 260), ('757th_Troop_Carrier_Squadron', '2017-07-01', 2), ('Alec_Peters', '2017-12-01', 2731), ('Agua_Buena_Airport', '2017-09-01', 12), ('Alessandro_Livi', '2016-08-01', 104), ('Andkaer', '2017-04-01', 3), ('Cateran', '2017-06-01', 135), ('57th_Venice_International_Film_Festival', '2017-04-01', 180), ('Brijal_Patel', '2017-06-01', 98), ('Cnemaspis_jerdonii', '2015-07-01', 6), ('Aluminum_sodium_salt', '2016-10-01', 3), ('Arnaldo_Antonio_Sanabria_Ayala', '2017-09-01', 4), ('Angels_of_Iron', '2018-02-01', 83), ('Bugs_Bunny_Rabbit_Rampage', '2017-06-01', 422), ('Admiralty_Class_Destroyer', '2017-10-01', 2), ('Atlas_Media', '2017-05-01', 2), ('Arcesilaus_i_of_cyrene', '2017-03-01', 1), ('2011_Tajikistan_national_football_team_results', '2017-04-01', 13), ('Artur_Shakhnazarov', '2017-12-01', 22), ('747_Express_Bus', '2018-03-01', 20), ('101-in-1_Party_Megamix', '2017-10-01', 188), ('Fastpoint_Games', '2016-11-01', 32), ('Analog_Anthology_1', '2017-07-01', 1), ('Archival_bond', '2015-09-01', 119), ('1985_Air_Force_Falcons_football', '2017-09-01', 4), ('American_Airlines_plane_diverted_to_Miami_after_landing_gear_problem', '2017-06-01', 3), ('Adaptive_Evolution_in_the_Human_Genome', '2017-08-01', 2), ('Arthur_Strangways', '2015-11-01', 5), ('1583_in_poetry', '2015-09-01', 68), ('Andrew_igoudala', '2015-06-01', 2), ('Euonychophora', '2016-11-01', 37), ('Catechizing', '2016-04-01', 4), ('1960-61_ice_hockey_Bundesliga_season', '2018-03-01', 3), ('Buk_Vlaka', '2017-06-01', 10), ('Arbor_Day', '2018-03-01', 16265), ('Guan_Sheng', '2017-01-01', 73), ('2014_Barcelona_Open_Banc_Sabadell', '2016-08-01', 57), ('1976-77_Nationalliga_A', '2016-04-01', 1), ('AFL_records', '2015-11-01', 16), ('2005_Tour_Down_Under', '2016-10-01', 26), ('92_BCE', '2015-08-01', 4), ('Bento_Box_Animation', '2017-01-01', 1), ('Alabama_Territory', '2018-03-01', 1195), ('Abdul-Wasa_Al-Saqqaf', '2016-07-01', 21), ('Archbishops_of_Semarang', '2017-01-01', 6), ('Ambivina', '2017-10-01', 13), ('Aghjaghala_Ulia', '2017-08-01', 2), ('Blechnum_novae-zelandiae', '2016-11-01', 26), ('Dictyosome', '2016-04-01', 19), ('Arts_Council_of_Great_Britain', '2016-12-01', 785), ('LBC_Radio', '2017-01-01', 3), ('Ageo,_Saitama', '2016-06-01', 396), ('Babla_Mehta', '2016-12-01', 674), ('2012-13_Russian_Cup', '2018-01-01', 10), ('Chandragupt', '2017-06-01', 6), ('407th_Air_Refueling_Squadron', '2016-01-01', 96), ('Aftermarket', '2016-07-01', 1253), ('A_Portrait_of_New_Orleans', '2016-08-01', 18), ('2000-01_Yemeni_League', 
'2017-03-01', 1), ('Actinidia_chinensis', '2015-11-01', 907), ('Amsterdam_Tournament_1999', '2018-03-01', 1), ('Arthur_Iberall', '2017-02-01', 112), ('Auricula_Meretricula', '2016-02-01', 103), ('Archbishop_of_Lahore', '2016-09-01', 8), ('Chippewa_Indians_of_Montana', '2016-04-01', 9), ('Abidjan-Niger_Railway', '2018-01-01', 22), ('29th_Annual_Grammy_Awards', '2017-05-01', 1087), ('Ateles_geoffroyi_frontatus', '2017-06-01', 3), ('Enrico_Cernuschi', '2016-11-01', 3), ('A4183_road', '2017-02-01', 8), ('Ahrayut', '2016-10-01', 75), ('Alison_Castle', '2016-03-01', 55), ('Automobile_aftermarket', '2016-10-01', 5), ('2008_GAINSCO_Auto_Insurance_Indy_300', '2016-07-01', 51), ('1937_Scottish_Cup_Final', '2017-04-01', 126), ('2005_Clipsal_500_Adelaide', '2018-02-01', 22), ('Farid_Farjad', '2016-04-01', 120), ('13_Tribes_of_Long_Island', '2015-12-01', 11), ('Afroneta_bamilekei', '2017-01-01', 2), ('Frederick_Stuart_Greene', '2017-01-01', 1), ('Andre_Braugher', '2017-04-01', 37655), ('1906_International_Lawn_Tennis_Challenge', '2017-10-01', 73), ('2009-10_NFL_Playoffs', '2016-01-01', 69), ('Cricket_Wellington', '2016-11-01', 2), ('Craig_Blazer', '2015-07-01', 21), ('Aeolidiella_orientalis', '2017-05-01', 3), ('Andre_Prokovsky', '2017-06-01', 4), ('Angela_McKee', '2017-11-01', 14), ('Airbase_Golubovci', '2016-10-01', 1), ('2011_ISAF_Sailing_World_Championships', '2017-05-01', 89), ('Bartica_Airport', '2017-06-01', 27), ('Agusan_Dam', '2016-09-01', 454), ('Bosque_Real_Country_Club', '2015-07-01', 42), ('Georges_Duhamel', '2017-01-01', 122), ('Allrounder', '2017-03-01', 63), ('2017_Missouri_State_Bears_football_team', '2017-09-01', 868), ('Allons_a_Lafayette', '2017-11-01', 17), ('Agathla', '2015-05-01', 105), ('1086_in_poetry', '2015-09-01', 25), ('Absolute_extreme', '2017-09-01', 1), ('Agathe_Bonitzer', '2017-12-01', 229), ('Chinese_Red_Pine', '2017-06-01', 18), ('Angular_dispersion', '2016-02-01', 11), ('Jean-Sebastian_Giguere', '2017-01-01', 2), ('Actinium-235', '2018-03-01', 4), ('Ago,_filo_e_nodo', '2017-02-01', 11), ('Aranea_cruentata', '2016-03-01', 1), ('2009_Korea_National_League', '2017-11-01', 19), ('Americom-8', '2016-08-01', 28), ('2006_Junee_Bushfire', '2018-03-01', 81), ('2013_Major_League_Baseball_Home_Run_Derby', '2017-09-01', 182), ('1928_US_Presidential_Election', '2016-12-01', 42), ('After-eighty_generation', '2016-02-01', 127), ('1932_Hawthorn_Football_Club_season', '2017-07-01', 16), ('Amelia_Elizabeth_Mary_Rygate', '2017-05-01', 2), ('Aline_Khalaf', '2017-12-01', 465), ('Akron_Junction,_New_York', '2017-07-01', 56), ('Apollo_moon_landing_conspiracy_theories', '2015-09-01', 4), ('1978_National_League_Championship_Series', '2017-03-01', 325), ('1959-60_German_football_championship', '2017-08-01', 5), ('Almost_a_Bride', '2017-01-01', 1), ('Andrew_Lysaght,_junior', '2015-10-01', 20), ('1902_Otani_expedition', '2018-02-01', 1), ('1892_Currie_Cup', '2016-09-01', 53), ('1988_USC_Trojans_football_team', '2016-10-01', 494), ('1944_in_Northern_Ireland', '2016-12-01', 46), ('Alfred_Acherman', '2017-07-01', 1), ('Arcadia,_Nebraska', '2017-02-01', 148), ('4_x_400_metre_relay', '2018-03-01', 1), ('A4030_road', '2016-07-01', 1), ('Chi-li', '2016-11-01', 3), ('Aircraft_fairing', '2016-11-01', 1861), ('Buddhism_in_Belize', '2015-07-01', 40), ('Alameda_County_Open', '2017-02-01', 33), ('Area_of_countries_and_regions_of_the_United_Kingdom', '2017-10-01', 6), ('2014_Weber_State_Wildcats_football_team', '2016-10-01', 47), ('American_Journal_of_Comparative_Law', '2016-04-01', 62), 
('A_Teaspoon_Every_Four_Hours', '2017-03-01', 47), ('Astasis', '2016-03-01', 1195), ('Akhrakouaeronon', '2015-11-01', 62), ('Annenkrone', '2016-03-01', 40), ('Ballotine', '2016-12-01', 4753), ('2000_Kipawa_earthquake', '2015-11-01', 139), ('Archdiocese_of_cashel_and_emly', '2017-01-01', 1), ('Chevrolet_SS396', '2017-01-01', 1), ('Achyroseris', '2016-03-01', 1), ('Daniel_Pulteney', '2016-11-01', 29), ('2006_Major_League_Baseball_draft', '2017-07-01', 10637), ('Adetunji_Idowu_Olurin', '2016-01-01', 37), ('Ardatov,_Nizhny_Novgorod_Oblast', '2017-04-01', 18), ('Andrew_Hilditch', '2015-08-01', 398), ('A_Very_Merry_Daughter_Of_the_Bride', '2017-04-01', 67), ('1993_in_radio', '2017-08-01', 85), ('Deltan', '2016-11-01', 91), ('Adnan_Custovic', '2017-12-01', 26), ('Di_Gennaro', '2017-01-01', 4), ('237_AD', '2017-11-01', 1), ('Aaron_Gombar', '2018-03-01', 2), ('Acrolophus', '2017-04-01', 47), ('Alfred_Bergman', '2017-06-01', 27), ('Charles_Bebb', '2017-06-01', 39), ('Dirico', '2017-01-01', 24), ('1982_Major_League_Baseball_Draft', '2016-12-01', 90), ('DDT_wrestling', '2016-11-01', 4), ('1988-89_Houston_Rockets_season', '2016-02-01', 10), ('Acacia_loderi', '2015-11-01', 35), ('2015_Deauville_American_Film_Festival', '2016-10-01', 126), ('Andropadus_importunus', '2016-02-01', 9), ('Antonio_Bacchetti', '2017-04-01', 52), ('Ann_Trindade', '2015-09-01', 49), ('5_x_Monk_5_x_Lacy', '2016-05-01', 37), ('Barlochan,_Ontario', '2017-06-01', 2), ('Achaian', '2017-03-01', 35), ('Flow_rider', '2017-01-01', 1), ('Antiblemma_discerpta', '2018-02-01', 1), ('1997_Illinois_Fighting_Illini_football_team', '2017-11-01', 331), ('Ahrntal', '2016-03-01', 540), ('Apollo_Conference', '2015-10-01', 329), ('Algenib_in_Perseus', '2016-01-01', 1), ('Craig_Norgate', '2016-04-01', 42), ('Antwerp_Zoo', '2015-12-01', 879), ('Cold_Contagious', '2017-06-01', 161), ('Bolito', '2016-11-01', 181), ('Chinese_bridges', '2016-11-01', 1), ('14th_Indiana_Infantry_Regiment', '2017-04-01', 115), ('Bindunuwewa_massacre', '2015-07-01', 52), ('Eastshore_Highway', '2016-11-01', 2), ('Daemonologie', '2017-01-01', 1655), ('Aero_Pacifico', '2015-07-01', 1), ('Blue_Ribbon_Schools_Program', '2017-06-01', 557), ('Ash_Township,_MI', '2018-02-01', 3), ('Al-Hatab_Square', '2018-02-01', 450), ('Alje_Vennema', '2018-02-01', 187), ('1920_All-Ireland_Senior_Football_Championship_Final', '2016-05-01', 40), ('Criss_Oliva', '2016-11-01', 801), ('Bethlehem,_Ohio', '2017-01-01', 16), ('1976_WHA_Amateur_Draft', '2015-08-01', 47), ('Angela_Fimmano', '2017-06-01', 17), ('Alexander_Bonini_of_Alexandria', '2017-09-01', 1), ('Anarchist_faq', '2015-05-01', 13), ('Aleksander_Benedykt_Sobieski', '2016-05-01', 240), ('Cape_Florida_Lighthouse', '2016-04-01', 6), ('Fernando_VI_of_Spain', '2017-01-01', 3), ('Crossing_number', '2017-06-01', 29), ('1984_NSL_Cup', '2017-05-01', 26), ('Barbara_Weldon', '2015-06-01', 29), ('Andreas_Olsen', '2017-01-01', 32), ('Battle_of_Baima', '2016-04-01', 2), ('Amory_Hansen', '2016-05-01', 26), ('Akhmimic', '2015-11-01', 41), ('Al_Awda', '2018-02-01', 18), ('Adelheid-Marie_of_Anhalt-Dessau', '2016-07-01', 70), ('Americans_for_Technology_Leadership', '2015-10-01', 90), ('Belizean_diplomatic_missions', '2017-06-01', 3), ('African_communist', '2016-05-01', 3), ('Andosol', '2016-09-01', 246), ('Alan_Attraction', '2016-05-01', 15), ('A_Yank_in_Rome', '2015-12-01', 70), ('2004_in_the_United_Arab_Emirates', '2018-02-01', 33), ('Additionality', '2017-06-01', 371), ('Assassination_of_Trotsky', '2015-06-01', 47), ('Alice_Sotero', '2018-02-01', 27), 
('Agyneta_platnicki', '2016-04-01', 4), ('Alexandra_Vasilyevna_Velyaminova', '2015-07-01', 30), ('1881_in_Chile', '2016-06-01', 16), ('Arterial_ischemic_stroke', '2018-01-01', 57), ('Astro_Glacier', '2015-09-01', 27), ('Chester_Earl_Merrow', '2017-06-01', 58), ('Alejandro_de_la_Madrid', '2015-11-01', 1630), ('70936_Kamen', '2017-08-01', 1), ('AK_Steel_Holding_Corp', '2015-08-01', 8), ('1124_Stroobantia', '2017-10-01', 23), ('Asian_Wedding', '2016-10-01', 15), ('23837_Matthewnanni', '2015-10-01', 18), ('Acharya_Jagadish_Chandra_Bose_Indian_Botanic_Garden', '2017-03-01', 4893), ('Betsy_Hodges', '2016-04-01', 560), ('Arthur_and_the_Invisibles', '2015-08-01', 14924), ('Arkansas-Ole_Miss_football_rivalry', '2015-05-01', 7), ('Asia_Cup', '2015-09-01', 5938), ('Arginine_racemase', '2016-12-01', 15), ('585th_Field_Company,_Royal_Engineers', '2018-03-01', 1), ('1975_Stagg_Bowl', '2017-08-01', 6), ('Dame_Commander_of_The_Most_Honourable_Order_of_the_Bath', '2017-01-01', 1), ('Askajian', '2016-02-01', 26), ('2006_Nebraska_Cornhuskers_football_team', '2015-08-01', 975), ('Cicero_Francis_Lowe_House', '2015-07-01', 10), ('Conan_IV,_Duke_of_Brittany', '2016-11-01', 252), ('2005_World_Modern_Pentathlon_Championships', '2016-07-01', 38), ('1946_Aleutian_Islands_earthquake', '2017-03-01', 2019), ('ANKRD17', '2017-09-01', 19), ('1970_Maryland_Terrapins_football_team', '2017-11-01', 42), ('Ali_Dehkhoda', '2017-04-01', 1), ('1244_in_art', '2015-07-01', 22), ('1520s_in_Denmark', '2016-01-01', 20), ('Abdoulaye_Gaye', '2017-12-01', 10), ('An_Angel_Has_Arrived', '2016-03-01', 36), ('1453_BC', '2015-08-01', 26), ('2017_National_Games_of_China', '2017-05-01', 1293), ('A_Night_in_Sickbay', '2016-05-01', 251), ('Dateline_Diamonds', '2017-01-01', 53), ('419_guestbook_spamming', '2016-02-01', 5), ('Familiar_bluet', '2017-01-01', 4), ('Abu_Bakr_Mirza', '2017-10-01', 86), ('7272_Darbydyar', '2017-11-01', 4), ('Ages_of_consent_in_Latin_America', '2017-03-01', 961), ('1982_Japan_Soccer_League_Cup', '2016-04-01', 14), ('2810_BC', '2015-07-01', 9), ('Druga_Liga_Republike_Srpske', '2017-01-01', 1), ('1998_Swedish_Rally', '2017-09-01', 34), ('1567_in_Norway', '2015-10-01', 89), ('126_Army_Engineer_Regiment,_Royal_Engineers', '2016-03-01', 5), ('2017_American_League_Wild_Card_Game', '2017-10-01', 25120), ('August_Follen', '2017-01-01', 2), ('Ala_Gertner', '2015-11-01', 876), ('Glenwood,_Harford_County,_Maryland', '2017-01-01', 3), ('Applied_ecology', '2017-12-01', 730), ('Ariarathes_V_Eusebes_Philopator', '2018-03-01', 5), ('2006_AFC_Champions_League', '2017-09-01', 947), ('60_minutes_2', '2016-10-01', 2), ('Embryonic_shield', '2017-01-01', 2), ('2001_Meath_Intermediate_Football_Championship', '2015-11-01', 8), ('Apparition_of_Christ_to_Madonna', '2017-06-01', 5), ('Hoosier_Road_Elementary', '2017-01-01', 1), ('Arua_Uda', '2016-12-01', 29), ('Array_comprehension', '2015-11-01', 8), ('Baszki', '2015-06-01', 36), ('Akron_Neighborhoods', '2016-01-01', 4), ('Catholic_Church_in_Costa_Rica', '2017-06-01', 85), ('Canada-Sweden_relations', '2015-07-01', 1), ('Barza_Radio_Community', '2016-11-01', 6), ('Dalhousie_Middle_School', '2016-11-01', 5), ('Alliphis_bakeri', '2017-11-01', 2), ('Bartica_massacre', '2017-06-01', 53), ('30th_January', '2015-11-01', 10), ('1920_revolution', '2017-05-01', 5), ('Amyraldism', '2017-08-01', 828), ('AA_Jefferson_District', '2016-05-01', 45), ('Eunebristis_cinclidias', '2017-01-01', 1), ('A_Scott_Connelly', '2017-06-01', 5), ('Antony_Durose', '2016-07-01', 19), ('Arval_Brethren', '2017-11-01', 579), 
('Anthidium_dissectum', '2017-05-01', 2), ('Aru,_Democratic_Republic_of_the_Congo', '2017-04-01', 81), ('1956-57_West_Indian_cricket_season', '2017-04-01', 2), ('2014_Moscow_Film_Festival', '2017-08-01', 2), ('Anna_Gurji', '2017-06-01', 27), ('Allen_Memorial_Medical_Library', '2016-07-01', 120), ('Anton_Sistermans', '2017-02-01', 36), ('Clotheshorses', '2017-06-01', 1), ('36_Stratagems', '2017-08-01', 25), ('Attack_of_the_crab_monsters', '2016-10-01', 16), ('30_rock_awards', '2015-09-01', 2), ('Aeroflot,_Uralsk_Civil_Aviation_Directorate', '2017-08-01', 2), ('Amblyseius_parabufortus', '2017-06-01', 3), ('Indian_coral_tree', '2017-01-01', 3), ('3285_Ruth_Wolfe', '2016-02-01', 9), ('Anderson_da_Silva_Gibin', '2016-08-01', 73), ('5001st_Composite_Group', '2017-03-01', 4), ('Danzik', '2016-04-01', 8), ('4810_Ruslanova', '2016-03-01', 2), ('Arkendale,_Virginia', '2016-04-01', 14), ('Al_Francis_Bichara', '2016-09-01', 239), ('Cayena', '2017-01-01', 1), ('A_Glass_of_Darkness', '2017-04-01', 95), ('GMC_CCKW', '2017-01-01', 887), ('Alabama_State_Route_107', '2015-11-01', 13), ('2011_in_motorsport', '2017-12-01', 26), ('Adecco_General_Staffing,_New_Zealand', '2017-12-01', 86), ('Anbargah', '2015-10-01', 6), ('1995_Asian_Cup_Winners_Cup', '2016-06-01', 7), ('1986_Wales_rugby_union_tour_of_the_South_Pacific', '2016-12-01', 30), ('Adya_Goud_Brahmin', '2017-03-01', 2), ('Akcakiraz', '2015-08-01', 5), ('24249_Bobbiolson', '2017-12-01', 4), ('Ahmanson_Theatre', '2016-02-01', 801), ('Abdullah_ibn_Jahsh', '2016-10-01', 196), ('1937_in_Chile', '2015-08-01', 24), ('2000_in_England', '2016-01-01', 57), ('A_Deepness_In_The_Sky', '2017-08-01', 2), ('Area_code_678', '2015-07-01', 480), ('Avalon_Hill', '2017-01-01', 880), ('Anna,_Duchess_of_Prussia', '2015-12-01', 315), ('Alexandr_Syman', '2017-04-01', 24), ('7400_series_logic', '2017-11-01', 2), ('Greenleaf_Township,_Minnesota', '2017-01-01', 1), ('Acetylsal', '2017-04-01', 6), ('Earth_and_Man_National_Museum', '2016-11-01', 43), ('Affetside', '2015-10-01', 185), ('1971_CFL_season', '2015-08-01', 202), ('Beth_Bader', '2016-11-01', 21), ('Enrolled_Nurse', '2016-04-01', 5), ('Al-Azraq', '2016-12-01', 22), ('4th_South_Carolina_Regiment', '2015-07-01', 42), ('Amanda_Overmyer', '2017-02-01', 356), ('Auto_wrap', '2016-02-01', 8), ('Anonymous_internet_banking', '2015-07-01', 98), ('Curatoria', '2016-11-01', 3), ('A-roll', '2016-05-01', 134), ('Accra_hearts_of_oak_sc', '2017-10-01', 4), ('Apostasy_from_Judaism', '2015-12-01', 45), ('Acantharctia_tenebrosa', '2018-01-01', 3), ('Abigail_Keasey_Frankel', '2017-11-01', 25), ('2008_Paraguayan_general_election', '2016-01-01', 1), ('Adams_motor', '2015-09-01', 37), ('Drummond_Community_High_School', '2017-01-01', 17), ('Andrews_Nakahara', '2017-10-01', 474), ('10th_Maccabiah', '2017-04-01', 30), ('Ackerman,_Rick', '2015-08-01', 4), ('Dumri,_Buxar', '2016-11-01', 35), ('Asking_Jesus_into_your_heart', '2016-09-01', 1), ('Adamowicz_brothers', '2016-12-01', 161), ('Alien_Musibat', '2017-12-01', 2), ('Ahmad_Al_Tayer', '2016-04-01', 39), ('Analytical_phonics', '2016-01-01', 520), ('Do_It_Good', '2016-04-01', 281), ('2004_Kumbakonam_School_fire', '2017-12-01', 2114), ('1977_Chattanooga_Mocs_football_team', '2016-08-01', 3), ('Globe_valves', '2017-01-01', 11), ('Abelmoschus_crinitus', '2016-04-01', 18), ('1874_Yale_Bulldogs_football_team', '2016-02-01', 37), ('Climer', '2017-06-01', 1), ('Auchroisk', '2017-06-01', 37), ('2010_Albirex_Niigata_season', '2016-10-01', 19), ('Adhocracy', '2017-06-01', 2217), ('Chios_Massacre', '2015-07-01', 
1110), ('African_Red_Slip', '2017-02-01', 221), ('1976_Portland_Timbers_season', '2016-07-01', 41), ('Alsace-Larraine', '2015-09-01', 2), ('3750_Ilizarov', '2017-07-01', 12), ('Aleksandr_Shkaev', '2017-05-01', 1), ('32_bar_form', '2016-01-01', 12), ('Aequatorium_jamesonii', '2018-03-01', 14), ('Abade_neiva', '2016-09-01', 2), ('Arakvaz', '2016-08-01', 23), ('207_Sqn', '2017-10-01', 2), ('Ducal_hat', '2016-11-01', 10), ('2_Degrees', '2017-03-01', 19), ('Ahmeddiyya_Islam', '2016-03-01', 4), ('Amidi-ye_Kohneh', '2017-11-01', 13), ('Contributions_to_Indian_Sociology', '2016-11-01', 42), ('Clark_Leiblee', '2016-04-01', 5), ('Abraham_of_Strathearn', '2017-09-01', 14);
diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/assets/error.log b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/assets/error.log
deleted file mode 100644
index 05323c26d96..00000000000
--- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/assets/error.log
+++ /dev/null
@@ -1,1000 +0,0 @@
-2023/01/15 14:51:17 [error] client: 7.2.8.1, server: example.com "GET /apple-touch-icon-120x120.png HTTP/1.1" -2023/01/16 06:02:09 [error] client: 8.4.2.7, server: example.com "GET /apple-touch-icon-120x120.png HTTP/1.1" -2023/01/15 13:46:13 [error] client: 6.9.3.7, server: example.com "GET /apple-touch-icon.png HTTP/1.1" -2023/01/16 05:34:55 [error] client: 9.9.7.6, server: example.com "GET /h5/static/cert/icon_yanzhengma.png HTTP/1.1" -2023/01/16 12:39:17 [error] client: 4.6.9.6, server: example.com "GET /phone/images/icon_01.png HTTP/1.1" -2023/01/15 16:22:46 [error] client: 9.4.5.7, server: example.com "GET /apple-touch-icon-120x120.png HTTP/1.1" -2023/01/16 09:45:31 [error] client: 1.1.7.8, server: example.com "GET /img/close.png HTTP/1.1" -2023/01/16 08:18:37 [error] client: 2.1.6.2, server: example.com "GET /apple-touch-icon-precomposed.png HTTP/1.1" -2023/01/15 15:46:08 [error] client: 8.1.6.7, server: example.com "GET /phone/images/icon_01.png HTTP/1.1" -2023/01/16 08:17:15 [error] client: 7.6.3.4, server: example.com "GET /apple-touch-icon-120x120.png HTTP/1.1" -2023/01/15 16:54:34 [error] client: 3.4.9.6, server: example.com "GET /h5/static/cert/icon_yanzhengma.png HTTP/1.1" -2023/01/15 18:47:56 [error] client: 1.9.8.7, server: example.com "GET /apple-touch-icon-precomposed.png HTTP/1.1" -2023/01/16 02:05:37 [error] client: 9.6.9.1, server: example.com "GET /apple-touch-icon-120x120.png HTTP/1.1" -2023/01/15 13:48:03 [error] client: 7.1.8.7, server: example.com "GET /apple-touch-icon-120x120.png HTTP/1.1" -2023/01/16 09:43:20 [error] client: 8.9.5.9, server: example.com "GET /apple-touch-icon-120x120.png HTTP/1.1" -2023/01/15 20:04:15 [error] client: 3.9.3.9, server: example.com "GET /apple-touch-icon.png HTTP/1.1" -2023/01/15 20:24:19 [error] client: 6.7.7.7, server: example.com "GET /apple-touch-icon.png HTTP/1.1" -2023/01/16 10:16:05 [error] client: 8.7.2.9, server: example.com "GET /phone/images/icon_01.png HTTP/1.1" -2023/01/15 20:30:34 [error] client: 4.8.2.9, server: example.com "GET /apple-touch-icon-precomposed.png HTTP/1.1" -2023/01/16 00:36:21 [error] client: 3.8.6.4, server: example.com "GET /apple-touch-icon.png HTTP/1.1" -2023/01/15 22:53:56 [error] client: 7.3.7.3, server: example.com "GET /img/close.png HTTP/1.1" -2023/01/15 21:40:45 [error] client: 3.8.4.7, server: example.com "GET /apple-touch-icon-precomposed.png HTTP/1.1" -2023/01/16 11:37:35 [error] client: 4.6.8.2, server: 
example.com "GET /h5/static/cert/icon_yanzhengma.png HTTP/1.1" -2023/01/15 20:46:59 [error] client: 7.9.1.8, server: example.com "GET /apple-touch-icon.png HTTP/1.1" -2023/01/16 05:28:40 [error] client: 6.1.2.3, server: example.com "GET /phone/images/icon_01.png HTTP/1.1" -2023/01/16 09:09:10 [error] client: 7.1.4.4, server: example.com "GET /apple-touch-icon-precomposed.png HTTP/1.1" -2023/01/15 15:27:55 [error] client: 3.5.7.2, server: example.com "GET /apple-touch-icon-120x120.png HTTP/1.1" -2023/01/15 16:13:26 [error] client: 8.1.4.8, server: example.com "GET /apple-touch-icon-120x120.png HTTP/1.1" -2023/01/16 08:09:24 [error] client: 5.4.7.5, server: example.com "GET /apple-touch-icon-precomposed.png HTTP/1.1" -2023/01/15 13:48:25 [error] client: 2.1.7.9, server: example.com "GET /apple-touch-icon.png HTTP/1.1" -2023/01/15 18:14:39 [error] client: 9.3.2.2, server: example.com "GET /apple-touch-icon-precomposed.png HTTP/1.1" -2023/01/15 20:54:18 [error] client: 1.5.4.9, server: example.com "GET /apple-touch-icon-120x120.png HTTP/1.1" -2023/01/16 05:09:53 [error] client: 7.6.8.6, server: example.com "GET /apple-touch-icon-precomposed.png HTTP/1.1" -2023/01/15 17:53:07 [error] client: 8.4.1.3, server: example.com "GET /apple-touch-icon.png HTTP/1.1" -2023/01/15 15:06:54 [error] client: 8.1.8.7, server: example.com "GET /img/close.png HTTP/1.1" -2023/01/16 09:52:53 [error] client: 5.5.3.1, server: example.com "GET /apple-touch-icon.png HTTP/1.1" -2023/01/16 03:54:07 [error] client: 3.6.9.2, server: example.com "GET /phone/images/icon_01.png HTTP/1.1" -2023/01/16 02:24:43 [error] client: 8.3.3.6, server: example.com "GET /h5/static/cert/icon_yanzhengma.png HTTP/1.1" -2023/01/16 07:17:31 [error] client: 1.5.9.3, server: example.com "GET /phone/images/icon_01.png HTTP/1.1" -2023/01/15 16:52:34 [error] client: 2.1.9.6, server: example.com "GET /h5/static/cert/icon_yanzhengma.png HTTP/1.1" -2023/01/15 16:13:39 [error] client: 8.1.1.8, server: example.com "GET /phone/images/icon_01.png HTTP/1.1" -2023/01/16 09:02:28 [error] client: 6.1.4.5, server: example.com "GET /apple-touch-icon-precomposed.png HTTP/1.1" -2023/01/15 14:57:56 [error] client: 8.4.8.8, server: example.com "GET /apple-touch-icon-precomposed.png HTTP/1.1" -2023/01/16 11:17:30 [error] client: 5.5.8.3, server: example.com "GET /apple-touch-icon.png HTTP/1.1" -2023/01/15 17:54:44 [error] client: 5.9.7.7, server: example.com "GET /apple-touch-icon-120x120.png HTTP/1.1" -2023/01/15 13:01:40 [error] client: 3.7.2.5, server: example.com "GET /apple-touch-icon-120x120.png HTTP/1.1" -2023/01/16 12:25:06 [error] client: 8.5.1.8, server: example.com "GET /h5/static/cert/icon_yanzhengma.png HTTP/1.1" -2023/01/16 12:04:01 [error] client: 4.5.6.7, server: example.com "GET /h5/static/cert/icon_yanzhengma.png HTTP/1.1" -2023/01/15 16:04:16 [error] client: 9.7.7.4, server: example.com "GET /apple-touch-icon.png HTTP/1.1" -2023/01/15 13:05:31 [error] client: 4.3.3.3, server: example.com "GET /h5/static/cert/icon_yanzhengma.png HTTP/1.1" -2023/01/16 11:46:25 [error] client: 6.1.4.2, server: example.com "GET /apple-touch-icon-120x120.png HTTP/1.1" -2023/01/16 04:00:07 [error] client: 4.4.5.2, server: example.com "GET /img/close.png HTTP/1.1" -2023/01/16 02:30:50 [error] client: 3.9.6.4, server: example.com "GET /phone/images/icon_01.png HTTP/1.1" -2023/01/16 06:39:56 [error] client: 2.3.4.8, server: example.com "GET /apple-touch-icon-120x120.png HTTP/1.1" -2023/01/16 01:16:51 [error] client: 1.1.3.2, server: example.com "GET 
/apple-touch-icon-120x120.png HTTP/1.1" -2023/01/15 14:15:58 [error] client: 7.2.2.7, server: example.com "GET /apple-touch-icon.png HTTP/1.1" -2023/01/16 00:07:10 [error] client: 3.3.7.2, server: example.com "GET /h5/static/cert/icon_yanzhengma.png HTTP/1.1" -2023/01/15 19:40:21 [error] client: 5.1.2.4, server: example.com "GET /apple-touch-icon-precomposed.png HTTP/1.1" -2023/01/15 15:13:14 [error] client: 9.2.9.5, server: example.com "GET /apple-touch-icon-precomposed.png HTTP/1.1" -2023/01/16 01:08:24 [error] client: 7.2.7.8, server: example.com "GET /h5/static/cert/icon_yanzhengma.png HTTP/1.1" -2023/01/16 02:12:19 [error] client: 6.8.1.4, server: example.com "GET /apple-touch-icon-precomposed.png HTTP/1.1" -2023/01/16 04:11:04 [error] client: 7.3.2.4, server: example.com "GET /apple-touch-icon.png HTTP/1.1" -2023/01/15 16:15:35 [error] client: 6.2.5.5, server: example.com "GET /apple-touch-icon.png HTTP/1.1" -2023/01/16 01:39:48 [error] client: 5.4.8.5, server: example.com "GET /img/close.png HTTP/1.1" -2023/01/15 21:59:33 [error] client: 5.2.3.1, server: example.com "GET /apple-touch-icon-120x120.png HTTP/1.1" -2023/01/15 18:34:44 [error] client: 2.6.5.6, server: example.com "GET /phone/images/icon_01.png HTTP/1.1" -2023/01/16 11:24:18 [error] client: 7.5.5.9, server: example.com "GET /apple-touch-icon.png HTTP/1.1" -2023/01/16 00:37:05 [error] client: 1.1.7.9, server: example.com "GET /h5/static/cert/icon_yanzhengma.png HTTP/1.1" -2023/01/16 06:38:10 [error] client: 8.3.6.9, server: example.com "GET /phone/images/icon_01.png HTTP/1.1" -2023/01/15 23:14:12 [error] client: 8.9.5.9, server: example.com "GET /apple-touch-icon.png HTTP/1.1" -2023/01/16 05:05:33 [error] client: 7.2.6.7, server: example.com "GET /img/close.png HTTP/1.1" -2023/01/15 16:59:32 [error] client: 6.1.1.6, server: example.com "GET /img/close.png HTTP/1.1" -2023/01/16 10:36:57 [error] client: 3.5.8.7, server: example.com "GET /img/close.png HTTP/1.1" -2023/01/16 04:58:43 [error] client: 6.7.5.4, server: example.com "GET /apple-touch-icon.png HTTP/1.1" -2023/01/16 09:51:24 [error] client: 5.5.6.8, server: example.com "GET /apple-touch-icon.png HTTP/1.1" -2023/01/16 05:29:28 [error] client: 6.7.5.8, server: example.com "GET /apple-touch-icon.png HTTP/1.1" -2023/01/16 11:39:15 [error] client: 9.3.7.3, server: example.com "GET /img/close.png HTTP/1.1" -2023/01/15 20:10:32 [error] client: 3.2.6.1, server: example.com "GET /img/close.png HTTP/1.1" -2023/01/16 10:44:06 [error] client: 3.7.8.8, server: example.com "GET /apple-touch-icon.png HTTP/1.1" -2023/01/15 19:25:28 [error] client: 8.7.5.7, server: example.com "GET /apple-touch-icon-precomposed.png HTTP/1.1" -2023/01/15 22:38:17 [error] client: 2.9.1.7, server: example.com "GET /apple-touch-icon.png HTTP/1.1" -2023/01/15 15:26:26 [error] client: 5.3.6.5, server: example.com "GET /phone/images/icon_01.png HTTP/1.1" -2023/01/15 18:38:54 [error] client: 6.2.8.7, server: example.com "GET /h5/static/cert/icon_yanzhengma.png HTTP/1.1" -2023/01/15 15:16:25 [error] client: 7.9.7.8, server: example.com "GET /img/close.png HTTP/1.1" -2023/01/15 20:32:52 [error] client: 2.9.1.3, server: example.com "GET /apple-touch-icon-120x120.png HTTP/1.1" -2023/01/16 02:07:09 [error] client: 5.9.7.4, server: example.com "GET /h5/static/cert/icon_yanzhengma.png HTTP/1.1" -2023/01/15 19:57:30 [error] client: 3.9.6.6, server: example.com "GET /apple-touch-icon.png HTTP/1.1" -2023/01/15 19:44:16 [error] client: 3.5.5.1, server: example.com "GET /apple-touch-icon-120x120.png HTTP/1.1" 
-2023/01/15 22:14:01 [error] client: 6.3.1.4, server: example.com "GET /phone/images/icon_01.png HTTP/1.1" -2023/01/15 15:25:12 [error] client: 7.7.9.4, server: example.com "GET /apple-touch-icon.png HTTP/1.1" -2023/01/16 06:02:52 [error] client: 7.7.5.4, server: example.com "GET /apple-touch-icon-precomposed.png HTTP/1.1" -2023/01/16 09:33:32 [error] client: 1.3.7.3, server: example.com "GET /apple-touch-icon.png HTTP/1.1" -2023/01/16 11:01:35 [error] client: 5.9.8.3, server: example.com "GET /img/close.png HTTP/1.1" -2023/01/16 08:36:28 [error] client: 2.2.2.7, server: example.com "GET /apple-touch-icon-120x120.png HTTP/1.1" -2023/01/16 12:29:50 [error] client: 7.2.7.8, server: example.com "GET /h5/static/cert/icon_yanzhengma.png HTTP/1.1" -2023/01/16 06:54:07 [error] client: 9.3.7.3, server: example.com "GET /apple-touch-icon-precomposed.png HTTP/1.1" -2023/01/16 08:57:59 [error] client: 6.6.3.5, server: example.com "GET /img/close.png HTTP/1.1" -2023/01/16 11:09:03 [error] client: 4.9.3.8, server: example.com "GET /h5/static/cert/icon_yanzhengma.png HTTP/1.1" -2023/01/16 01:19:19 [error] client: 7.2.3.6, server: example.com "GET /apple-touch-icon-precomposed.png HTTP/1.1" -2023/01/16 09:19:15 [error] client: 8.3.3.5, server: example.com "GET /apple-touch-icon.png HTTP/1.1" -2023/01/16 10:35:45 [error] client: 8.6.9.2, server: example.com "GET /img/close.png HTTP/1.1" -2023/01/16 10:41:19 [error] client: 7.2.1.5, server: example.com "GET /img/close.png HTTP/1.1" -2023/01/15 16:16:58 [error] client: 7.1.7.5, server: example.com "GET /phone/images/icon_01.png HTTP/1.1" -2023/01/16 01:48:25 [error] client: 8.7.2.3, server: example.com "GET /apple-touch-icon-120x120.png HTTP/1.1" -2023/01/16 08:16:24 [error] client: 1.1.1.1, server: example.com "GET /apple-touch-icon.png HTTP/1.1" -2023/01/15 13:31:27 [error] client: 9.5.8.4, server: example.com "GET /apple-touch-icon-120x120.png HTTP/1.1" -2023/01/15 21:43:32 [error] client: 7.8.9.1, server: example.com "GET /phone/images/icon_01.png HTTP/1.1" -2023/01/15 13:26:34 [error] client: 1.9.6.1, server: example.com "GET /apple-touch-icon-precomposed.png HTTP/1.1" -2023/01/15 14:58:47 [error] client: 8.3.8.3, server: example.com "GET /apple-touch-icon-precomposed.png HTTP/1.1" -2023/01/16 06:29:38 [error] client: 5.1.1.4, server: example.com "GET /apple-touch-icon.png HTTP/1.1" -2023/01/16 12:47:46 [error] client: 9.3.5.2, server: example.com "GET /apple-touch-icon-precomposed.png HTTP/1.1" -2023/01/16 11:32:32 [error] client: 2.4.8.2, server: example.com "GET /apple-touch-icon-120x120.png HTTP/1.1" -2023/01/16 00:45:56 [error] client: 3.4.6.5, server: example.com "GET /apple-touch-icon-120x120.png HTTP/1.1" -2023/01/16 08:26:02 [error] client: 7.7.2.3, server: example.com "GET /apple-touch-icon-120x120.png HTTP/1.1" -2023/01/16 05:00:30 [error] client: 1.9.5.7, server: example.com "GET /h5/static/cert/icon_yanzhengma.png HTTP/1.1" -2023/01/15 20:58:18 [error] client: 4.5.5.6, server: example.com "GET /h5/static/cert/icon_yanzhengma.png HTTP/1.1" -2023/01/15 18:30:18 [error] client: 9.6.8.9, server: example.com "GET /apple-touch-icon-120x120.png HTTP/1.1" -2023/01/16 04:10:12 [error] client: 2.9.6.4, server: example.com "GET /h5/static/cert/icon_yanzhengma.png HTTP/1.1" -2023/01/16 01:59:06 [error] client: 5.6.2.7, server: example.com "GET /h5/static/cert/icon_yanzhengma.png HTTP/1.1" -2023/01/15 16:29:38 [error] client: 5.2.5.3, server: example.com "GET /apple-touch-icon.png HTTP/1.1" -2023/01/16 00:38:28 [error] client: 8.1.9.6, server: 
example.com "GET /apple-touch-icon.png HTTP/1.1" -2023/01/16 10:32:44 [error] client: 7.6.4.2, server: example.com "GET /h5/static/cert/icon_yanzhengma.png HTTP/1.1" -2023/01/15 19:23:09 [error] client: 7.9.9.6, server: example.com "GET /img/close.png HTTP/1.1" -2023/01/15 18:45:07 [error] client: 7.8.2.8, server: example.com "GET /apple-touch-icon-120x120.png HTTP/1.1" -2023/01/15 18:59:30 [error] client: 6.8.8.3, server: example.com "GET /apple-touch-icon-120x120.png HTTP/1.1" -2023/01/15 14:57:21 [error] client: 1.2.5.3, server: example.com "GET /h5/static/cert/icon_yanzhengma.png HTTP/1.1" -2023/01/16 01:27:11 [error] client: 3.5.1.3, server: example.com "GET /h5/static/cert/icon_yanzhengma.png HTTP/1.1" -2023/01/16 08:27:52 [error] client: 2.5.1.8, server: example.com "GET /h5/static/cert/icon_yanzhengma.png HTTP/1.1" -2023/01/16 11:03:31 [error] client: 4.5.4.4, server: example.com "GET /h5/static/cert/icon_yanzhengma.png HTTP/1.1" -2023/01/15 20:24:11 [error] client: 8.5.6.9, server: example.com "GET /img/close.png HTTP/1.1" -2023/01/15 17:16:55 [error] client: 8.7.8.1, server: example.com "GET /img/close.png HTTP/1.1" -2023/01/16 10:50:00 [error] client: 7.4.2.8, server: example.com "GET /apple-touch-icon-120x120.png HTTP/1.1" -2023/01/15 23:34:47 [error] client: 8.2.8.1, server: example.com "GET /h5/static/cert/icon_yanzhengma.png HTTP/1.1" -2023/01/15 16:40:05 [error] client: 5.9.8.2, server: example.com "GET /img/close.png HTTP/1.1" -2023/01/16 09:47:26 [error] client: 4.3.7.4, server: example.com "GET /apple-touch-icon.png HTTP/1.1" -2023/01/16 04:19:15 [error] client: 4.7.5.3, server: example.com "GET /apple-touch-icon-precomposed.png HTTP/1.1" -2023/01/16 10:17:52 [error] client: 9.8.8.4, server: example.com "GET /phone/images/icon_01.png HTTP/1.1" -2023/01/15 22:52:58 [error] client: 3.1.9.5, server: example.com "GET /phone/images/icon_01.png HTTP/1.1" -2023/01/16 06:27:47 [error] client: 6.6.7.3, server: example.com "GET /apple-touch-icon-120x120.png HTTP/1.1" -2023/01/16 04:50:53 [error] client: 8.9.7.5, server: example.com "GET /apple-touch-icon.png HTTP/1.1" -2023/01/15 21:57:05 [error] client: 3.6.1.4, server: example.com "GET /img/close.png HTTP/1.1" -2023/01/16 06:22:18 [error] client: 6.5.3.6, server: example.com "GET /phone/images/icon_01.png HTTP/1.1" -2023/01/15 17:06:33 [error] client: 4.9.9.3, server: example.com "GET /h5/static/cert/icon_yanzhengma.png HTTP/1.1" -2023/01/16 00:29:03 [error] client: 6.9.1.5, server: example.com "GET /img/close.png HTTP/1.1" -2023/01/16 00:58:37 [error] client: 9.5.5.5, server: example.com "GET /apple-touch-icon-120x120.png HTTP/1.1" -2023/01/16 06:50:03 [error] client: 3.3.5.2, server: example.com "GET /img/close.png HTTP/1.1" -2023/01/16 10:58:00 [error] client: 3.6.3.6, server: example.com "GET /apple-touch-icon.png HTTP/1.1" -2023/01/15 18:55:11 [error] client: 5.5.3.5, server: example.com "GET /phone/images/icon_01.png HTTP/1.1" -2023/01/16 08:02:45 [error] client: 7.9.7.7, server: example.com "GET /h5/static/cert/icon_yanzhengma.png HTTP/1.1" -2023/01/15 16:56:11 [error] client: 8.5.2.7, server: example.com "GET /img/close.png HTTP/1.1" -2023/01/15 14:52:50 [error] client: 7.2.4.7, server: example.com "GET /apple-touch-icon-120x120.png HTTP/1.1" -2023/01/15 15:07:33 [error] client: 8.8.3.8, server: example.com "GET /h5/static/cert/icon_yanzhengma.png HTTP/1.1" -2023/01/16 10:16:29 [error] client: 1.4.8.5, server: example.com "GET /apple-touch-icon.png HTTP/1.1" -2023/01/15 16:37:03 [error] client: 2.2.7.1, server: 
example.com "GET /h5/static/cert/icon_yanzhengma.png HTTP/1.1" -2023/01/16 10:01:11 [error] client: 3.2.8.1, server: example.com "GET /img/close.png HTTP/1.1" -2023/01/15 16:38:40 [error] client: 4.3.4.2, server: example.com "GET /apple-touch-icon-120x120.png HTTP/1.1" -2023/01/16 06:19:30 [error] client: 6.1.9.8, server: example.com "GET /apple-touch-icon-precomposed.png HTTP/1.1" -2023/01/15 14:44:01 [error] client: 2.5.1.8, server: example.com "GET /img/close.png HTTP/1.1" -2023/01/15 22:37:23 [error] client: 4.5.8.4, server: example.com "GET /h5/static/cert/icon_yanzhengma.png HTTP/1.1" -2023/01/15 23:44:33 [error] client: 3.5.4.4, server: example.com "GET /apple-touch-icon-120x120.png HTTP/1.1" -2023/01/16 06:25:36 [error] client: 5.5.9.9, server: example.com "GET /apple-touch-icon-120x120.png HTTP/1.1" -2023/01/16 07:37:28 [error] client: 2.8.4.7, server: example.com "GET /img/close.png HTTP/1.1" -2023/01/16 01:47:33 [error] client: 5.5.1.5, server: example.com "GET /apple-touch-icon-precomposed.png HTTP/1.1" -2023/01/16 06:38:30 [error] client: 1.5.4.3, server: example.com "GET /phone/images/icon_01.png HTTP/1.1" -2023/01/16 07:46:11 [error] client: 1.2.5.3, server: example.com "GET /phone/images/icon_01.png HTTP/1.1" -2023/01/16 02:55:42 [error] client: 1.3.5.4, server: example.com "GET /apple-touch-icon-precomposed.png HTTP/1.1" -2023/01/15 16:28:40 [error] client: 1.2.3.3, server: example.com "GET /apple-touch-icon-120x120.png HTTP/1.1" -2023/01/15 16:44:04 [error] client: 6.3.9.6, server: example.com "GET /h5/static/cert/icon_yanzhengma.png HTTP/1.1" -2023/01/16 06:31:38 [error] client: 8.3.8.6, server: example.com "GET /h5/static/cert/icon_yanzhengma.png HTTP/1.1" -2023/01/16 03:17:42 [error] client: 5.9.3.6, server: example.com "GET /apple-touch-icon.png HTTP/1.1" -2023/01/15 20:20:57 [error] client: 7.7.6.7, server: example.com "GET /apple-touch-icon-precomposed.png HTTP/1.1" -2023/01/16 08:32:45 [error] client: 2.3.9.7, server: example.com "GET /h5/static/cert/icon_yanzhengma.png HTTP/1.1" -2023/01/15 14:11:40 [error] client: 2.5.3.5, server: example.com "GET /img/close.png HTTP/1.1" -2023/01/16 07:18:17 [error] client: 8.3.9.8, server: example.com "GET /h5/static/cert/icon_yanzhengma.png HTTP/1.1" -2023/01/16 12:22:25 [error] client: 1.1.8.8, server: example.com "GET /h5/static/cert/icon_yanzhengma.png HTTP/1.1" -2023/01/15 23:31:51 [error] client: 5.4.4.1, server: example.com "GET /h5/static/cert/icon_yanzhengma.png HTTP/1.1" -2023/01/15 21:01:10 [error] client: 2.5.8.8, server: example.com "GET /h5/static/cert/icon_yanzhengma.png HTTP/1.1" -2023/01/16 03:57:21 [error] client: 6.5.2.4, server: example.com "GET /h5/static/cert/icon_yanzhengma.png HTTP/1.1" -2023/01/15 15:32:40 [error] client: 3.4.3.5, server: example.com "GET /img/close.png HTTP/1.1" -2023/01/16 07:18:06 [error] client: 9.1.7.7, server: example.com "GET /h5/static/cert/icon_yanzhengma.png HTTP/1.1" -2023/01/16 07:19:47 [error] client: 7.6.9.9, server: example.com "GET /apple-touch-icon-120x120.png HTTP/1.1" -2023/01/16 09:42:37 [error] client: 5.6.5.2, server: example.com "GET /apple-touch-icon-120x120.png HTTP/1.1" -2023/01/15 13:21:01 [error] client: 5.4.2.6, server: example.com "GET /apple-touch-icon-precomposed.png HTTP/1.1" -2023/01/15 22:38:40 [error] client: 9.4.8.9, server: example.com "GET /apple-touch-icon-precomposed.png HTTP/1.1" -2023/01/15 20:56:19 [error] client: 3.1.6.6, server: example.com "GET /apple-touch-icon-120x120.png HTTP/1.1" -2023/01/16 05:32:45 [error] client: 6.8.9.5, server: 
example.com "GET /img/close.png HTTP/1.1" -2023/01/16 05:58:16 [error] client: 5.3.8.8, server: example.com "GET /img/close.png HTTP/1.1" -2023/01/16 00:27:39 [error] client: 8.8.3.1, server: example.com "GET /apple-touch-icon-120x120.png HTTP/1.1" -2023/01/16 00:45:58 [error] client: 8.1.2.1, server: example.com "GET /apple-touch-icon-120x120.png HTTP/1.1" -2023/01/16 09:55:56 [error] client: 2.2.7.2, server: example.com "GET /img/close.png HTTP/1.1" -2023/01/16 08:07:19 [error] client: 4.4.4.4, server: example.com "GET /apple-touch-icon.png HTTP/1.1" -2023/01/15 15:25:40 [error] client: 9.8.9.8, server: example.com "GET /h5/static/cert/icon_yanzhengma.png HTTP/1.1" -2023/01/16 02:30:56 [error] client: 6.2.7.5, server: example.com "GET /apple-touch-icon.png HTTP/1.1" -2023/01/16 08:34:05 [error] client: 6.1.1.6, server: example.com "GET /img/close.png HTTP/1.1" -2023/01/15 17:36:51 [error] client: 7.1.4.8, server: example.com "GET /apple-touch-icon-precomposed.png HTTP/1.1" -2023/01/15 14:26:56 [error] client: 9.4.1.9, server: example.com "GET /h5/static/cert/icon_yanzhengma.png HTTP/1.1" -2023/01/16 02:30:06 [error] client: 7.5.8.9, server: example.com "GET /apple-touch-icon.png HTTP/1.1" -2023/01/16 04:26:33 [error] client: 5.7.2.8, server: example.com "GET /apple-touch-icon-120x120.png HTTP/1.1" -2023/01/16 09:18:39 [error] client: 5.6.3.3, server: example.com "GET /apple-touch-icon-precomposed.png HTTP/1.1" -2023/01/16 04:39:57 [error] client: 7.8.3.2, server: example.com "GET /phone/images/icon_01.png HTTP/1.1" -2023/01/15 23:56:19 [error] client: 1.9.7.6, server: example.com "GET /phone/images/icon_01.png HTTP/1.1" -2023/01/15 15:01:29 [error] client: 5.8.8.7, server: example.com "GET /apple-touch-icon.png HTTP/1.1" -2023/01/15 17:47:43 [error] client: 2.2.2.5, server: example.com "GET /phone/images/icon_01.png HTTP/1.1" -2023/01/15 19:21:04 [error] client: 2.1.1.7, server: example.com "GET /apple-touch-icon-120x120.png HTTP/1.1" -2023/01/15 16:02:29 [error] client: 9.7.8.2, server: example.com "GET /apple-touch-icon-120x120.png HTTP/1.1" -2023/01/15 15:17:01 [error] client: 1.4.6.6, server: example.com "GET /apple-touch-icon-precomposed.png HTTP/1.1" -2023/01/15 15:34:32 [error] client: 6.2.9.9, server: example.com "GET /apple-touch-icon-120x120.png HTTP/1.1" -2023/01/16 05:46:20 [error] client: 6.5.6.5, server: example.com "GET /apple-touch-icon-120x120.png HTTP/1.1" -2023/01/15 16:56:33 [error] client: 6.4.9.1, server: example.com "GET /apple-touch-icon-precomposed.png HTTP/1.1" -2023/01/15 21:37:41 [error] client: 5.6.8.9, server: example.com "GET /img/close.png HTTP/1.1" -2023/01/15 16:28:54 [error] client: 1.2.5.1, server: example.com "GET /img/close.png HTTP/1.1" -2023/01/15 15:11:57 [error] client: 2.8.5.7, server: example.com "GET /img/close.png HTTP/1.1" -2023/01/15 23:44:57 [error] client: 1.4.7.9, server: example.com "GET /h5/static/cert/icon_yanzhengma.png HTTP/1.1" -2023/01/16 05:48:48 [error] client: 3.3.9.7, server: example.com "GET /phone/images/icon_01.png HTTP/1.1" -2023/01/16 12:32:31 [error] client: 1.5.3.6, server: example.com "GET /img/close.png HTTP/1.1" -2023/01/16 07:15:34 [error] client: 2.4.2.7, server: example.com "GET /phone/images/icon_01.png HTTP/1.1" -2023/01/16 10:01:17 [error] client: 2.6.6.9, server: example.com "GET /h5/static/cert/icon_yanzhengma.png HTTP/1.1" -2023/01/15 14:24:23 [error] client: 9.8.4.6, server: example.com "GET /apple-touch-icon-precomposed.png HTTP/1.1" -2023/01/15 18:23:32 [error] client: 7.8.2.6, server: example.com "GET 
/apple-touch-icon-120x120.png HTTP/1.1" -2023/01/15 22:27:32 [error] client: 8.3.2.6, server: example.com "GET /apple-touch-icon-120x120.png HTTP/1.1" -2023/01/16 07:02:49 [error] client: 6.5.3.5, server: example.com "GET /apple-touch-icon-120x120.png HTTP/1.1" -2023/01/15 16:47:34 [error] client: 5.3.9.3, server: example.com "GET /phone/images/icon_01.png HTTP/1.1" -2023/01/16 11:19:26 [error] client: 8.6.7.8, server: example.com "GET /apple-touch-icon-precomposed.png HTTP/1.1" -2023/01/15 13:37:10 [error] client: 4.5.7.3, server: example.com "GET /apple-touch-icon-precomposed.png HTTP/1.1" -2023/01/16 05:00:53 [error] client: 1.9.7.2, server: example.com "GET /h5/static/cert/icon_yanzhengma.png HTTP/1.1" -2023/01/16 11:10:28 [error] client: 7.5.1.6, server: example.com "GET /img/close.png HTTP/1.1" -2023/01/15 22:13:23 [error] client: 2.3.1.9, server: example.com "GET /apple-touch-icon-precomposed.png HTTP/1.1" -2023/01/16 12:32:45 [error] client: 1.5.5.1, server: example.com "GET /h5/static/cert/icon_yanzhengma.png HTTP/1.1" -2023/01/16 02:52:24 [error] client: 9.4.1.7, server: example.com "GET /h5/static/cert/icon_yanzhengma.png HTTP/1.1" -2023/01/16 09:43:02 [error] client: 2.7.4.4, server: example.com "GET /apple-touch-icon.png HTTP/1.1" -2023/01/15 15:08:31 [error] client: 8.3.8.5, server: example.com "GET /apple-touch-icon-precomposed.png HTTP/1.1" -2023/01/16 04:18:12 [error] client: 4.6.4.8, server: example.com "GET /apple-touch-icon-precomposed.png HTTP/1.1" -2023/01/16 06:02:23 [error] client: 8.6.2.6, server: example.com "GET /apple-touch-icon-120x120.png HTTP/1.1" -2023/01/16 06:09:50 [error] client: 3.7.1.9, server: example.com "GET /phone/images/icon_01.png HTTP/1.1" -2023/01/15 22:20:03 [error] client: 6.5.7.5, server: example.com "GET /h5/static/cert/icon_yanzhengma.png HTTP/1.1" -2023/01/15 20:06:28 [error] client: 1.1.6.9, server: example.com "GET /img/close.png HTTP/1.1" -2023/01/15 20:44:13 [error] client: 3.6.4.9, server: example.com "GET /apple-touch-icon-120x120.png HTTP/1.1" -2023/01/15 13:22:57 [error] client: 2.3.4.7, server: example.com "GET /apple-touch-icon-precomposed.png HTTP/1.1" -2023/01/16 08:53:01 [error] client: 2.2.5.1, server: example.com "GET /phone/images/icon_01.png HTTP/1.1" -2023/01/16 08:12:30 [error] client: 2.6.3.9, server: example.com "GET /phone/images/icon_01.png HTTP/1.1" -2023/01/16 12:14:14 [error] client: 8.4.8.9, server: example.com "GET /h5/static/cert/icon_yanzhengma.png HTTP/1.1" -2023/01/16 07:30:45 [error] client: 6.1.6.5, server: example.com "GET /apple-touch-icon.png HTTP/1.1" -2023/01/16 01:18:22 [error] client: 1.5.3.5, server: example.com "GET /img/close.png HTTP/1.1" -2023/01/16 07:53:45 [error] client: 1.7.9.1, server: example.com "GET /phone/images/icon_01.png HTTP/1.1" -2023/01/15 13:04:48 [error] client: 6.2.1.9, server: example.com "GET /h5/static/cert/icon_yanzhengma.png HTTP/1.1" -2023/01/15 22:36:09 [error] client: 2.2.5.4, server: example.com "GET /h5/static/cert/icon_yanzhengma.png HTTP/1.1" -2023/01/15 23:29:16 [error] client: 7.8.3.2, server: example.com "GET /apple-touch-icon-precomposed.png HTTP/1.1" -2023/01/16 09:54:45 [error] client: 9.3.3.8, server: example.com "GET /img/close.png HTTP/1.1" -2023/01/15 18:58:21 [error] client: 2.1.6.5, server: example.com "GET /apple-touch-icon.png HTTP/1.1" -2023/01/16 06:14:55 [error] client: 7.9.8.7, server: example.com "GET /phone/images/icon_01.png HTTP/1.1" -2023/01/15 16:32:25 [error] client: 8.8.1.6, server: example.com "GET /h5/static/cert/icon_yanzhengma.png 
HTTP/1.1" -2023/01/16 11:33:17 [error] client: 4.6.8.7, server: example.com "GET /apple-touch-icon.png HTTP/1.1" -2023/01/16 11:27:16 [error] client: 2.4.5.7, server: example.com "GET /h5/static/cert/icon_yanzhengma.png HTTP/1.1" -2023/01/16 01:01:30 [error] client: 7.7.1.9, server: example.com "GET /apple-touch-icon.png HTTP/1.1" -2023/01/16 04:45:38 [error] client: 4.6.3.7, server: example.com "GET /apple-touch-icon-120x120.png HTTP/1.1" -2023/01/15 20:20:55 [error] client: 1.1.8.5, server: example.com "GET /apple-touch-icon-precomposed.png HTTP/1.1" -2023/01/16 03:04:06 [error] client: 6.5.6.7, server: example.com "GET /apple-touch-icon-precomposed.png HTTP/1.1" -2023/01/15 20:58:45 [error] client: 1.2.6.8, server: example.com "GET /h5/static/cert/icon_yanzhengma.png HTTP/1.1" -2023/01/16 07:31:14 [error] client: 5.4.2.5, server: example.com "GET /apple-touch-icon-120x120.png HTTP/1.1" -2023/01/16 10:13:26 [error] client: 1.6.4.3, server: example.com "GET /img/close.png HTTP/1.1" -2023/01/16 02:59:43 [error] client: 5.9.1.1, server: example.com "GET /apple-touch-icon-precomposed.png HTTP/1.1" -2023/01/15 13:28:25 [error] client: 5.6.8.3, server: example.com "GET /apple-touch-icon-precomposed.png HTTP/1.1" -2023/01/15 15:09:12 [error] client: 4.3.4.1, server: example.com "GET /h5/static/cert/icon_yanzhengma.png HTTP/1.1" -2023/01/16 07:07:01 [error] client: 1.1.8.9, server: example.com "GET /img/close.png HTTP/1.1" -2023/01/16 12:30:47 [error] client: 6.5.7.1, server: example.com "GET /img/close.png HTTP/1.1" -2023/01/16 04:25:09 [error] client: 4.8.6.8, server: example.com "GET /phone/images/icon_01.png HTTP/1.1" -2023/01/15 17:04:11 [error] client: 5.1.8.6, server: example.com "GET /apple-touch-icon.png HTTP/1.1" -2023/01/15 17:48:08 [error] client: 4.2.5.2, server: example.com "GET /h5/static/cert/icon_yanzhengma.png HTTP/1.1" -2023/01/15 22:17:12 [error] client: 3.8.9.3, server: example.com "GET /apple-touch-icon.png HTTP/1.1" -2023/01/15 21:06:06 [error] client: 4.1.5.5, server: example.com "GET /h5/static/cert/icon_yanzhengma.png HTTP/1.1" -2023/01/16 05:29:27 [error] client: 8.3.6.7, server: example.com "GET /img/close.png HTTP/1.1" -2023/01/16 07:50:31 [error] client: 1.8.3.6, server: example.com "GET /apple-touch-icon-120x120.png HTTP/1.1" -2023/01/15 21:40:07 [error] client: 5.8.3.7, server: example.com "GET /apple-touch-icon-precomposed.png HTTP/1.1" -2023/01/16 05:07:40 [error] client: 9.1.7.7, server: example.com "GET /apple-touch-icon-120x120.png HTTP/1.1" -2023/01/16 02:28:18 [error] client: 5.7.2.6, server: example.com "GET /apple-touch-icon-precomposed.png HTTP/1.1" -2023/01/16 05:33:34 [error] client: 7.6.3.5, server: example.com "GET /phone/images/icon_01.png HTTP/1.1" -2023/01/15 23:58:06 [error] client: 1.9.6.7, server: example.com "GET /apple-touch-icon-120x120.png HTTP/1.1" -2023/01/16 00:19:27 [error] client: 7.2.5.8, server: example.com "GET /apple-touch-icon-120x120.png HTTP/1.1" -2023/01/15 17:43:39 [error] client: 7.2.3.8, server: example.com "GET /apple-touch-icon.png HTTP/1.1" -2023/01/15 13:19:01 [error] client: 1.6.5.9, server: example.com "GET /img/close.png HTTP/1.1" -2023/01/15 20:25:04 [error] client: 7.2.3.2, server: example.com "GET /apple-touch-icon-precomposed.png HTTP/1.1" -2023/01/15 19:49:23 [error] client: 1.4.4.3, server: example.com "GET /apple-touch-icon-precomposed.png HTTP/1.1" -2023/01/16 06:00:29 [error] client: 1.2.6.9, server: example.com "GET /apple-touch-icon-precomposed.png HTTP/1.1" -2023/01/16 04:48:28 [error] client: 5.7.7.3, 
server: example.com "GET /apple-touch-icon.png HTTP/1.1" -2023/01/16 11:31:49 [error] client: 6.7.8.3, server: example.com "GET /apple-touch-icon-120x120.png HTTP/1.1" -2023/01/16 04:01:04 [error] client: 1.8.5.5, server: example.com "GET /h5/static/cert/icon_yanzhengma.png HTTP/1.1" -2023/01/16 05:30:40 [error] client: 9.8.3.7, server: example.com "GET /apple-touch-icon-precomposed.png HTTP/1.1" -2023/01/15 22:34:44 [error] client: 1.3.7.3, server: example.com "GET /img/close.png HTTP/1.1" -2023/01/16 11:12:22 [error] client: 1.6.1.1, server: example.com "GET /phone/images/icon_01.png HTTP/1.1" -2023/01/16 08:42:02 [error] client: 6.2.5.1, server: example.com "GET /apple-touch-icon-precomposed.png HTTP/1.1" -2023/01/16 07:03:57 [error] client: 4.9.5.4, server: example.com "GET /apple-touch-icon-precomposed.png HTTP/1.1" -2023/01/16 01:00:45 [error] client: 2.6.3.5, server: example.com "GET /apple-touch-icon-120x120.png HTTP/1.1" -2023/01/15 20:28:59 [error] client: 7.4.8.8, server: example.com "GET /apple-touch-icon-precomposed.png HTTP/1.1" -2023/01/16 12:46:16 [error] client: 8.8.9.8, server: example.com "GET /apple-touch-icon.png HTTP/1.1" -2023/01/16 11:04:11 [error] client: 4.1.1.6, server: example.com "GET /apple-touch-icon.png HTTP/1.1" -2023/01/15 18:38:50 [error] client: 3.4.5.1, server: example.com "GET /apple-touch-icon-precomposed.png HTTP/1.1" -2023/01/15 20:09:53 [error] client: 3.1.6.8, server: example.com "GET /apple-touch-icon-120x120.png HTTP/1.1" -2023/01/15 15:36:48 [error] client: 1.1.6.1, server: example.com "GET /apple-touch-icon-120x120.png HTTP/1.1" -2023/01/15 21:57:10 [error] client: 6.5.1.6, server: example.com "GET /apple-touch-icon-120x120.png HTTP/1.1" -2023/01/16 12:53:20 [error] client: 8.3.9.9, server: example.com "GET /apple-touch-icon-120x120.png HTTP/1.1" -2023/01/16 06:54:23 [error] client: 5.3.7.6, server: example.com "GET /apple-touch-icon.png HTTP/1.1" -2023/01/15 13:15:49 [error] client: 2.2.8.5, server: example.com "GET /h5/static/cert/icon_yanzhengma.png HTTP/1.1" -2023/01/16 04:50:08 [error] client: 4.9.7.5, server: example.com "GET /img/close.png HTTP/1.1" -2023/01/15 16:42:19 [error] client: 2.4.8.9, server: example.com "GET /h5/static/cert/icon_yanzhengma.png HTTP/1.1" -2023/01/15 21:02:38 [error] client: 9.4.2.6, server: example.com "GET /apple-touch-icon.png HTTP/1.1" -2023/01/15 13:57:08 [error] client: 9.4.3.6, server: example.com "GET /h5/static/cert/icon_yanzhengma.png HTTP/1.1" -2023/01/15 18:28:37 [error] client: 3.8.3.2, server: example.com "GET /apple-touch-icon-precomposed.png HTTP/1.1" -2023/01/16 10:05:47 [error] client: 3.7.7.2, server: example.com "GET /img/close.png HTTP/1.1" -2023/01/16 03:48:52 [error] client: 9.9.8.2, server: example.com "GET /apple-touch-icon-120x120.png HTTP/1.1" -2023/01/16 06:46:42 [error] client: 6.6.2.3, server: example.com "GET /h5/static/cert/icon_yanzhengma.png HTTP/1.1" -2023/01/16 08:00:21 [error] client: 7.2.3.4, server: example.com "GET /h5/static/cert/icon_yanzhengma.png HTTP/1.1" -2023/01/16 06:24:38 [error] client: 8.6.2.4, server: example.com "GET /phone/images/icon_01.png HTTP/1.1" -2023/01/15 14:10:45 [error] client: 3.6.7.7, server: example.com "GET /apple-touch-icon-120x120.png HTTP/1.1" -2023/01/15 21:01:39 [error] client: 9.8.2.2, server: example.com "GET /h5/static/cert/icon_yanzhengma.png HTTP/1.1" -2023/01/15 23:47:11 [error] client: 6.2.7.7, server: example.com "GET /phone/images/icon_01.png HTTP/1.1" -2023/01/16 02:07:20 [error] client: 7.4.7.8, server: example.com "GET 
/apple-touch-icon-precomposed.png HTTP/1.1" -2023/01/15 23:15:01 [error] client: 7.5.8.6, server: example.com "GET /apple-touch-icon-120x120.png HTTP/1.1" -2023/01/15 16:57:07 [error] client: 2.3.8.9, server: example.com "GET /apple-touch-icon-120x120.png HTTP/1.1" -2023/01/15 17:50:55 [error] client: 1.7.2.1, server: example.com "GET /phone/images/icon_01.png HTTP/1.1" -2023/01/16 10:13:46 [error] client: 1.7.5.4, server: example.com "GET /h5/static/cert/icon_yanzhengma.png HTTP/1.1" -2023/01/15 17:40:08 [error] client: 3.7.5.5, server: example.com "GET /apple-touch-icon-120x120.png HTTP/1.1" -2023/01/15 19:03:47 [error] client: 6.9.1.9, server: example.com "GET /apple-touch-icon.png HTTP/1.1" -2023/01/16 06:38:50 [error] client: 9.1.5.6, server: example.com "GET /h5/static/cert/icon_yanzhengma.png HTTP/1.1" -2023/01/16 05:56:34 [error] client: 7.4.9.9, server: example.com "GET /phone/images/icon_01.png HTTP/1.1" -2023/01/15 15:58:14 [error] client: 4.5.1.3, server: example.com "GET /img/close.png HTTP/1.1" -2023/01/16 00:15:04 [error] client: 5.7.4.2, server: example.com "GET /h5/static/cert/icon_yanzhengma.png HTTP/1.1" -2023/01/16 10:26:02 [error] client: 6.7.5.1, server: example.com "GET /img/close.png HTTP/1.1" -2023/01/15 16:42:28 [error] client: 6.2.3.9, server: example.com "GET /apple-touch-icon-120x120.png HTTP/1.1" -2023/01/16 10:37:44 [error] client: 6.6.8.9, server: example.com "GET /apple-touch-icon-precomposed.png HTTP/1.1" -2023/01/16 02:16:46 [error] client: 5.6.2.5, server: example.com "GET /apple-touch-icon-120x120.png HTTP/1.1" -2023/01/15 22:29:52 [error] client: 2.9.8.4, server: example.com "GET /h5/static/cert/icon_yanzhengma.png HTTP/1.1" -2023/01/15 18:42:35 [error] client: 5.1.5.5, server: example.com "GET /apple-touch-icon.png HTTP/1.1" -2023/01/15 20:43:41 [error] client: 1.1.8.4, server: example.com "GET /apple-touch-icon.png HTTP/1.1" -2023/01/15 21:24:56 [error] client: 8.7.5.5, server: example.com "GET /img/close.png HTTP/1.1" -2023/01/15 17:44:06 [error] client: 5.6.4.9, server: example.com "GET /h5/static/cert/icon_yanzhengma.png HTTP/1.1" -2023/01/15 14:20:58 [error] client: 7.6.4.1, server: example.com "GET /phone/images/icon_01.png HTTP/1.1" -2023/01/16 07:32:38 [error] client: 8.3.9.2, server: example.com "GET /apple-touch-icon-precomposed.png HTTP/1.1" -2023/01/15 21:26:49 [error] client: 5.2.9.1, server: example.com "GET /phone/images/icon_01.png HTTP/1.1" -2023/01/16 09:15:19 [error] client: 2.8.5.2, server: example.com "GET /h5/static/cert/icon_yanzhengma.png HTTP/1.1" -2023/01/15 22:58:49 [error] client: 9.7.4.6, server: example.com "GET /apple-touch-icon.png HTTP/1.1" -2023/01/15 17:09:28 [error] client: 3.9.3.8, server: example.com "GET /apple-touch-icon-120x120.png HTTP/1.1" -2023/01/16 08:35:18 [error] client: 3.8.6.7, server: example.com "GET /apple-touch-icon.png HTTP/1.1" -2023/01/15 20:04:01 [error] client: 9.4.8.9, server: example.com "GET /img/close.png HTTP/1.1" -2023/01/15 19:10:04 [error] client: 4.8.3.7, server: example.com "GET /apple-touch-icon-precomposed.png HTTP/1.1" -2023/01/15 13:44:55 [error] client: 6.8.2.1, server: example.com "GET /apple-touch-icon.png HTTP/1.1" -2023/01/16 00:46:37 [error] client: 8.9.4.3, server: example.com "GET /h5/static/cert/icon_yanzhengma.png HTTP/1.1" -2023/01/15 17:54:09 [error] client: 1.5.5.5, server: example.com "GET /h5/static/cert/icon_yanzhengma.png HTTP/1.1" -2023/01/16 04:10:33 [error] client: 3.4.4.4, server: example.com "GET /apple-touch-icon-120x120.png HTTP/1.1" -2023/01/15 14:32:17 
[error] client: 8.7.5.8, server: example.com "GET /apple-touch-icon-120x120.png HTTP/1.1" -2023/01/15 22:40:14 [error] client: 5.1.1.2, server: example.com "GET /apple-touch-icon-120x120.png HTTP/1.1" -2023/01/15 21:46:50 [error] client: 7.2.9.7, server: example.com "GET /apple-touch-icon-precomposed.png HTTP/1.1" -2023/01/16 07:24:17 [error] client: 1.9.1.7, server: example.com "GET /apple-touch-icon-precomposed.png HTTP/1.1" -2023/01/16 01:55:21 [error] client: 9.5.1.9, server: example.com "GET /apple-touch-icon-120x120.png HTTP/1.1" -2023/01/16 00:42:33 [error] client: 6.7.3.9, server: example.com "GET /img/close.png HTTP/1.1" -2023/01/15 21:46:46 [error] client: 4.8.7.9, server: example.com "GET /img/close.png HTTP/1.1" -2023/01/16 07:16:20 [error] client: 3.9.6.2, server: example.com "GET /apple-touch-icon-precomposed.png HTTP/1.1" -2023/01/15 18:53:39 [error] client: 9.4.3.3, server: example.com "GET /apple-touch-icon-120x120.png HTTP/1.1" -2023/01/16 00:22:56 [error] client: 2.7.7.4, server: example.com "GET /apple-touch-icon-120x120.png HTTP/1.1" -2023/01/16 03:53:25 [error] client: 7.9.6.7, server: example.com "GET /apple-touch-icon-precomposed.png HTTP/1.1" -2023/01/16 11:25:09 [error] client: 2.6.1.2, server: example.com "GET /phone/images/icon_01.png HTTP/1.1" -2023/01/16 12:44:45 [error] client: 1.5.6.4, server: example.com "GET /apple-touch-icon.png HTTP/1.1" -2023/01/16 09:01:18 [error] client: 7.4.8.6, server: example.com "GET /apple-touch-icon.png HTTP/1.1" -2023/01/16 05:30:56 [error] client: 5.5.5.4, server: example.com "GET /apple-touch-icon-precomposed.png HTTP/1.1" -2023/01/16 05:17:14 [error] client: 9.4.4.2, server: example.com "GET /phone/images/icon_01.png HTTP/1.1" -2023/01/16 10:17:43 [error] client: 1.6.4.8, server: example.com "GET /phone/images/icon_01.png HTTP/1.1" -2023/01/16 03:30:01 [error] client: 3.3.6.4, server: example.com "GET /phone/images/icon_01.png HTTP/1.1" -2023/01/15 15:19:14 [error] client: 2.6.8.3, server: example.com "GET /apple-touch-icon-120x120.png HTTP/1.1" -2023/01/16 09:16:27 [error] client: 8.1.2.2, server: example.com "GET /phone/images/icon_01.png HTTP/1.1" -2023/01/16 11:49:37 [error] client: 2.7.6.9, server: example.com "GET /apple-touch-icon-precomposed.png HTTP/1.1" -2023/01/16 09:14:10 [error] client: 8.2.2.2, server: example.com "GET /img/close.png HTTP/1.1" -2023/01/16 12:22:46 [error] client: 8.8.3.6, server: example.com "GET /apple-touch-icon-precomposed.png HTTP/1.1" -2023/01/16 05:24:43 [error] client: 9.3.6.4, server: example.com "GET /apple-touch-icon-precomposed.png HTTP/1.1" -2023/01/15 13:28:40 [error] client: 7.4.3.4, server: example.com "GET /h5/static/cert/icon_yanzhengma.png HTTP/1.1" -2023/01/16 06:38:41 [error] client: 8.5.7.9, server: example.com "GET /apple-touch-icon-precomposed.png HTTP/1.1" -2023/01/16 03:19:59 [error] client: 4.1.7.5, server: example.com "GET /apple-touch-icon-precomposed.png HTTP/1.1" -2023/01/16 10:18:06 [error] client: 6.4.7.1, server: example.com "GET /img/close.png HTTP/1.1" -2023/01/15 17:06:17 [error] client: 2.1.7.2, server: example.com "GET /apple-touch-icon.png HTTP/1.1" -2023/01/15 19:19:50 [error] client: 5.7.4.4, server: example.com "GET /img/close.png HTTP/1.1" -2023/01/15 13:49:37 [error] client: 9.5.1.9, server: example.com "GET /img/close.png HTTP/1.1" -2023/01/15 22:08:03 [error] client: 4.6.7.2, server: example.com "GET /img/close.png HTTP/1.1" -2023/01/16 06:34:28 [error] client: 9.1.7.4, server: example.com "GET /apple-touch-icon.png HTTP/1.1" -2023/01/16 12:47:13 
[error] client: 1.9.5.4, server: example.com "GET /h5/static/cert/icon_yanzhengma.png HTTP/1.1" -2023/01/16 02:20:49 [error] client: 4.4.5.3, server: example.com "GET /apple-touch-icon-precomposed.png HTTP/1.1" -2023/01/16 06:11:27 [error] client: 5.5.6.2, server: example.com "GET /apple-touch-icon-precomposed.png HTTP/1.1" -2023/01/16 09:15:22 [error] client: 2.9.4.5, server: example.com "GET /apple-touch-icon-120x120.png HTTP/1.1" -2023/01/16 02:06:15 [error] client: 7.9.9.9, server: example.com "GET /apple-touch-icon-120x120.png HTTP/1.1" -2023/01/16 01:27:41 [error] client: 8.3.4.7, server: example.com "GET /apple-touch-icon-120x120.png HTTP/1.1" -2023/01/16 00:41:12 [error] client: 3.7.8.1, server: example.com "GET /h5/static/cert/icon_yanzhengma.png HTTP/1.1" -2023/01/15 16:34:44 [error] client: 5.9.2.8, server: example.com "GET /h5/static/cert/icon_yanzhengma.png HTTP/1.1" -2023/01/16 04:31:52 [error] client: 8.6.6.1, server: example.com "GET /img/close.png HTTP/1.1" -2023/01/16 06:09:26 [error] client: 9.7.9.8, server: example.com "GET /apple-touch-icon-precomposed.png HTTP/1.1" -2023/01/16 02:41:44 [error] client: 6.9.3.7, server: example.com "GET /img/close.png HTTP/1.1" -2023/01/16 08:21:30 [error] client: 6.9.6.4, server: example.com "GET /apple-touch-icon-precomposed.png HTTP/1.1" -2023/01/15 19:20:51 [error] client: 6.2.2.7, server: example.com "GET /img/close.png HTTP/1.1" -2023/01/15 23:11:18 [error] client: 9.7.1.1, server: example.com "GET /apple-touch-icon-120x120.png HTTP/1.1" -2023/01/15 19:18:46 [error] client: 4.3.2.3, server: example.com "GET /img/close.png HTTP/1.1" -2023/01/15 19:06:42 [error] client: 9.2.5.5, server: example.com "GET /img/close.png HTTP/1.1" -2023/01/15 21:54:24 [error] client: 8.7.3.4, server: example.com "GET /img/close.png HTTP/1.1" -2023/01/15 18:28:01 [error] client: 9.6.9.3, server: example.com "GET /apple-touch-icon.png HTTP/1.1" -2023/01/16 02:05:06 [error] client: 4.1.2.2, server: example.com "GET /apple-touch-icon-120x120.png HTTP/1.1" -2023/01/16 06:53:28 [error] client: 5.6.5.1, server: example.com "GET /phone/images/icon_01.png HTTP/1.1" -2023/01/16 04:21:32 [error] client: 4.6.4.2, server: example.com "GET /apple-touch-icon-120x120.png HTTP/1.1" -2023/01/16 02:22:44 [error] client: 2.4.8.7, server: example.com "GET /phone/images/icon_01.png HTTP/1.1" -2023/01/15 14:43:47 [error] client: 4.6.3.2, server: example.com "GET /img/close.png HTTP/1.1" -2023/01/15 23:08:00 [error] client: 2.3.8.6, server: example.com "GET /img/close.png HTTP/1.1" -2023/01/15 20:25:30 [error] client: 6.2.2.6, server: example.com "GET /phone/images/icon_01.png HTTP/1.1" -2023/01/16 12:44:00 [error] client: 9.8.2.2, server: example.com "GET /phone/images/icon_01.png HTTP/1.1" -2023/01/16 07:59:49 [error] client: 5.1.9.8, server: example.com "GET /apple-touch-icon.png HTTP/1.1" -2023/01/15 14:03:40 [error] client: 1.2.3.9, server: example.com "GET /h5/static/cert/icon_yanzhengma.png HTTP/1.1" -2023/01/16 01:18:27 [error] client: 7.9.2.2, server: example.com "GET /h5/static/cert/icon_yanzhengma.png HTTP/1.1" -2023/01/15 19:20:23 [error] client: 8.6.2.9, server: example.com "GET /img/close.png HTTP/1.1" -2023/01/16 09:57:24 [error] client: 8.2.5.2, server: example.com "GET /h5/static/cert/icon_yanzhengma.png HTTP/1.1" -2023/01/16 02:55:12 [error] client: 7.9.6.1, server: example.com "GET /phone/images/icon_01.png HTTP/1.1" -2023/01/16 05:11:59 [error] client: 5.8.8.2, server: example.com "GET /apple-touch-icon.png HTTP/1.1" -2023/01/15 21:00:56 [error] client: 
7.1.1.1, server: example.com "GET /apple-touch-icon-precomposed.png HTTP/1.1" -2023/01/15 16:12:31 [error] client: 2.1.9.2, server: example.com "GET /apple-touch-icon-120x120.png HTTP/1.1" -2023/01/15 23:36:19 [error] client: 3.4.6.8, server: example.com "GET /phone/images/icon_01.png HTTP/1.1" -2023/01/15 23:07:53 [error] client: 8.4.9.5, server: example.com "GET /phone/images/icon_01.png HTTP/1.1" -2023/01/15 19:12:11 [error] client: 3.8.6.2, server: example.com "GET /apple-touch-icon.png HTTP/1.1" -2023/01/15 15:17:03 [error] client: 1.7.6.9, server: example.com "GET /phone/images/icon_01.png HTTP/1.1" -2023/01/15 13:16:50 [error] client: 8.7.6.9, server: example.com "GET /img/close.png HTTP/1.1" -2023/01/15 15:14:55 [error] client: 4.9.8.4, server: example.com "GET /apple-touch-icon-precomposed.png HTTP/1.1" -2023/01/16 08:32:13 [error] client: 4.7.9.4, server: example.com "GET /phone/images/icon_01.png HTTP/1.1" -2023/01/15 20:25:00 [error] client: 2.1.8.1, server: example.com "GET /apple-touch-icon-precomposed.png HTTP/1.1" -2023/01/15 23:52:40 [error] client: 4.5.8.2, server: example.com "GET /img/close.png HTTP/1.1" -2023/01/16 07:50:04 [error] client: 6.6.7.3, server: example.com "GET /apple-touch-icon-120x120.png HTTP/1.1" -2023/01/15 18:49:55 [error] client: 3.1.7.7, server: example.com "GET /phone/images/icon_01.png HTTP/1.1" -2023/01/15 13:54:30 [error] client: 9.7.6.8, server: example.com "GET /apple-touch-icon-120x120.png HTTP/1.1" -2023/01/15 18:31:48 [error] client: 3.5.1.2, server: example.com "GET /img/close.png HTTP/1.1" -2023/01/15 22:18:43 [error] client: 2.6.8.7, server: example.com "GET /h5/static/cert/icon_yanzhengma.png HTTP/1.1" -2023/01/16 02:25:35 [error] client: 3.3.6.7, server: example.com "GET /h5/static/cert/icon_yanzhengma.png HTTP/1.1" -2023/01/15 14:53:17 [error] client: 2.2.9.7, server: example.com "GET /apple-touch-icon-precomposed.png HTTP/1.1" -2023/01/16 06:53:27 [error] client: 1.7.2.1, server: example.com "GET /phone/images/icon_01.png HTTP/1.1" -2023/01/16 10:58:08 [error] client: 4.6.2.8, server: example.com "GET /apple-touch-icon.png HTTP/1.1" -2023/01/15 23:52:39 [error] client: 9.3.6.4, server: example.com "GET /h5/static/cert/icon_yanzhengma.png HTTP/1.1" -2023/01/16 07:51:37 [error] client: 4.6.2.4, server: example.com "GET /apple-touch-icon-120x120.png HTTP/1.1" -2023/01/16 09:25:43 [error] client: 8.9.1.1, server: example.com "GET /h5/static/cert/icon_yanzhengma.png HTTP/1.1" -2023/01/16 11:43:40 [error] client: 5.7.9.6, server: example.com "GET /h5/static/cert/icon_yanzhengma.png HTTP/1.1" -2023/01/16 09:18:45 [error] client: 4.4.2.8, server: example.com "GET /apple-touch-icon-precomposed.png HTTP/1.1" -2023/01/16 02:24:08 [error] client: 4.2.1.9, server: example.com "GET /h5/static/cert/icon_yanzhengma.png HTTP/1.1" -2023/01/16 12:46:10 [error] client: 6.6.8.4, server: example.com "GET /phone/images/icon_01.png HTTP/1.1" -2023/01/15 23:05:58 [error] client: 9.6.2.9, server: example.com "GET /apple-touch-icon-120x120.png HTTP/1.1" -2023/01/16 08:43:58 [error] client: 7.6.6.2, server: example.com "GET /apple-touch-icon.png HTTP/1.1" -2023/01/16 00:07:25 [error] client: 4.1.1.2, server: example.com "GET /apple-touch-icon-120x120.png HTTP/1.1" -2023/01/15 16:29:54 [error] client: 8.3.6.1, server: example.com "GET /img/close.png HTTP/1.1" -2023/01/16 10:19:15 [error] client: 4.1.7.8, server: example.com "GET /img/close.png HTTP/1.1" -2023/01/16 05:49:56 [error] client: 7.7.6.4, server: example.com "GET /img/close.png HTTP/1.1" -2023/01/16 
[... remaining nginx error.log sample entries from the deleted file (2023/01/15 to 2023/01/16, requests for static assets on example.com) omitted ...]
-2023/01/16 01:30:29 [error] client: 3.1.8.9, server: example.com "GET /h5/static/cert/icon_yanzhengma.png HTTP/1.1"
\ No newline at end of file
diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/assets/export.parquet b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/assets/export.parquet
deleted file mode 100644
index eb83250bfc9fe554f790fb0669c21d9f8987192f..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 22969
z4F2W-S?v%%r_d?jv}tiMTXBXaBQE5yTbH?M(Q^Fzl?P1`rB2#kIaVd0mu@Q64xS=C z1nb-Xd=cp@3FJxXC2NJ4+|<%EbGyrcG7`r*E4EMHEy5-D_llmqVYLT-#2LZ_OW_ zDcumb%Zu7(0%)LKy~&|9po#aaCoOdLB$x@aqqcfsIGYCExuej5`=NU0;Zym++=wbh ze7=v-!?=VaZ*jJ-nT1miO}zxAWY+ynFTl{Y zV))C}l=DVh84L(VT3;7S3=II4`SF5|RVR=Z;MM*$l>DwADnxMV~pM;biXym*yniQjM@nEiEh zZfi-v!L)|o$5AXCcdqU;+HjZcYk?UkU|X|Qs6$KZ3!3sy-EQE zdl7piTSA|8%{|G=Tj{nggqNuYw~?-uOq3q1q=y}3o1YG^y5I+CAK|;b_;Ldo90P6T zs-`)a86#J5&h!{gL#y=L<7}W1DU&?8v z|5(Kj;5aJyCv@;(I#~jx%9?m0(B}kd%KOyUuE`(GQ?<&rbxfqHZ5^o~?$3(E4~lwU z1>h&3#+~Z)Rh(v+^ccsdzrco|jFaI~E@oQdYuu_7RuSJEqJj{jgQ~^Ee}J*1XKxYU zb!@BKZ<73^@c5`#Qj#*{r>T6Unc_J6m1rXZFj}4pFY}?d1rdQ_bl&Y zod1&Tk2z{~U;kJ^?Y-|jRv6mn{U_gaVEaFd+74a+PqOOBJO31#~GlL^xpo)7rGWd+jL+k`fT&T*k7J)c_Z`Qv#sx(wfMQV zLzhIKYyWWfU!Gg}$-VDAcgz?27C*o0$bsnd$9{k4FVC<3`N(_EAII4JFRWq9VlPNs zVD}3heDk3fI)$+R#V+TT*o$G8y8Fd$PxjD@YrT{HfA#n;js2^)X3y?_t*gE7(7)D) zX8bSpHN6~rslRP*_e&dAeRJrgjce?I0|Q-U>VZu?fjtK{_chNQ7#s)(ULG3SqQ1Q4 zcy-UqTTjl;y}WJvWZ;$KW0$J0oRHYF=am!n`{rIbDK!&#_2kUU>Z_;hn%nd0si%E2 z_o{q`z53wxv&x1KD(41nJQ(?X^ZN&*7lo@|i(Rs1_%-zk^~TqRug<>zTKxLS>emyy zFCBheyLr!zukX0+zV}}rxpSua-}=2T5C1#)z}$`hHXi=w{ePQ}+LyeM+E*5TW9O5B zo8Cz8Z~ov7>xJ-=H!}ye#NW&wRBw87^o{HXZ;rh)x#X=~hc1o3mHTkdO>d2Va^DAU z{pyREC2ya0oOEYHqyw-Scbz_Tjs~3q4-*-tU`UNxXMK z+xs`acj2mUKYZ^GYmQ!e=%OxBJM_n%>b-|9?rS-G=uZRPOXn^b+N#Z6di?O-xyw!- zJv?{$_OqA1e?{yv?fpL|ZruC+mHOWfzkgNg@ueSJoq0w3;F?|U@BQG~)4o0Y!F6XG zz3jv5&k}chICXCIEg#feES3M-}3Pt4}bg7$A5kF=;fc>xlbJV zHZ~f%%{VgAVa?cCh%RjyMz}AsZ?>jhr>!*KvWAx)s?|cWScYZ)9Fge)s0C4{rbN zt<%2y^t-pum|yq(J7<*}-_M@AHTethQ>bw7P_@2kd7pFZ%x9Y1~c@OPj6^!cOn zSN#0NzH;;DFP~iU*Pp-I-}?E_U%#++#ea?**k=Ccn}hMc{^#2_#yU~B zW6S0-NVEBWnEXFi%qzPdU$NHxzu&O-d)pVkyKN1_JcDrXI~k@9V;zQuaSq0f82d2Z!HU50h|Aoi_Kps{?q$K&(W1klkFngyp>1Qur+r7(aYvnan+7c zL8h>VuPgEsSFu}IRg$JFOYgQhtII-Sb@>|Cjs>;uh6=x@dSP2--=bRYD&MlI>aQNe zB+vW5CmG*0qyC>szL*uSXR9xMU|#jV?wgmN%I`! 
zZpL9_pFYaLF#*SXmT?wwj01zisKW?j%){_u6k#Yu92<8z9Ly8>SD%e%Y#0kLPQ(z3 zHZh?hq3Cj!Ikre(2%ah`k{F7|6?oi>@dZW-<6?|O7&xBG9F5Umq%jNeIyWBAE4qjY z;_+@gz6|4Bi~)>7jExwz82?jrCo_T9RO9hxjN^;EOeuc$;xUUcAEO=PF^vBf`Iu$+ zT>&23F;v{dPXcbDDt{9mJhK!-!boClF8V(B2+MR9xo}Y&a|}i?#u^L}qq*n`=Kt#D z{G*yW&p7_xo0}hmToY)5Nec1uLx{9#0|dw5iZ_5l3upk>qHbM44pO$Rs67}N*h>QP zqrzg_^=Lb7kg3#-Q*^7RTI&+PR$XbM(j#-~0(Bc}wF^DCu^rF4Z?rpSfApW7v+bOG z&imZwectzd?t7oSd6Jv^ZbN5Y$j@3D$a=`1Le6KKhz|01Am0R!gAA~ZEg=oi&q6K$ z-vW0)F8D6m^QhYg*#{cIX|Sq%jvOjqMozH;39ur$%9;u8WnydvQ+o)Gdn^%utz?j$ zCA*`RFk}sCSAv&VC$5H>yaw-w$|=kH3|WAvg>6@P1$hGUWl)Pjx5GRJI6*Sd!=?nK z%JNMlHylDhPZq;&C9r`@;1P`10)0O0MA)o@?or5D-~!kJ%jkU98Rh<2M%cLZ`D%6gCn3Wj@#3;vWrpM=QRgmzCsp9TNE zL^%xo6ze0;Lf?kt5q5}pAb$o~1h0eI;bY_|8zjqN>&NlmKosk`cMgAst{BJbQARFH zD^OrAzC{0j8K%fJ_?$6uo-Cet8vdE#i-KGSR`_(aJccZpm?Isq`Q`1Xdl$YJ{p?5m zpTGs^H(A2yQ;&#tkT+1ii#`S|B?#2F;Y*O5u=vUIm;))B!}zJF7(7*tD5sx^L;-#u zL5sG1^+*J5Vd@Gb$QRkIaR}JP+n<jIw`7aEq&BmzE0Jninbi18M35g5e8d*D{rwvH?MA*U{Nr z*~r$j^egi95A(s}ou!MTpE|>ZMW>W{C`%CSv8f)tVZ(dgWn-g(MOoSpeFN6QLSy## zOoH?BVSn^DBm(E0llAvMx@6|uzQSpW%~dzH?wu3f_sFw$PVNXhyY?rkO+g_ny7y$r z3Fp{4P4%S+pNGy@k+Tv_i>B;1W5pNb32k@pI>N}}?|=Zh|-JpSXI z)h%I8)md6;U16;pD^4wMt{>8mL@`I!95A&Nko1@Js>g(G=56>%*Q&0=0bsoQZh~c zUMZgBG3M3}alW&3v~j1Ia|r&f&Wgk4R$s;oakMe7URW4#N4w=k4U?nJK%7R&{2-AW zlN7P)Hf>YIxjVvHiSktQs?f*3zVc>*qF)oP`m}4m(ce7l(v-=#xY1_kf^D}fy(l~1 zG2~?Y9=WzrbM#o@xWsVM#80)0dWtcbw7(Gj`t}5MeVs4tley&zGkKMS>{NoSf0?)a z;eE&D>Q`!TK3)Bk+Qcn5de`rg+Zv_}WQLL|Gn=ag!9#b}s~dPUE9c4P19ao z!^JZyeK=I%l`G8-S2z?nlCZpfl2mkvHf8qV6%qUfZ> zCT>I&K3ZhFV&(;}jB+a&Gx4?8$(QG)cjQ!h*+8m9n{s=v?@%f8%x^QBen@&`ow3*J zhJwEz+eobxO^$^V()d{6!O@F?Pf{4Q9L#Tbo9+fIHB_M%bw&vd^qq@X7Y8) zpw6vx2_BD8X;czdO@~{l)J`T=cZKZihAls7b2$cOG3r~wTtgQr9{-E|#B~C-#1Mdh;#8d+>%tTH@tgVgC(8K>oGDu8+n%t+wY#0X@@B$0g^q z2mcr#Ql+iv)(b*A{VOV6F~4m&p`=W@Cg^tv?Yr0h!|e2a@n#V#TA6sWY=&YP#Q~<( zN>OxsdN(hKPG_$?*Th#_2Uqu{6|@dLR+T)aa;t5;`K?dv_*UJh-CHvdU`FLdEP8XFdxlknU(c9?k1vZ<+wi#!vEn^{~2{`;L_saj)qRF$k< TXW-)rx&I;_eCjx`Y4bk;4Lwte diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/assets/html.results b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/assets/html.results deleted file mode 100644 index fdc03901e52..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/assets/html.results +++ /dev/null @@ -1,14 +0,0 @@ -

-Top 10 IPs
-IP  Requests
-${data}
-Query information
-Rows read
-${rows_read:Escaped}
-Time spent
-${time:Escaped}
diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/assets/html.row b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/assets/html.row
deleted file mode 100644
index 449fba0b9bc..00000000000
--- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/assets/html.row
+++ /dev/null
@@ -1,4 +0,0 @@
-
- ${ip:Escaped}
- ${total:Escaped}
-
\ No newline at end of file
diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/assets/list-nested.json b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/assets/list-nested.json
deleted file mode 100644
index 7bf195ba46c..00000000000
--- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/assets/list-nested.json
+++ /dev/null
@@ -1,29 +0,0 @@
-[
-  {
-    "page": {
-      "path": "Akiba_Hebrew_Academy",
-      "title": "Akiba Hebrew Academy",
-      "owner_id": 12
-    },
-    "month": "2017-08-01",
-    "hits": 241
-  },
-  {
-    "page": {
-      "path": "Aegithina_tiphia",
-      "title": "Aegithina Tiphia",
-      "owner_id": 3
-    },
-    "month": "2018-02-01",
-    "hits": 34
-  },
-  {
-    "page": {
-      "path": "1971-72_Utah_Stars_season",
-      "title": "Utah Stars: 71/72 season",
-      "owner_id": 432
-    },
-    "month": "2016-10-01",
-    "hits": 1
-  }
-]
diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/assets/list.json b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/assets/list.json
deleted file mode 100644
index 7c749a3a7b9..00000000000
--- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/assets/list.json
+++ /dev/null
@@ -1,17 +0,0 @@
-[
-  {
-    "path": "Akiba_Hebrew_Academy",
-    "month": "2017-08-01",
-    "hits": 241
-  },
-  {
-    "path": "Aegithina_tiphia",
-    "month": "2018-02-01",
-    "hits": 34
-  },
-  {
-    "path": "1971-72_Utah_Stars_season",
-    "month": "2016-10-01",
-    "hits": 1
-  }
-]
diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/assets/mysql.sql b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/assets/mysql.sql
deleted file mode 100644
index 527c20d94d0..00000000000
--- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/assets/mysql.sql
+++ /dev/null
@@ -1,51 +0,0 @@
--- MySQL dump 10.13 Distrib 8.0.31, for Linux (x86_64)
---
--- Host: localhost Database: test
--- ------------------------------------------------------
--- Server version 8.0.31-0ubuntu0.22.04.1
-
-/*!40101 SET @OLD_CHARACTER_SET_CLIENT=@@CHARACTER_SET_CLIENT */;
-/*!40101 SET @OLD_CHARACTER_SET_RESULTS=@@CHARACTER_SET_RESULTS */;
-/*!40101 SET @OLD_COLLATION_CONNECTION=@@COLLATION_CONNECTION */;
-/*!50503 SET NAMES utf8mb4 */;
-/*!40103 SET @OLD_TIME_ZONE=@@TIME_ZONE */;
-/*!40103 SET TIME_ZONE='+00:00' */;
-/*!40014 SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0 */;
-/*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0 */;
-/*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO' */;
-/*!40111 SET @OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0 */;
-
---
--- Table structure for table `some_table`
---
-
-DROP TABLE IF EXISTS `some_table`;
-/*!40101 SET @saved_cs_client = @@character_set_client */;
-/*!50503 SET character_set_client = utf8mb4 */;
-CREATE TABLE `some_table` (
- `path` varchar(255) DEFAULT NULL,
-
`month` date DEFAULT NULL, - `hits` int unsigned DEFAULT NULL -) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci; -/*!40101 SET character_set_client = @saved_cs_client */; - --- --- Dumping data for table `some_table` --- - -LOCK TABLES `some_table` WRITE; -/*!40000 ALTER TABLE `some_table` DISABLE KEYS */; -INSERT INTO `some_table` VALUES ('Bangor_City_Forest','2015-07-01',34),('Alireza_Afzal','2017-02-01',24),('Akhaura-Laksam-Chittagong_Line','2015-09-01',30),('1973_National_500','2017-10-01',80),('Attachment','2017-09-01',1356),('Kellett_Strait','2017-01-01',5),('Ajarani_River','2018-01-01',30),('Akbarabad,_Khomeyn','2017-03-01',8),('Adriaan_Theodoor_Peperzak','2018-02-01',88),('Alucita_dryogramma','2015-09-01',1),('Brit_Med_J','2015-07-01',1),('4th_Metro_Manila_Film_Festival','2015-09-01',80),('Alialujah_Choir','2018-03-01',221),('1953-54_SM-sarja_season','2016-09-01',1),('Air_Force_Song','2018-02-01',19),('4-6_duoprism','2016-03-01',30),('Ashley_Spurlin','2017-06-01',94),('Asfaq_Kayani','2017-10-01',1),('1607_in_architecture','2016-06-01',7),('4-way_speakers','2015-10-01',2),('Blue_Heeler','2015-07-01',149),('5_Euro','2017-04-01',16),('2009_Spa-Francorchamps_GP2_Series_round','2016-04-01',12),('2015_Guru_Granth_Sahib_desecration','2015-11-01',6821),('Agriculture_Marketing_Service','2016-07-01',2),('2006_Football_League_Cup_Final','2015-11-01',1711),('2008_Uber_Cup_group_stage','2016-02-01',40),('1923_PGA_Championship','2016-08-01',97),('Fannie_Bay','2016-04-01',6),('AlchemyAPI','2016-04-01',344),('Cinema_of_Italy','2017-01-01',1217),('Arodes','2016-11-01',36),('Damien_Marley','2015-07-01',168),('Al_Jumayl_Baladiyat','2015-08-01',5),('2015_Alabama_State_Hornets_football_team','2017-06-01',32),('Aglossa_tanya','2016-03-01',1),('73rd_Pennsylvania_Infantry','2017-01-01',12),('2015_European_Junior_and_U23_Canoe_Slalom_Championships','2018-02-01',31),('African_Leopard','2016-08-01',64),('Faverolles,_Orne','2017-01-01',5),('Aaron_Fukuhara','2015-11-01',17),('Annular_ligaments_of_trachea','2017-01-01',31),('2014_US_Open_Series','2016-11-01',35),('A_Better_Mousetrap','2018-02-01',4),('Dibaklu','2016-11-01',1),('At_Samat_District','2015-06-01',35),('Aaron_Peasley','2017-05-01',32),('Apistomology','2015-12-01',2),('Buyat_Bay','2015-07-01',54),('1942_Estonian_Football_Championship','2017-05-01',22),('Action_for_Autism','2016-06-01',346),('100_Hz','2015-06-01',72),('2003_Arizona_State_Sun_Devils_football_team','2017-05-01',82),('Antona_obscura','2016-09-01',1),('Akiko_Sugiyama','2015-12-01',32),('Elysburg','2016-11-01',8),('2017_New_South_Wales_Cup','2017-09-01',38),('2011-12_Gold_Coast_United_FC_season','2017-06-01',1),('Agency_for_the_Prohibition_of_Nuclear_Weapons_in_Latin_America_and_the_Caribbean','2016-04-01',15),('Albert_Dunn','2017-08-01',87),('Hahamakin_ang_Lahat','2017-01-01',984),('2013_Spuyten_Duyvil_derailment','2017-11-01',5),('Ayling','2017-01-01',5),('Anti-Establishment','2016-10-01',1),('1951_Spanish_motorcycle_Grand_Prix','2018-01-01',48),('2009-10_Brunei_Premier_League','2017-08-01',4),('23_Ursae_Majoris','2016-08-01',90),('1927-28_Austrian_football_championship','2017-08-01',4),('Andrew_McKeever','2017-10-01',3),('Clinocottus','2017-06-01',23),('2006_State_of_Origin','2015-11-01',7),('2013-14_Los_Angeles_Clippers_season','2015-07-01',8),('Cor_Jesu','2017-01-01',1),('Besseringen_B-Werk','2017-06-01',158),('Amy_Hempel','2017-07-01',1091),('Franc-Comtois','2016-04-01',2),('Allium_giganteum','2017-07-01',1103),('Abishai','2016-08-01',56),('Abraham_Clark_High_School'
,'2016-04-01',88),('Baku_chronology','2015-06-01',1),('22nd_MEU','2015-10-01',39),('2015_Open_Engie_de_Touraine','2015-10-01',195),('Churchill_Bowl','2017-06-01',30),('AGMARK','2017-08-01',117),('American_standard_wire_gauge','2017-12-01',3),('Araby,_LA','2015-05-01',2),('217_BC','2016-12-01',202),('2008_Trinidad_and_Tobago_League_Cup','2016-02-01',6),('Alazan_Bay','2015-12-01',22),('Aluminum_fencing','2015-11-01',48),('Achilles_tendinitis','2016-10-01',5884),('AFP_Peacekeeping_Operations_Center','2017-01-01',64),('2013_Xinjiang_clashes','2016-01-01',1),('Arborea_Giudicato_of_Arborea','2015-09-01',3),('1941_Cleveland_Rams_season','2017-06-01',40),('Ju_Posht,_Rasht','2017-01-01',3),('Ascalenia','2016-07-01',10),('Aplectoides','2018-02-01',4),('European_Cup_1969-70','2016-11-01',14),('Armen_Mkertchian','2016-05-01',9),('2015_Aspria_Tennis_Cup_-_Singles','2018-02-01',1),('14_August_1947','2017-11-01',6),('Adobe_Creative_Suite_1','2015-05-01',1),('IC_chips','2017-01-01',2),('Austo_AE300','2016-07-01',4),('Date_palms','2015-07-01',79),('BCS_bowl_game','2017-06-01',13),('AR_Border','2017-06-01',1),('Aranda_de_Duero','2016-04-01',256),('1919_Wake_Forest_Demon_Deacons_football_team','2016-01-01',16),('All_The_Wrong_Clues_For_The_Right_Solution','2017-10-01',9),('Allan_Campbell_McLean','2015-06-01',131),('Bradford_Council_election,_2011','2017-06-01',5),('Astronomy_and_astrophysics','2015-09-01',62),('Dutch_Antillean_people','2015-07-01',57),('Army_Radio','2018-03-01',711),('BBVA_Bancomer','2016-11-01',709),('Lake_Aloha','2017-01-01',30),('Andy_Bean','2018-02-01',3092),('1941_Pittsburgh_Steelers_season','2016-05-01',147),('Aniopi_Melidoni','2016-06-01',4),('Aglossosia_fusca','2017-09-01',3),('Art_books','2017-04-01',36),('1929_Washington_Senators_season','2017-04-01',47),('Antaeotricha_congelata','2016-12-01',10),('Douglas_C-54G-5-DO_Skymaster','2017-01-01',1),('Chris_Jamison','2016-11-01',827),('Ace_Blackwell','2015-11-01',9),('Abdul_Qadir_Fitrat','2018-02-01',32),('Arnoldo_Vizcaino','2017-10-01',1),('2012_Open_EuroEnergie_de_Quimper_-_Doubles','2017-12-01',3),('Dale_Rominski','2017-01-01',7),('ADHD_coaching','2015-06-01',50),('Claire_Yiu','2016-11-01',209),('Applicant','2015-10-01',253),('Apache_OpenOffice','2017-06-01',6031),('Abel_Kiprop_Mutai','2015-09-01',22),('Airdrome_Taube','2017-04-01',46),('Andrey_Viktorovich','2016-06-01',1),('American_Idol_controversy','2016-03-01',36),('Anthrenocerus_confertus','2018-01-01',17),('Appraisal_Subcommittee','2018-03-01',17),('Babusa','2015-07-01',3),('500_homeruns','2016-06-01',1),('Argentina_national_volleyball_team','2016-08-01',64),('Chief_prosecutor_of_Russia','2015-07-01',1),('Absolution_DVD','2015-06-01',1),('1,3-Beta-glucan_synthase','2017-05-01',440),('Dave_Sinardet','2016-04-01',26),('Adeline_Whitney','2018-03-01',10),('Allon_shvut','2016-07-01',3),('2012_Penn_State_Nittany_Lions_football_season','2017-12-01',3),('Coleman-Franklin-Cannon_Mill','2017-01-01',4),('Action_director','2015-05-01',93),('AD_547','2016-01-01',1),('Acta_germanica','2017-09-01',1),('Abu_Dhabi_Global_Market_Square','2017-01-01',35),('Kozo_Shioya','2017-01-01',7),('China_Investment_Corp','2017-01-01',2),('Dmitri_Zakharovich_Protopopov','2016-04-01',129),('Anatra_Anadis','2017-10-01',208),('Archaikum','2017-11-01',5),('2000_Webby_Awards','2017-04-01',360),('2003_BCR_Open_Romania_-_Singles','2016-08-01',2),('Abacetus_bisignatus','2016-09-01',79),('American_school_of_kinshasa','2016-01-01',1),('Anna,_7th_Duchess_of_Bedford','2016-08-01',8),('Black_majority_district','2016-11-01',
3),('Dagma_Lahlum','2015-07-01',1),('Credit_Saison','2015-07-01',517),('Ariyankuppam_firka','2016-02-01',19),('Annette_Fuentes','2016-06-01',17),('Angerstein,_John','2015-12-01',2),('Annenkov_Island','2016-03-01',280),('Anne_Frank_museum','2016-06-01',67),('Annales_sancti_Amandi','2017-06-01',22),('L-FABP','2017-01-01',1),('Alvord,_TX','2017-06-01',12),('2006_World_Team_Table_Tennis_Championships','2016-05-01',119),('Angriffen','2015-12-01',9),('Anthony_Oppenheimer','2017-03-01',452),('Absamat_Masaliyevich_Masaliyev','2016-09-01',1),('Airborne_Museum_at_Aldershot','2016-03-01',41),('Aktiubinsk_Oblast','2015-08-01',7),('100_East_Wisconsin','2015-05-01',782),('7th_Bangladesh_National_Film_Awards','2017-08-01',91),('Alejandro_Reyes','2017-12-01',35),('Applied_philosophy','2018-03-01',539),('Adhemar_Pimenta','2016-06-01',146),('Break_the_fourth_wall','2016-04-01',66),('Annoushka_Ducas','2017-10-01',411),('ATC_code_J01CA01','2015-06-01',1),('Evelyn_County,_New_South_Wales','2016-11-01',7),('Elastic_scattering','2016-11-01',1374),('1032_Pafuri','2015-07-01',35),('Andrew_Bromwich','2015-08-01',26),('Ishita_Arun','2017-01-01',249),('Aspergics','2016-07-01',1),('1857_in_Chile','2018-03-01',22),('Breffni','2015-07-01',38),('845_in_poetry','2017-08-01',2),('20321_Lightdonovan','2015-10-01',12),('Arthur_Chandler','2017-12-01',27),('CsISOLatin2','2017-06-01',1),('1900_Grand_National','2016-06-01',69),('Aeritalia_AMX','2017-03-01',3),('B_Sharps','2015-06-01',11),('544_area_code','2015-09-01',2),('30th_Guldbagge_Awards','2015-06-01',37),('Agrippina','2017-08-01',315),('Ardmore','2016-02-01',433),('Amplypterus_panopus','2016-03-01',23),('Alexander_Bukharov','2017-09-01',5),('Alaska_Raceway_Park','2017-01-01',46),('Albanian_National_Road_Race_Championships','2017-03-01',31),('1968_Democratic_National_Convention_protest_activity','2017-10-01',2802),('2012_Birthday_Honours','2017-10-01',427),('2000_NHL_expansion_draft','2017-06-01',1),('A_Town_Where_You_Live','2016-11-01',2920),('Ahmed_Shahzad','2018-03-01',25),('Elisabeth_Svendsen','2016-11-01',39),('2002_FINA_Synchronised_Swimming_World_Cup','2016-08-01',30),('Akatek','2017-04-01',10),('Animation_with_DAZ_Studio','2018-02-01',78),('Fergus_Craig','2016-11-01',119),('Ancel_Nalau','2015-11-01',5),('5171_Augustesen','2017-04-01',20),('Anne_McGuire','2017-11-01',329),('Australian_Photoplay_Company','2015-12-01',6),('1913_in_Canada','2017-04-01',137),('Arhopala_allata','2015-05-01',26),('Il_Paradiso_delle_Signore','2017-01-01',31),('Geri_Palast','2017-01-01',38),('Alan_Abela_Wadge','2017-03-01',77),('22nd_Tactical_Air_Support_Squadron','2017-10-01',7),('Avant_Stellar','2017-06-01',22),('Black_phantom_tetra','2016-11-01',205),('Billy_McCaffrey','2017-01-01',314),('Annie_Furuhjelm','2017-11-01',97),('1992_PGA_Tour','2017-12-01',307),('2008_Chilean_pork_crisis','2016-01-01',55),('2012_Currie_Cup_First_Division','2018-02-01',32),('Aleksei_Fomkin','2015-05-01',144),('Alexander_Krausnick-Groh','2016-05-01',101),('Adam_Richard_Wiles','2017-08-01',5),('ATCvet_code_QA01AD01','2015-09-01',2),('Abu_Bakr_Ibn_Bajja','2017-03-01',5),('Architecture-Studio','2016-04-01',94),('950s_BC','2016-02-01',257),('Abschwunges','2017-07-01',1),('Adonis_Geroskipou','2017-06-01',15),('2008-09_SV_Werder_Bremen_season','2016-03-01',3),('Closed_loops','2016-04-01',1),('AFC_Youth_Championship_1982','2015-12-01',10),('Aquila_Shoes','2015-08-01',209),('9842_Funakoshi','2017-12-01',11),('Educational_quotient','2016-04-01',21),('Antoni_Julian_Nowowiejski','2018-01-01',211),('Adi_Oka_Idhile','2017-11
-01',16),('DEXIA-BIL_Luxembourg_Open','2016-11-01',3),('Andrew_James_Simpson','2016-03-01',43),('Alexander_Boksenberg','2017-12-01',61),('1827_in_Denmark','2017-03-01',39),('Afternoon_tea_with_suggs','2017-11-01',3),('Alpha,_MN','2017-06-01',6),('Ari_Onasis','2015-06-01',4),('1961-62_Football_League_First_Division','2015-11-01',1),('Andi_Lila','2015-06-01',2847),('A_Gathering_Of_Old_Men','2018-02-01',1),('Abul_Fazl_al-Abbas','2017-01-01',1),('Asgill,_Charles','2017-08-01',1),('Alexander_Arkhangelsky','2015-07-01',12),('1947-48_Portuguese_Liga','2015-06-01',1),('3rd_MMC_-_Varna','2016-07-01',3),('Alberts,_Wayne','2017-05-01',3),('Alois_Schickelgruber','2018-02-01',9),('Hefner_Stadium','2017-01-01',2),('410912_Lisakaroline','2018-02-01',26),('Academy_at_Mountain_State','2018-03-01',1),('617_Squadron','2016-05-01',489),('Al_Silm_Haji_Hajjaj_Awwad_Al_Hajjaji','2015-07-01',5),('Arturo_Merino_Benitez_Airport','2017-10-01',13),('AEK_Athens_Futsal','2015-06-01',10),('Aggaeus','2018-02-01',2),('Association_for_Retarded_Citizens_of_the_United_States','2017-08-01',3),('Kielce_pogrom','2017-01-01',335),('1351_in_poetry','2016-01-01',17),('1923_Princeton_Tigers_football_team','2017-11-01',41),('Auzata_semipavonaria','2017-01-01',2),('892_in_poetry','2016-01-01',6),('Anton_Krotiak','2017-12-01',2),('Arthur_Shelley','2017-12-01',23),('2003_Kyoto_Purple_Sanga_season','2018-02-01',9),('Frederic_Bowker_Terrington_Carter','2016-04-01',6),('2-orthoplex','2016-03-01',1),('Acacia_australiana','2015-09-01',4),('2012_Newcastle_Knights_season','2016-06-01',103),('Ann_Wrights_Corner,_Virginia','2017-07-01',19),('12557_Caracol','2017-03-01',5),('2001_African_Footballer_of_the_Year','2017-05-01',1),('Bass_Pyramid','2017-01-01',22),('A_noodle','2015-05-01',5),('Aed_Bennan','2018-02-01',2),('1886_Yale_Bulldogs_football_team','2017-10-01',58),('2002_Players_Championship','2016-06-01',54),('African_Skimmer','2017-07-01',2),('3rd_Guldbagge_Awards','2016-12-01',39),('Arrows_A19B','2015-10-01',1),('Archduchess_Elisabetta_of_Austria-Este','2017-08-01',1526),('America_Islands','2015-11-01',1),('1932_Olympic_Games','2016-01-01',9),('2011_Chinese_pro-democracy_protests','2015-11-01',2044),('Bank_walkaway','2016-04-01',113),('594_in_Ireland','2017-04-01',1),('Association_of_Municipal_Corporations','2016-12-01',5),('Andreas_Brantelid','2015-09-01',167),('Amarthal_urf_Unchagaon','2017-05-01',82),('3-methoxymorphinan','2017-04-01',146),('2382_BC','2016-07-01',10),('1763_in_science','2016-07-01',28),('Arvert','2017-04-01',77),('Ale_yeast','2017-12-01',19),('A_Man_Without_a_Soul','2018-03-01',17),('Air_Force_Base_Louis_Trichardt','2017-09-01',1),('Athirson_Mazzoli_de_Oliveira','2017-06-01',3),('Anthony_Chan_Yau','2017-07-01',181),('Basic_Enlisted_Submarine_School','2017-06-01',392),('Aboriginal_Lands_of_Hawaiian_Ancestry','2015-09-01',11),('Fondren_Southwest,_Houston','2017-01-01',4),('3_World_Financial_Center','2017-07-01',64),('1971_IIHF_European_U19_Championship','2017-09-01',9),('1937-38_Allsvenskan','2015-12-01',6),('Christopher_Ashton_Kutcher','2017-06-01',2),('Australian_rules_football_in_south_australia','2016-12-01',1),('Amicable_pair','2018-01-01',7),('Alan_Tomes','2015-11-01',82),('Alexei_Petrovich,_Tsarevich_of_Russia','2015-12-01',3887),('Alexis_Damour','2015-10-01',66),('Bankruptcy_Act_of_1938','2017-06-01',76),('Amphiphyllum','2016-06-01',14),('Conway_High_School_West','2016-04-01',1),('5447_Lallement','2015-11-01',10),('Gabriel_Iddan','2017-01-01',1),('1879-80_Scottish_Cup','2017-04-01',3),('2011_Eneco_Tour','2016-10-0
1',31),('1471_in_England','2015-11-01',94),('Ashland_Town_Hall','2017-01-01',5),('Archduke_John','2015-05-01',20),('2000_Cameroonian_Premier_League','2017-09-01',18),('1997_flood','2017-11-01',5),('Agile_management','2015-09-01',26677),('Am_841','2017-12-01',3),('Apprentice_Mason','2018-01-01',4),('Hales-Jewett_theorem','2017-01-01',2),('Alien_Abductions','2017-10-01',14),('Arjun_Menon','2016-02-01',370),('Anthokyan','2016-01-01',4),('Automobili_Lamborghini','2016-02-01',1110),('Alain_Prost','2017-04-01',25196),('Fartein_Valen','2016-04-01',90),('Antonio_Galli_da_Bibiena','2016-05-01',5),('Al_Jawf,_Libya','2017-03-01',600),('AD_695','2018-02-01',1),('Amir_chand','2015-11-01',1),('Alcis_obliquisigna','2017-08-01',1),('Chandra_Talpade_Mohanty','2017-01-01',306),('Algerian_safe_house,_Jalalabad','2015-06-01',3),('Jake_Milner','2017-01-01',1),('Alternate_Communications_Center','2017-10-01',1),('In_the_Bleachers','2017-01-01',42),('Alex_Puodziukas','2016-04-01',7),('Altarpiece_of_Pilgrim_II','2018-02-01',2),('Cybernetical_Physics','2017-01-01',3),('Christopher_Unthank','2017-06-01',2),('1982_Independence_Bowl','2015-06-01',102),('Ascoli_Calcio_1898','2018-03-01',1115),('Briggs-Rauscher_reactions','2017-06-01',1),('Adjadja','2018-02-01',45),('Afghanistan_from_Ahmad_Shah_until_Dost_Mohammed','2016-06-01',3),('Catholic_social_doctrine','2017-01-01',6),('2833_BC','2016-11-01',1),('Bethy_Woodward','2016-04-01',38),('Bateman_polynomials','2017-06-01',22),('1966_Buenos_Aires_Grand_Prix','2015-10-01',19),('A_River_Somewhere','2015-10-01',353),('2016-17_BVIFA_National_Football_League','2017-04-01',2),('1909_Major_League_Baseball_season','2015-10-01',362),('1988_Oklahoma_Sooners_football','2017-11-01',2),('2010s_in_Chechen_fashion','2016-10-01',1),('Accademia_Olimpica','2017-08-01',17),('Air_cooling','2015-07-01',2010),('Amir_Saoud','2016-11-01',22),('Alex_Auburn','2015-05-01',52),('Apamea_impulsa','2016-11-01',6),('Australian_federal_election,_2007','2015-07-01',1794),('Ain_Sakhri','2017-10-01',76),('Belosaepiidae','2015-07-01',68),('Acts_of_Parliament_in_the_United_Kingdom','2017-10-01',4070),('Equity_Office','2016-11-01',202),('David_Bintley','2017-01-01',51),('Aksel_Schiotz','2018-03-01',3),('Appropriation_Act_2000','2017-05-01',12),('Edward_Johnson_III','2016-11-01',491),('2006_Ohio_State_Buckeyes_football_team','2016-03-01',1452),('Battle_of_Fort_Beausejour','2015-07-01',97),('Abel_Foullon','2015-12-01',82),('Apollo_VIII','2015-10-01',19),('Carry_on_up_the_jungle','2015-07-01',8),('Armour_villa','2017-05-01',4),('201_Poplar','2015-08-01',265),('Arta_prefecture','2016-08-01',1),('2015-16_Ekstraklasa','2018-02-01',13),('Alport,_Ontario','2018-02-01',2),('Bongoland','2017-06-01',62),('Alfred_Charles_Post','2016-11-01',11),('Aam_Aadmi_Party_crisis','2016-10-01',1),('Andrea_Moda','2015-07-01',143),('Abdul_Halim_Sharar','2017-08-01',545),('Apostolic_Vicariate_of_Yunnan','2016-12-01',1),('Catherine_Steadman','2016-11-01',5218),('Agastachys_odorata','2015-10-01',38),('9783_Tensho-kan','2016-03-01',2),('AFL_Cairns','2017-10-01',337),('Abomey','2015-06-01',1062),('Anne_Crofton,_1st_Baroness_Crofton','2015-12-01',42),('Cash-flow_return_on_investment','2017-01-01',137),('Alberto_Arvelo_Torrealba_Municipality','2015-08-01',56),('Abyssinian_Shorthorned_Zebu','2017-09-01',124),('Albanian_hip_hop','2016-01-01',1812),('Alphonso_IV_of_Portugal','2016-02-01',12),('19th_The_Alberta_Mounted_Rifles','2016-10-01',1),('Chinese_shadow_theatre','2016-04-01',1),('American_Committee_of_the_Fourth_International','2017-08-01',4)
,('2014_Bahrain_GP2_Series_round','2016-03-01',80),('Alexandrian_orthodox','2017-09-01',2),('2010_Hurricane_Season','2015-05-01',18),('1938_All-Ireland_Senior_Camogie_Championship_Final','2017-01-01',1),('ATC_code_D01','2018-01-01',203),('Albedo','2015-08-01',23484),('Chavigny,_Meurthe-et-Moselle','2017-01-01',12),('Becky_Essex','2015-07-01',51),('Archaeological_Museum_Padre_Le_Paige','2018-02-01',2),('Abu_Bakar_Sillah','2017-01-01',5),('Back_chat','2017-01-01',2),('Anchylobela_dyseimata','2015-12-01',11),('Anthony_Overton','2017-03-01',261),('Bear_maul','2016-04-01',3),('Ambarawa,_Central_Java','2016-01-01',1),('Amber_lager','2016-11-01',87),('2nd_LAAD','2017-09-01',8),('Ashiya,_Hyogo','2018-03-01',24),('Angels_at_Risk','2018-02-01',74),('Audrey_Marie_Munson','2016-03-01',17),('1984_Australian_Football_Championships','2017-01-01',27),('Ammonia_fountain','2016-06-01',434),('Allister_Bentley','2018-03-01',11),('Alsager_Hay_Hill','2016-10-01',72),('1753_English_cricket_season','2015-05-01',51),('2009-10_New_Jersey_Devils_season','2016-10-01',1),('An_Untamed_State','2016-05-01',1109),('Beatrice_Carmichael','2016-11-01',5),('Abdul_Ghani_Ahmad','2017-12-01',115),('Arteria_suralis','2017-02-01',3),('Berzasca_River','2017-01-01',1),('Angel_Attack','2015-09-01',98),('1969_San_Francisco_49ers_football_team','2017-11-01',1),('Anthony_Beilenson','2017-09-01',114),('Crystalline_Entity','2016-04-01',180),('Granice','2017-01-01',2),('203rd_General_Hospital','2017-07-01',44),('Acrocercops_rhombiferellum','2017-12-01',20),('Ampliglossum_blanchetii','2017-05-01',1),('11553_Scheria','2017-03-01',2),('Ashkenozi','2017-02-01',1),('2010_Calder_Cup_Playoffs','2018-01-01',9),('Alice_Caymmi','2016-01-01',121),('Alfredo_Alvar','2017-04-01',44),('2006_Legends_Tour','2017-07-01',30),('Albano_Albanese','2015-10-01',53),('1943_Frankford_Junction_train_wreck','2016-08-01',510),('Evans_Court_Apartment_Building','2016-04-01',4),('Abu_al-Rayhan_Muhammad_ibn_al-Biruni','2017-11-01',1),('Abubakar_Muhammad_Rimi','2015-05-01',4),('Dostpur','2016-11-01',26),('Accessories_Council_Excellence_Awards','2016-03-01',14),('2006_North_American_heat_wave','2015-06-01',1161),('Amstelodamum','2017-09-01',12),('A_Very_Peculiar_Practice','2016-08-01',1860),('Allegorie_der_Liebe','2015-09-01',1),('Alex_Mackie','2017-02-01',95),('1812_Homestead_Farm_and_Museum','2017-09-01',29),('Argus_distribution','2016-03-01',8),('Anthony_Thomas_Stover','2017-02-01',1),('Arthur_Shallcross','2016-11-01',20),('Antoine_Francois_Fourcroy','2018-01-01',1),('Abbas_Halim','2016-11-01',21),('Akiva_Baer_ben_Joseph','2017-08-01',1),('Balatonfuered','2016-11-01',3),('Antemnae','2017-11-01',204),('Cling_Cling','2017-06-01',93),('B_flat_major','2017-01-01',28),('AirExplore','2017-12-01',930),('Auckland_Super_Sprint','2015-11-01',120),('Alfredo_De_Gasperis','2017-12-01',793),('Geoffrey_I_of_Vianden','2017-01-01',5),('Copa_de_Zaachila','2016-04-01',6),('Alboacen','2017-09-01',1),('BNH_Hospital_Bangkok','2017-06-01',2),('Agricultural_health_and_safety','2016-09-01',1),('Chiasms','2017-06-01',2),('Al_Karaana','2016-05-01',58),('Alberta_Highway_872','2016-11-01',1),('Among_the_mourners','2016-03-01',1),('Achema_Power_Plant','2015-06-01',55),('ATSE_Graz','2017-10-01',65),('Arthroscopy','2017-02-01',11721),('2010-2012_European_Nations_Cup_Second_Division','2018-01-01',7),('1967_Cincinnati_Reds','2015-08-01',4),('24th_Golden_Disc_Awards','2017-05-01',71),('Johnny_Floyd','2017-01-01',17),('Arthur_Rupin','2016-02-01',5),('Alpine_skiing_at_the_2011_Canada_Winter_Games','2015-09-0
1',38),('College_Press_Service','2017-01-01',8),('American_Psycho','2015-08-01',55567),('CBC_Winnipeg','2017-06-01',17),('Burning_the_process','2016-04-01',1),('2011_Stanley_Cup_playoffs','2017-01-01',1036),('Andrew_Mumford','2017-01-01',6),('1925_in_fine_arts_of_the_Soviet_Union','2018-02-01',28),('Aragvi_river','2017-02-01',2),('Andrew_Adamson','2018-03-01',16269),('Arcides_fulvohirta','2016-10-01',1),('Araya_Selassie_Yohannes','2015-11-01',423),('Apartment_house','2016-09-01',85),('Advanced_Art','2015-12-01',171),('1028_Lydina','2015-06-01',53),('2005_July_6_United_Nations_assault_on_Cite_Soleil,_Haiti','2017-04-01',2),('Adolph_Weiss','2015-06-01',98),('Adam_Jerzy_Czartoryski','2015-09-01',1237),('1980_United_States_presidential_election','2017-05-01',56),('1956_Oscars','2016-08-01',10),('Burundian_Senate_election,_2005','2016-04-01',1),('Amarolea_floridana','2015-07-01',3),('August_Bier','2015-12-01',514),('Arbelodes_sebelensis','2018-03-01',6),('Abiah_Brown','2018-02-01',1),('A_Maceo_Smith_High_School','2016-10-01',2),('1488_in_architecture','2017-12-01',6),('2009_AMP_Energy_500','2016-04-01',45),('1921_Baylor_Bears_football_team','2017-03-01',21),('Dmitry_Akhba','2015-07-01',43),('2004_Big_12_Conference_Baseball_Tournament','2016-07-01',37),('Abdisalam_Omer','2018-02-01',116),('Alma,_son_of_Alma','2015-08-01',53),('An_Phoblacht','2016-10-01',962),('2009_Turner_Prize','2016-01-01',75),('Jack_Zajac','2017-01-01',24),('1906_Wimbledon_Championships','2016-04-01',22),('Chuckwalla_Valley','2017-06-01',22),('Alien_Quadrology','2016-02-01',1),('Chalcidoptera_contraria','2016-04-01',1),('Alaska_Republican_Gubernatorial_Primary_Election,_2006','2016-02-01',1),('333639_Yaima','2018-02-01',7),('Aquila_hastata','2015-11-01',28),('Al-Fua','2017-07-01',1),('Anihilation','2015-07-01',28),('International_Toy_Fair','2017-01-01',1),('38th_Regiment_Indiana_Infantry','2017-01-01',10),('Andrea_Stella','2017-07-01',75),('Anselmo_de_Moraes','2015-09-01',562),('Applemore','2016-05-01',3),('Akpinar,_Kirsehir','2015-06-01',3),('Ant_nest','2016-05-01',53),('Catherine_of_Siena','2016-11-01',8806),('Barbos','2015-06-01',12),('Amlaib_mac_Iduilb','2017-08-01',2),('Alice_Janowski','2018-03-01',17),('Acacia_leptocarpa','2017-03-01',48),('Al-Hadi_Yahya','2016-01-01',39),('2015_British_Figure_Skating_Championships','2017-07-01',38),('Avenues_Television','2016-03-01',214),('Dendropsophus_sartori','2015-07-01',11),('1952_in_Germany','2015-05-01',63),('Armuchee_High_School','2016-04-01',27),('April_1_RFC','2017-11-01',2),('Caroline_Bliss','2016-11-01',972),('66th_Rice_Bowl','2016-06-01',17),('Alec_Smight','2017-02-01',173),('Alexei_Panin','2017-09-01',3),('Codeword','2016-04-01',84),('Dormice','2015-07-01',63),('2105_BC','2017-11-01',6),('5th_National_Congress_of_Kuomintang','2016-06-01',5),('Caminho_das_Indias','2017-01-01',5),('Agerbo','2017-11-01',2),('Abe_Anellis','2018-01-01',86),('Aceh_Medal','2015-07-01',33),('Alltech_Arena','2016-10-01',144),('Aly_Oury','2016-06-01',260),('757th_Troop_Carrier_Squadron','2017-07-01',2),('Alec_Peters','2017-12-01',2731),('Agua_Buena_Airport','2017-09-01',12),('Alessandro_Livi','2016-08-01',104),('Andkaer','2017-04-01',3),('Cateran','2017-06-01',135),('57th_Venice_International_Film_Festival','2017-04-01',180),('Brijal_Patel','2017-06-01',98),('Cnemaspis_jerdonii','2015-07-01',6),('Aluminum_sodium_salt','2016-10-01',3),('Arnaldo_Antonio_Sanabria_Ayala','2017-09-01',4),('Angels_of_Iron','2018-02-01',83),('Bugs_Bunny_Rabbit_Rampage','2017-06-01',422),('Admiralty_Class_Destroyer','2017-1
0-01',2),('Atlas_Media','2017-05-01',2),('Arcesilaus_i_of_cyrene','2017-03-01',1),('2011_Tajikistan_national_football_team_results','2017-04-01',13),('Artur_Shakhnazarov','2017-12-01',22),('747_Express_Bus','2018-03-01',20),('101-in-1_Party_Megamix','2017-10-01',188),('Fastpoint_Games','2016-11-01',32),('Analog_Anthology_1','2017-07-01',1),('Archival_bond','2015-09-01',119),('1985_Air_Force_Falcons_football','2017-09-01',4),('American_Airlines_plane_diverted_to_Miami_after_landing_gear_problem','2017-06-01',3),('Adaptive_Evolution_in_the_Human_Genome','2017-08-01',2),('Arthur_Strangways','2015-11-01',5),('1583_in_poetry','2015-09-01',68),('Andrew_igoudala','2015-06-01',2),('Euonychophora','2016-11-01',37),('Catechizing','2016-04-01',4),('1960-61_ice_hockey_Bundesliga_season','2018-03-01',3),('Buk_Vlaka','2017-06-01',10),('Arbor_Day','2018-03-01',16265),('Guan_Sheng','2017-01-01',73),('2014_Barcelona_Open_Banc_Sabadell','2016-08-01',57),('1976-77_Nationalliga_A','2016-04-01',1),('AFL_records','2015-11-01',16),('2005_Tour_Down_Under','2016-10-01',26),('92_BCE','2015-08-01',4),('Bento_Box_Animation','2017-01-01',1),('Alabama_Territory','2018-03-01',1195),('Abdul-Wasa_Al-Saqqaf','2016-07-01',21),('Archbishops_of_Semarang','2017-01-01',6),('Ambivina','2017-10-01',13),('Aghjaghala_Ulia','2017-08-01',2),('Blechnum_novae-zelandiae','2016-11-01',26),('Dictyosome','2016-04-01',19),('Arts_Council_of_Great_Britain','2016-12-01',785),('LBC_Radio','2017-01-01',3),('Ageo,_Saitama','2016-06-01',396),('Babla_Mehta','2016-12-01',674),('2012-13_Russian_Cup','2018-01-01',10),('Chandragupt','2017-06-01',6),('407th_Air_Refueling_Squadron','2016-01-01',96),('Aftermarket','2016-07-01',1253),('A_Portrait_of_New_Orleans','2016-08-01',18),('2000-01_Yemeni_League','2017-03-01',1),('Actinidia_chinensis','2015-11-01',907),('Amsterdam_Tournament_1999','2018-03-01',1),('Arthur_Iberall','2017-02-01',112),('Auricula_Meretricula','2016-02-01',103),('Archbishop_of_Lahore','2016-09-01',8),('Chippewa_Indians_of_Montana','2016-04-01',9),('Abidjan-Niger_Railway','2018-01-01',22),('29th_Annual_Grammy_Awards','2017-05-01',1087),('Ateles_geoffroyi_frontatus','2017-06-01',3),('Enrico_Cernuschi','2016-11-01',3),('A4183_road','2017-02-01',8),('Ahrayut','2016-10-01',75),('Alison_Castle','2016-03-01',55),('Automobile_aftermarket','2016-10-01',5),('2008_GAINSCO_Auto_Insurance_Indy_300','2016-07-01',51),('1937_Scottish_Cup_Final','2017-04-01',126),('2005_Clipsal_500_Adelaide','2018-02-01',22),('Farid_Farjad','2016-04-01',120),('13_Tribes_of_Long_Island','2015-12-01',11),('Afroneta_bamilekei','2017-01-01',2),('Frederick_Stuart_Greene','2017-01-01',1),('Andre_Braugher','2017-04-01',37655),('1906_International_Lawn_Tennis_Challenge','2017-10-01',73),('2009-10_NFL_Playoffs','2016-01-01',69),('Cricket_Wellington','2016-11-01',2),('Craig_Blazer','2015-07-01',21),('Aeolidiella_orientalis','2017-05-01',3),('Andre_Prokovsky','2017-06-01',4),('Angela_McKee','2017-11-01',14),('Airbase_Golubovci','2016-10-01',1),('2011_ISAF_Sailing_World_Championships','2017-05-01',89),('Bartica_Airport','2017-06-01',27),('Agusan_Dam','2016-09-01',454),('Bosque_Real_Country_Club','2015-07-01',42),('Georges_Duhamel','2017-01-01',122),('Allrounder','2017-03-01',63),('2017_Missouri_State_Bears_football_team','2017-09-01',868),('Allons_a_Lafayette','2017-11-01',17),('Agathla','2015-05-01',105),('1086_in_poetry','2015-09-01',25),('Absolute_extreme','2017-09-01',1),('Agathe_Bonitzer','2017-12-01',229),('Chinese_Red_Pine','2017-06-01',18),('Angular_dispersion','2016-02-01',1
1),('Jean-Sebastian_Giguere','2017-01-01',2),('Actinium-235','2018-03-01',4),('Ago,_filo_e_nodo','2017-02-01',11),('Aranea_cruentata','2016-03-01',1),('2009_Korea_National_League','2017-11-01',19),('Americom-8','2016-08-01',28),('2006_Junee_Bushfire','2018-03-01',81),('2013_Major_League_Baseball_Home_Run_Derby','2017-09-01',182),('1928_US_Presidential_Election','2016-12-01',42),('After-eighty_generation','2016-02-01',127),('1932_Hawthorn_Football_Club_season','2017-07-01',16),('Amelia_Elizabeth_Mary_Rygate','2017-05-01',2),('Aline_Khalaf','2017-12-01',465),('Akron_Junction,_New_York','2017-07-01',56),('Apollo_moon_landing_conspiracy_theories','2015-09-01',4),('1978_National_League_Championship_Series','2017-03-01',325),('1959-60_German_football_championship','2017-08-01',5),('Almost_a_Bride','2017-01-01',1),('Andrew_Lysaght,_junior','2015-10-01',20),('1902_Otani_expedition','2018-02-01',1),('1892_Currie_Cup','2016-09-01',53),('1988_USC_Trojans_football_team','2016-10-01',494),('1944_in_Northern_Ireland','2016-12-01',46),('Alfred_Acherman','2017-07-01',1),('Arcadia,_Nebraska','2017-02-01',148),('4_x_400_metre_relay','2018-03-01',1),('A4030_road','2016-07-01',1),('Chi-li','2016-11-01',3),('Aircraft_fairing','2016-11-01',1861),('Buddhism_in_Belize','2015-07-01',40),('Alameda_County_Open','2017-02-01',33),('Area_of_countries_and_regions_of_the_United_Kingdom','2017-10-01',6),('2014_Weber_State_Wildcats_football_team','2016-10-01',47),('American_Journal_of_Comparative_Law','2016-04-01',62),('A_Teaspoon_Every_Four_Hours','2017-03-01',47),('Astasis','2016-03-01',1195),('Akhrakouaeronon','2015-11-01',62),('Annenkrone','2016-03-01',40),('Ballotine','2016-12-01',4753),('2000_Kipawa_earthquake','2015-11-01',139),('Archdiocese_of_cashel_and_emly','2017-01-01',1),('Chevrolet_SS396','2017-01-01',1),('Achyroseris','2016-03-01',1),('Daniel_Pulteney','2016-11-01',29),('2006_Major_League_Baseball_draft','2017-07-01',10637),('Adetunji_Idowu_Olurin','2016-01-01',37),('Ardatov,_Nizhny_Novgorod_Oblast','2017-04-01',18),('Andrew_Hilditch','2015-08-01',398),('A_Very_Merry_Daughter_Of_the_Bride','2017-04-01',67),('1993_in_radio','2017-08-01',85),('Deltan','2016-11-01',91),('Adnan_Custovic','2017-12-01',26),('Di_Gennaro','2017-01-01',4),('237_AD','2017-11-01',1),('Aaron_Gombar','2018-03-01',2),('Acrolophus','2017-04-01',47),('Alfred_Bergman','2017-06-01',27),('Charles_Bebb','2017-06-01',39),('Dirico','2017-01-01',24),('1982_Major_League_Baseball_Draft','2016-12-01',90),('DDT_wrestling','2016-11-01',4),('1988-89_Houston_Rockets_season','2016-02-01',10),('Acacia_loderi','2015-11-01',35),('2015_Deauville_American_Film_Festival','2016-10-01',126),('Andropadus_importunus','2016-02-01',9),('Antonio_Bacchetti','2017-04-01',52),('Ann_Trindade','2015-09-01',49),('5_x_Monk_5_x_Lacy','2016-05-01',37),('Barlochan,_Ontario','2017-06-01',2),('Achaian','2017-03-01',35),('Flow_rider','2017-01-01',1),('Antiblemma_discerpta','2018-02-01',1),('1997_Illinois_Fighting_Illini_football_team','2017-11-01',331),('Ahrntal','2016-03-01',540),('Apollo_Conference','2015-10-01',329),('Algenib_in_Perseus','2016-01-01',1),('Craig_Norgate','2016-04-01',42),('Antwerp_Zoo','2015-12-01',879),('Cold_Contagious','2017-06-01',161),('Bolito','2016-11-01',181),('Chinese_bridges','2016-11-01',1),('14th_Indiana_Infantry_Regiment','2017-04-01',115),('Bindunuwewa_massacre','2015-07-01',52),('Eastshore_Highway','2016-11-01',2),('Daemonologie','2017-01-01',1655),('Aero_Pacifico','2015-07-01',1),('Blue_Ribbon_Schools_Program','2017-06-01',557),('Ash_Township,_MI'
,'2018-02-01',3),('Al-Hatab_Square','2018-02-01',450),('Alje_Vennema','2018-02-01',187),('1920_All-Ireland_Senior_Football_Championship_Final','2016-05-01',40),('Criss_Oliva','2016-11-01',801),('Bethlehem,_Ohio','2017-01-01',16),('1976_WHA_Amateur_Draft','2015-08-01',47),('Angela_Fimmano','2017-06-01',17),('Alexander_Bonini_of_Alexandria','2017-09-01',1),('Anarchist_faq','2015-05-01',13),('Aleksander_Benedykt_Sobieski','2016-05-01',240),('Cape_Florida_Lighthouse','2016-04-01',6),('Fernando_VI_of_Spain','2017-01-01',3),('Crossing_number','2017-06-01',29),('1984_NSL_Cup','2017-05-01',26),('Barbara_Weldon','2015-06-01',29),('Andreas_Olsen','2017-01-01',32),('Battle_of_Baima','2016-04-01',2),('Amory_Hansen','2016-05-01',26),('Akhmimic','2015-11-01',41),('Al_Awda','2018-02-01',18),('Adelheid-Marie_of_Anhalt-Dessau','2016-07-01',70),('Americans_for_Technology_Leadership','2015-10-01',90),('Belizean_diplomatic_missions','2017-06-01',3),('African_communist','2016-05-01',3),('Andosol','2016-09-01',246),('Alan_Attraction','2016-05-01',15),('A_Yank_in_Rome','2015-12-01',70),('2004_in_the_United_Arab_Emirates','2018-02-01',33),('Additionality','2017-06-01',371),('Assassination_of_Trotsky','2015-06-01',47),('Alice_Sotero','2018-02-01',27),('Agyneta_platnicki','2016-04-01',4),('Alexandra_Vasilyevna_Velyaminova','2015-07-01',30),('1881_in_Chile','2016-06-01',16),('Arterial_ischemic_stroke','2018-01-01',57),('Astro_Glacier','2015-09-01',27),('Chester_Earl_Merrow','2017-06-01',58),('Alejandro_de_la_Madrid','2015-11-01',1630),('70936_Kamen','2017-08-01',1),('AK_Steel_Holding_Corp','2015-08-01',8),('1124_Stroobantia','2017-10-01',23),('Asian_Wedding','2016-10-01',15),('23837_Matthewnanni','2015-10-01',18),('Acharya_Jagadish_Chandra_Bose_Indian_Botanic_Garden','2017-03-01',4893),('Betsy_Hodges','2016-04-01',560),('Arthur_and_the_Invisibles','2015-08-01',14924),('Arkansas-Ole_Miss_football_rivalry','2015-05-01',7),('Asia_Cup','2015-09-01',5938),('Arginine_racemase','2016-12-01',15),('585th_Field_Company,_Royal_Engineers','2018-03-01',1),('1975_Stagg_Bowl','2017-08-01',6),('Dame_Commander_of_The_Most_Honourable_Order_of_the_Bath','2017-01-01',1),('Askajian','2016-02-01',26),('2006_Nebraska_Cornhuskers_football_team','2015-08-01',975),('Cicero_Francis_Lowe_House','2015-07-01',10),('Conan_IV,_Duke_of_Brittany','2016-11-01',252),('2005_World_Modern_Pentathlon_Championships','2016-07-01',38),('1946_Aleutian_Islands_earthquake','2017-03-01',2019),('ANKRD17','2017-09-01',19),('1970_Maryland_Terrapins_football_team','2017-11-01',42),('Ali_Dehkhoda','2017-04-01',1),('1244_in_art','2015-07-01',22),('1520s_in_Denmark','2016-01-01',20),('Abdoulaye_Gaye','2017-12-01',10),('An_Angel_Has_Arrived','2016-03-01',36),('1453_BC','2015-08-01',26),('2017_National_Games_of_China','2017-05-01',1293),('A_Night_in_Sickbay','2016-05-01',251),('Dateline_Diamonds','2017-01-01',53),('419_guestbook_spamming','2016-02-01',5),('Familiar_bluet','2017-01-01',4),('Abu_Bakr_Mirza','2017-10-01',86),('7272_Darbydyar','2017-11-01',4),('Ages_of_consent_in_Latin_America','2017-03-01',961),('1982_Japan_Soccer_League_Cup','2016-04-01',14),('2810_BC','2015-07-01',9),('Druga_Liga_Republike_Srpske','2017-01-01',1),('1998_Swedish_Rally','2017-09-01',34),('1567_in_Norway','2015-10-01',89),('126_Army_Engineer_Regiment,_Royal_Engineers','2016-03-01',5),('2017_American_League_Wild_Card_Game','2017-10-01',25120),('August_Follen','2017-01-01',2),('Ala_Gertner','2015-11-01',876),('Glenwood,_Harford_County,_Maryland','2017-01-01',3),('Applied_ecology','2017-12-01',7
30),('Ariarathes_V_Eusebes_Philopator','2018-03-01',5),('2006_AFC_Champions_League','2017-09-01',947),('60_minutes_2','2016-10-01',2),('Embryonic_shield','2017-01-01',2),('2001_Meath_Intermediate_Football_Championship','2015-11-01',8),('Apparition_of_Christ_to_Madonna','2017-06-01',5),('Hoosier_Road_Elementary','2017-01-01',1),('Arua_Uda','2016-12-01',29),('Array_comprehension','2015-11-01',8),('Baszki','2015-06-01',36),('Akron_Neighborhoods','2016-01-01',4),('Catholic_Church_in_Costa_Rica','2017-06-01',85),('Canada-Sweden_relations','2015-07-01',1),('Barza_Radio_Community','2016-11-01',6),('Dalhousie_Middle_School','2016-11-01',5),('Alliphis_bakeri','2017-11-01',2),('Bartica_massacre','2017-06-01',53),('30th_January','2015-11-01',10),('1920_revolution','2017-05-01',5),('Amyraldism','2017-08-01',828),('AA_Jefferson_District','2016-05-01',45),('Eunebristis_cinclidias','2017-01-01',1),('A_Scott_Connelly','2017-06-01',5),('Antony_Durose','2016-07-01',19),('Arval_Brethren','2017-11-01',579),('Anthidium_dissectum','2017-05-01',2),('Aru,_Democratic_Republic_of_the_Congo','2017-04-01',81),('1956-57_West_Indian_cricket_season','2017-04-01',2),('2014_Moscow_Film_Festival','2017-08-01',2),('Anna_Gurji','2017-06-01',27),('Allen_Memorial_Medical_Library','2016-07-01',120),('Anton_Sistermans','2017-02-01',36),('Clotheshorses','2017-06-01',1),('36_Stratagems','2017-08-01',25),('Attack_of_the_crab_monsters','2016-10-01',16),('30_rock_awards','2015-09-01',2),('Aeroflot,_Uralsk_Civil_Aviation_Directorate','2017-08-01',2),('Amblyseius_parabufortus','2017-06-01',3),('Indian_coral_tree','2017-01-01',3),('3285_Ruth_Wolfe','2016-02-01',9),('Anderson_da_Silva_Gibin','2016-08-01',73),('5001st_Composite_Group','2017-03-01',4),('Danzik','2016-04-01',8),('4810_Ruslanova','2016-03-01',2),('Arkendale,_Virginia','2016-04-01',14),('Al_Francis_Bichara','2016-09-01',239),('Cayena','2017-01-01',1),('A_Glass_of_Darkness','2017-04-01',95),('GMC_CCKW','2017-01-01',887),('Alabama_State_Route_107','2015-11-01',13),('2011_in_motorsport','2017-12-01',26),('Adecco_General_Staffing,_New_Zealand','2017-12-01',86),('Anbargah','2015-10-01',6),('1995_Asian_Cup_Winners_Cup','2016-06-01',7),('1986_Wales_rugby_union_tour_of_the_South_Pacific','2016-12-01',30),('Adya_Goud_Brahmin','2017-03-01',2),('Akcakiraz','2015-08-01',5),('24249_Bobbiolson','2017-12-01',4),('Ahmanson_Theatre','2016-02-01',801),('Abdullah_ibn_Jahsh','2016-10-01',196),('1937_in_Chile','2015-08-01',24),('2000_in_England','2016-01-01',57),('A_Deepness_In_The_Sky','2017-08-01',2),('Area_code_678','2015-07-01',480),('Avalon_Hill','2017-01-01',880),('Anna,_Duchess_of_Prussia','2015-12-01',315),('Alexandr_Syman','2017-04-01',24),('7400_series_logic','2017-11-01',2),('Greenleaf_Township,_Minnesota','2017-01-01',1),('Acetylsal','2017-04-01',6),('Earth_and_Man_National_Museum','2016-11-01',43),('Affetside','2015-10-01',185),('1971_CFL_season','2015-08-01',202),('Beth_Bader','2016-11-01',21),('Enrolled_Nurse','2016-04-01',5),('Al-Azraq','2016-12-01',22),('4th_South_Carolina_Regiment','2015-07-01',42),('Amanda_Overmyer','2017-02-01',356),('Auto_wrap','2016-02-01',8),('Anonymous_internet_banking','2015-07-01',98),('Curatoria','2016-11-01',3),('A-roll','2016-05-01',134),('Accra_hearts_of_oak_sc','2017-10-01',4),('Apostasy_from_Judaism','2015-12-01',45),('Acantharctia_tenebrosa','2018-01-01',3),('Abigail_Keasey_Frankel','2017-11-01',25),('2008_Paraguayan_general_election','2016-01-01',1),('Adams_motor','2015-09-01',37),('Drummond_Community_High_School','2017-01-01',17),('Andrews_Nakaha
ra','2017-10-01',474),('10th_Maccabiah','2017-04-01',30),('Ackerman,_Rick','2015-08-01',4),('Dumri,_Buxar','2016-11-01',35),('Asking_Jesus_into_your_heart','2016-09-01',1),('Adamowicz_brothers','2016-12-01',161),('Alien_Musibat','2017-12-01',2),('Ahmad_Al_Tayer','2016-04-01',39),('Analytical_phonics','2016-01-01',520),('Do_It_Good','2016-04-01',281),('2004_Kumbakonam_School_fire','2017-12-01',2114),('1977_Chattanooga_Mocs_football_team','2016-08-01',3),('Globe_valves','2017-01-01',11),('Abelmoschus_crinitus','2016-04-01',18),('1874_Yale_Bulldogs_football_team','2016-02-01',37),('Climer','2017-06-01',1),('Auchroisk','2017-06-01',37),('2010_Albirex_Niigata_season','2016-10-01',19),('Adhocracy','2017-06-01',2217),('Chios_Massacre','2015-07-01',1110),('African_Red_Slip','2017-02-01',221),('1976_Portland_Timbers_season','2016-07-01',41),('Alsace-Larraine','2015-09-01',2),('3750_Ilizarov','2017-07-01',12),('Aleksandr_Shkaev','2017-05-01',1),('32_bar_form','2016-01-01',12),('Aequatorium_jamesonii','2018-03-01',14),('Abade_neiva','2016-09-01',2),('Arakvaz','2016-08-01',23),('207_Sqn','2017-10-01',2),('Ducal_hat','2016-11-01',10),('2_Degrees','2017-03-01',19),('Ahmeddiyya_Islam','2016-03-01',4),('Amidi-ye_Kohneh','2017-11-01',13),('Contributions_to_Indian_Sociology','2016-11-01',42),('Clark_Leiblee','2016-04-01',5),('Abraham_of_Strathearn','2017-09-01',14); -/*!40000 ALTER TABLE `some_table` ENABLE KEYS */; -UNLOCK TABLES; -/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */; - -/*!40101 SET SQL_MODE=@OLD_SQL_MODE */; -/*!40014 SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS */; -/*!40014 SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS */; -/*!40101 SET CHARACTER_SET_CLIENT=@OLD_CHARACTER_SET_CLIENT */; -/*!40101 SET CHARACTER_SET_RESULTS=@OLD_CHARACTER_SET_RESULTS */; -/*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */; -/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */; - --- Dump completed on 2023-01-16 16:55:58 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/assets/object-per-line.json b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/assets/object-per-line.json deleted file mode 100644 index cda20604d9a..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/assets/object-per-line.json +++ /dev/null @@ -1,3 +0,0 @@ -{"path":"1-krona","month":"2017-01-01","hits":4} -{"path":"Ahmadabad-e_Kalij-e_Sofla","month":"2017-01-01","hits":3} -{"path":"Bob_Dolman","month":"2016-11-01","hits":245} diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/assets/objects.json b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/assets/objects.json deleted file mode 100644 index ff5b12ecdcb..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/assets/objects.json +++ /dev/null @@ -1,17 +0,0 @@ -{ - "a": { - "path":"April_25,_2017", - "month":"2018-01-01", - "hits":2 - }, - "b": { - "path":"Akahori_Station", - "month":"2016-06-01", - "hits":11 - }, - "c": { - "path": "Abducens_palsy", - "month":"2016-05-01", - "hits":28 - } -} diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/assets/out.html b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/assets/out.html deleted file mode 100644 index a3fa7fdfcc9..00000000000 --- 
a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/assets/out.html +++ /dev/null @@ -1,53 +0,0 @@ -[out.html: "Top 10 IPs" HTML table (IP / Requests): 9.8.4.6/3, 9.5.1.1/3, 2.4.8.9/3, 4.8.8.2/3, 4.5.4.4/3, 3.3.6.4/2, 8.9.5.9/2, 2.5.1.8/2, 6.8.3.6/2, 6.6.3.5/2; Query information: Rows read 1000, Time spent 0.000211939]
diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/assets/output.results b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/assets/output.results deleted file mode 100644 index 11fb9a76417..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/assets/output.results +++ /dev/null @@ -1,5 +0,0 @@ -== Top 10 IPs == - -${data} - ---- ${rows_read:XML} rows read in ${time:XML} --- \ No newline at end of file diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/assets/output.rows b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/assets/output.rows deleted file mode 100644 index 50d989feba3..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/assets/output.rows +++ /dev/null @@ -1 +0,0 @@ -${ip:Escaped} generated ${total:Escaped} requests \ No newline at end of file diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/assets/proto.bin b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/assets/proto.bin deleted file mode 100644 index 064c23a4efbc8d1b5db038ac0d220b019f0d5867..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 33411 zcmaK#35+CJTHmLt@0^~a=jgeQ-8pxrbEmTI8k^n8tfRZSySi(t`q&u+L1boBW^_ho za%z=AEi1*%i{J;Nq#508lDq*LQX2ELQ9|jv~7WMjP?&AO6c>P!3c=V%> z7w#!1aTXnfL3!^WOzflgzRX9DzFK&?+-in{EG%9QTfMMdTy4hve%MGmjo@hO_Hy+)7v`{GX%UZJ?b==+GyNkbnrSL@I z-b+!EME!oS+t0$-efa*DxaKiE+O04PJ8`fR-;OeOy=VB{)6WzhD7UJ7Rt@Xtf=kV` z9Su9~(SMDfo_w+JSh=3XVb}?-HKVkivi|L;8)XM!%RTjrT;@}{Ofsm&{V=F!!?clw z?RMxs*Q%x-DHJMM+z+;*dT`PHZksm6&dm3l{JEc{!4?mgguzCfw1W*cH@@w6(ROD0 zslt71aF`5kh0S2KnZ|A%?-zN1-9@JMN8VXn9^AM9=wdtr7f40=)6OWkVT_)$Fu zn^cZ7on0*ocG>;$5*~c4aC*MD7}N)8H;a31_mKPi>S_I|*G!^eu-hGEN$mIM5BcmH z`Bk~M7rql*3Wq$t{mM>i>LZ1RN{cf~LEH(#tj2jqwf-P;NBzck@!JzzcfPnE4uf7d z3R_XubN76c&!!(M++9fqQE(xO5`SQ}V?QwUVBz>eus+CA_w5h(_WkpPmu6>XR(QU! zxWPHsSYR`3cYDG4?b%>A%HpUOWa*$&cMte!esp@K@X^_s(n4^4kPU+K{ECCv4V!T_ zs7Jl1mYD#$@4v(g{OI-MRq#v zSHmO;E=OU5<*W|6oG)TMzV1)mgN*Uz<=}cXLb>=xgGcQKJru-U^R6z}eEFk=hf6E7 zbHVocGRM#A(2rg-?%Ji^`(eHkB4!(5rxQm(B^-{IdR$tQ)S6LySl-?o|NS?ok*QVm zuN?;IUa;8@6Zb1qo1Z(yR+qDs-Eg<~8E*0X6NP)$!gd^Ww0>m5KC+|vhf~iM9xNxp z#X&n9COknB*5hI56!ESKKR&ilc-gkRoS<*qbhjV&IpZ|zMEzc{mv2@-3Z4ADCmDNI z>f1=tUM~##VQ1*{>J5qJ)JuiOm*%oMD%t7uhRJP2Fbp<3dmLsqv6Rzmg(bVOB! 
z;~!07amyG@&Z8f!#l1c&^u}|Rn?9%Aw@2T`n&b!m{_pV7vr_&pzew9@k~Z9azxPdi zGJUpisxsj0M{;NqvN%(CuCy{g8?5siPKxcwkNiUdZFe~P49`}RBS1K^pgic0ER!tA z2h&JsX=Wz4aNzb@4}4*{@G9yw7nHO301x7YYno9*b3Iy@N4Q0>+5 zR(8>`ZA!bWtjSg!gje4b!tP~k$y7@ zwzCwY727T$Hdh9Lf=R#66foV#Qb?|Zax zcR9ffAqj(y`{4cm!3Q@#Qh4w}h|vjKvgdN!{M&?&{mm1&GFCDtA2IABoNI$2yNxZy z87Cue{r-Kd`j`yM5J%$uk2z_{Zl@m?k&UpL#J#bHfBz?Vf~nUEAH~6yWaB%08nn|s zR({B0&3I!2viM!M6nXX;thIbXu{0A@vOy<``9g%@%DgSMuiD~{qPy6D>sc?9Rlk+u z|J)JDsJ=Z{coDn1RGeK#0Hq$t?Z{M_o0&Ak?us%MoqER6Z`CeE(Fk)0!yI9gS6O^o zOZ)wSzmp8zBe+cS8qO?omRGa5;oa?BvK1$sxXcwX0+-WXQ0_EvkH}OKcflmRNpUfa zJBe(rqB$3%o>O@$++R+Tc+d_Sn6XYj8aN|W}5ojN-RIcc2>gHAgDF5p<`R~zVv;HCC<)b$G6t6+o`=F0P)ht z3Qr)83(iokcN+M_dK6qsQR^IV+84hhP_)_{2%_MpE9t&Rt~!yYC8+1O$~%|F(lU^} zcB<5okM7iQ1i?PYH)w+yTDzqa?$CB3dd(L43$;)w=eRy zU!Jm)1q58Fwh9cK8 z6QCLNxNIv$YS7X;H^Zj7)k)3%qKKZAH@5MpVJ&J!QC9#5Z{y$)qK13&fXrm+4Pc4s z{iov2EmWfs)RM5*^qAp|CfA(Oj%8H>hA=oE59%19eku>=|LE?%$v2PS&v6hXgaMsN zuD9+%wB>2fg}SwK~S>qnDj{8 zH&@Ywxa+T~HBW1CTrdsF>vKL}dS9zPikPekWOc)&?S5c};VOz;S=|k)$UAb|c3{Oe z_a2Gu4w{f*yY0r#^C!;^l|)($ViJ^cs=(y0$YcKPZgN#FlyvG9y(%QnHZ!4FJl z-vXw?DkCZVTT|wf;EI&4NFL;xvVP&X5G(O*B?)V-edNN4sr=_D zfsAT>kOXgXE(F3c0NI-kZRxY7=y;fV8r+BvYPbq7wOBL#Cj*nyk~iH z5aTN|RGt-7bCq{~fCpN`90fZmX1UjL`)=0rgr0Tn!Wz2)*w0P1`L$U@2%jjw@>V>7 z*S`vxaH`zxCJIE{vzsIP-%|q1UBwLcfLHhS@|e`b`@3Hg&__>#OK}&m4z>n;?~9FJ zI4k%YXY~xJ46cQPsuyP+-p37`foFznON;YOiFcc0@tKkz&wb)%Jxz4F=nlB2=OaXg zYtKVcAOp=T0{leTpr^0xVNpE_x=XrEe=#8(MTevPdqL!w{@C<7h8n zpI`z8ELiSpXV61A+^S#Kb+u#Fp0`ZF+Kn~)%%PGO3y+r0%@qm8!eRqz2qV!Ovi7j& z*P#gNpC2*Nke%nzu($3!4NpBTeXXOZ9qc0XNyo?9W=G7KB?$`GYu+As36{M-@_M0Y zGOCE!hdx)twRMItZHLV8Bs*kq1?^i+6Ucy$kti*0T_iB>aF;FYoLii{%~3s?3(L?V zc~noS3Eq5zFw@ zJA=?uMy4*$;pQ%-2Sj1bIQ2w;(<4v_N5=7Pc5XwRDc-=~XFm8f7x`-8sWk*Ci-WgA z*hnYCik|vum;cTT1SNyZ>|iOJFqzx=)hR=DSg^R|0j`T_&_2NX3~^Vr zItLU2&bi=X+VljZF%3c`olewgrMH94o*<)pCfS?6 zLrii;p9YFqT0t8|B!6ZZhE=47k1Z_k;g9<cA}Sf!E#Tw6s&g z>R5~v)xAo1*J(!a2r&E_U-*S1raJKH5G`9AdlC;}H~=Q2e{p+Z+^;}*EELXmY}rNx z{H&Y+@_NnGI}+{M$Fym!emtnkSp`?CxMOdZMB<$wcogTd&R>I@aZlJTk5Q%n-*uc& zM#83S0Tf_FtbQf8xyEfz=!QkhU$5g&qFd}dEci||%#UAI={as^-VQ#bONf^Cgvi`s z$QAq$&;>gQw+ZL5%?5!n+^TiL-#5h(R{+zkK$OQG;U7=74eF>S9n5rQsjwnC{$ zIHB%8bVTBJ4ZsZ#8eE(yt(IpTW^`tr6Idrw8G=HAmwiILui<;_KBha`xOMGiTnm6# zBI$C6Z=e6RjCyHiZZ_Bs_Xe3qXHuZ&Oh1fHsNkOWbKh&E>f=!5o8T|3YZ<@ic18y8 zf10I&TL+@#_*E`q3ZGM2UNB}Jo9SVzxs|g7>Fn0tUXBfQ_ZRv8^8A8`q;3kM=r>=q z&@_rY%M+LIEueHTWeNrBwqH^BX+1bQK7iM5a=ardUnlzn%U$hl?p`%|YSwL(^hi8% zX$27zDPJE6kGrf+bzP{5-Gi?Ntd_UD4oWAE5FS-<4`G&vM+yY)<_8P&^CTh&L3NUD zmxSi05s4%{-vU;1q)~;tX{XaP( z+uiOa!!8Gm`G&4aySeR_emx>nDT>v>j7h4&5W` zU_ZWC_!ummoJVK0JAg|_1O4C*{Pla}1;!^B*ME-EpnTIb0waO4A@xiSCvxg$n=a(Y)TTn?glS$0E!36IsHt*B`-d9M8I3#am%rUfM zp|n);WB^GguR6NzLytqQ%XMwl&JO@t?)b$L{L-{cxtK_ae<#?+dJ}ze7;tiIw>vN+ zgN{E5*{}jBi|&Ht!n!w&y1{2GAk3O1GDtdN5l5kztoY-sdNYyW3I3#n{D5UpYNL}& zySc(=Slf9tkuQ$CgnjU$%%*~KMI0d!V%;-iCW>1ia^vi-NywmT0;MH)0S776vrr$N zz-@*Jm4!)I7C!6qAmIcpaTYJ>U`rPZLn;DVi=A=mBvJUcQ`q+k_;tuuu7-PiL@{Gs z`j^vYTa_}~AP?BQ6(tVT7^n-ufu)rdEHXJaafkLp3+jG)+R6Dc>V^$vr968ps1cKT zxM;|C$%3d#1SE*lJwL z!a)xl-721k{dbF1lpo2q)WID)im@R*Zt`r-ote!#2G@l*yB+nNLwgf%R$j|Jn%P5L z0XPodngyFxv5U8E`R$YyG3zvu&n-?Ug1OUg#44^V%=AY5rV!p=HG+z)N^^ezmh%o) z#IU8GL`IOSg;F9q?xsH4m7TcH!RFi$kmf9eiy0&LHiRBlWOJ4B5jk$&4uNMefn`_DZ^+?#I!soDi_+_uuI*<$4@k z#r$v9$L?90e?LmOw*INjaZAO5RaLw4B**b%6-U8so>BG*Q} zs&9;45UftrA?xFemWlV*p%$g(Spx}cQ3r74^8=EBmn4gOxVKJyA#d5Xkk)K2PG7P;2 zhU1(BhFtfA9!=>6qYW%)_;9gVy_PdA77?Acl8pJK;`}m9H6#Zs=|w_H6OSq*tOWMf z)+*G|jWEj*h6VGI1xv?~QN9@t^OUlvgCll?WMGsn!4O1ABZISX7RnZQieF!d_MoW5 
z1Jp^(*df^B6kyJmW>z4xF5^X8M8hH)+!y4|WdF-G(7UKmq6>jr*f7T^xUUI)gC$Qd zlFK+`vMs3j!y~1_^JNGeRkK_OZ^inrTj4Fx)jmLlPu#yxz}#ahuwb853ZrvNAv~1` zJ&<>hXR!O4L%2sV%KD|C%n3s+Zw&fGKkl(400kwR8jUdWxXboqdbO}v?)B1I>|D7r z$~(|ASQN-%(jW)A6j8Jy1)qVnOx(n|AHgA9ilYRS)uqZOo!HV}nP$IBb7UwF17&jq zXNZYh@doQ6tVeCG#foX~z>1sa`IwG(aDY$7%C+NecpJ`&?1EdP@fm{UxTZVY%@|PG zn&1*xF7}m5rq_B%kH~kM^66e+WyBsaUq$Bp(hx)uY!5OxZKC;3<4`G)6RwXJKC(f$ z&pFkUG$SuUl#7dVG%&ek%!4~GSwAXJ?w&1jguo5d#y)Tr9MZyC$e9FqG!JR~D&>xj zS{fHIC9EPa>+?XDI*K(8G2F&sHw23yy>5~;)#uPZ$o@GPO9G%g6=ccC2idyP>;fSW z&q;E^Nsb|tV=fN|Ci9gZxhZ0OUNqh!yY7xbdN_s8sf2JU+e3=JVvkXEKxYX>$^j;l zu*dC$fR#x8G6IKacP82VX|hPA<>kfTt&lZV1_>N=<4)pX^8Y&OY4~;<+$S-bbZKsP z6(}vrhWUl;w!pplnaJ}_BVsZ=cMN=tkAF$XHOta{GR>uxio34jx~ciXi)BCn9?$rs z9Eu~YY?5KqI4qLM_ZCI5;-B5Ehv5mpd!DKBFhI<_tZ`|D*!OBO#5dP4-gqu|NAXyv zN(C#F5Zdau#&=MHg_U`!&Sqw5 zMt38@;;Grf3zJrY^=wfqR6_+5V-!=L33ZoK+Txgqclle=7G#^CuSl=h$40FGxH$_v zDCBPv46;3p3*f8~=GaeSv;#RXS8Nl_r0))iD4{xX@vtc5`=&5yvvbR{IjpkmUV$ww zEiD?p@6`YtlSODxiipji;GNyH%WR2iLcWGPtN*h!|3O(BSR<5&_y&v=0Dsbfx2VRr zi)tE`4kGY4vUGs=Az#g6vbwvPV|1m6{U$ye3$zs;9N=4&GrXD*-^DpL7Zy08ngLI7 zA;BGl-^WK1(4U4?o=RfK2e6ak#3zabEWh#XM|5)VTPm4gqc0O*n>oM0 z;l~P;6og@7Jo=Y)xf`Z`Bh}I2*#r&u`NBDZTBTAQqB3`R6u>j*14fnakw=s?h^h2O zPS5*>8F}Kl&CLrNR-$O7X4lDzen>rSqfQyoF}GBlqqHPRdW5k={T|5)jUKm51R(_h z9#AHr4AZQ-vXB1GwsvJ-CnCaZ09YA&7R0<~xOZe>$FeIjsN6;+MN@V|UsGyO^n}rz z?AtXmDifNuGsiw-#8pwFx>Yfb5TTTuI`R>XnaGq_9QspF>o)ubfuVYE6iCUIRDe>_ z(`?Y~qsgQRWJ9n8%l4GEzRMU{I2Cf<&0#X4on_(f1!Tcph$NXP9U)H0DNE_QFg5hg z5;B3u6GiAYK!yjvf6w`_N^F*Zx9hn{mI1dC*5##@;_?jAMzM~{j&i=h)aeNNrzA4+ z8P(EpglH8@suwKHBM$681t=qG+K}rv3cyfQi;y}NwHe}^*D(Vm#a`q^Jqm2J5l zEYCZx)1ZSfp(N%+6M=u=nDY=_X}hDC5Y`a&iWj4ORbhjopb91c62+!a0;(KU3f9s` zUaG_`-3yIb&Tdhq2TIQ`tkCgKRZM6`qD2D14$dIe(dU8Sq~_XbHO9VN4vB@b24>SC zN+aWbgL)+j+5}w5^W+v!X8w6MBLi3X_ftpdh ztv`O@W0jy?RHXrVJus5Hm5DsURZbMysb&N|sV(1Uco~c_Vec#-&VgV4H-+c zJz0yP=M~v%bpYN#URQ>-<3m+>m}#twB7E|Kb;|bnFPy-*%F80pJ*61PV{p!^A?J_= zrR5c8eT;1U_v!+me4psHxPuMVLIb@0gb16Jl8Y|aZ-xB-cn!tBTzIX#*WjtJ;+)JL zK|=tmNem=bGa#;_CS?ubv@->wxpOi|D=u}FD3(VfXQ0T1g37c;QZA3xvbqQhO&dyeH77q?pya*0O89{## zknl4`$B)V^t?J-GVIS93fd_3z`|OPaefIJ1OcT{(As34(c)PK=Q65vyIkKLt$26-t z{aWE8@c%2A7-})|>NjByOV&@aK5`)cS(48gFTk*L#KKc@D>7{txcUb$mBW2X4#l zIL-=6OB>1q>V&AC zyWsCXLma%0r6Nf@S!W`?^_OK>*TUN|F;3heVDorOk~+snWymII0Z5Cq@5{CYFpr2) z!MmvY#Kq@A0?Men!xKn8tqhvUviU^8%S~@DY`^}?5#kblvKUW-^Ggds;rDUR{b zsM^CK(4V+l+D$qRXNVo5?2M}Q2sMvxjc|%~Dij7$`OF5un92)#EJHUtI$QK8p=c<7 z9;Y0vi3l?r20ST+h1ya)90h^)V>`5yx@;y$6i_amvshZ~Q3`-i9k?UNeGfB5M1IC7 z>x@_nU%WTbRb!1Mp9ShPcv<X0fXOC3URi;%&) z)`PdBs%PDmg8VC1%k49apl+aqtE4gH_5A|fWtd$u|7oy!!`wHQ8Z`30CgHZzK#9@{ zZ2L8&-m#cGmv}^Ow-fJ;_*65v(i9)r_nKjy<#U7-;5bcj9R6P_e7x-V17+KNQJ8Kn zy+SKl3~t_m7tDE12ugxHjWo~ZH&pu{byRhb=VvS` z`!WT<$F-exUzw$X)x$x^ogltDt!<7}#Vj+<&CAQZb?F0VO#lp3hpfgp{- zB{%`PIwuNwAC=oS%3t*OPTcePd2P!@S<3-iM#d3C1yTXWzOCkxdYkN2UCD{ z)$8Z>R?*Y1pA-m{iYu7JOF}{*dY+1vr9P$**@|_~3q3o~?3OIfswY1tmYEId(bd7) z-POO$Z{g`dzclGmXyeeSedcsa9#=<4_5df~4^DJY)uo`wG@|#`=A!bZV18v1ODf+r zks`MIrKhke5fSz1sLfVna!1(^Z^PkWDAs}dc}ax=p`TIEAYh%>>N*|498y;!ayA0U zBF7ifURU)lwhSY&F!mZ^}oMy@j;li#k!RMVpFd6Ap;=^+_w>QQ2Ap`Hs+{IBnFRc;Z8V2Dp756$qMFTQsw-a!WbfEdmhW<824Kp&~u5w8hv>u z_FG|UWo?;&8=9-#woCdS7n@mwuA9gF=>rJI4mwPjK|7sM+Hppk;j+SYO59FDt< zEbvnTYgSa_YEi4>CJ@2xvU7I{-Od!0{QzKtE0YA3>3K_`eSS!Z zPPiT^r)wRDhY^_Idp>0#ddpU$1Tpp#h&n!}hxG(4TJ?bzY(kZA@**y`>m8x-H;B5r zp>bn-w9WVc-~0|a@{`;>YIl54WV?S#(17?ipidRZJPy>VbnQ9JWIQQk z8fWY8c9Xn9$5^``IiUw61~RIV5*r?Jml$i31ASMOrd}7l`aSW8@-^GB!44TYt(d)Aw-xHgqxci$>nXSYtqG(0wTknZ*;O9n^sIr&jYvsrqi99{Lkwq 
zzd!-vC^5%kke8Qc#|LZm0r=a7!Ak{bq7>|N6FvJJV+q}`o$j-%SU=c8m?{9f|wgPk9dh~26t^F>Co{PCSEWe zgsG-QI|J$zC!fa4=b(SPH>A|v-CD5syFYIgKb2KozY}*OUv($q?~LFDXQl_z)4(jq zn7SWZQhJL*RT(dW28yzTW#!~b^zC5 zREqAnOJ)QRMJ0j9einW|X&V_fZpT43nnhv)Kv)l?v>T~}D@Tk*L4N0_%@Eeqxkz^w z#pk5gJY;rY3y!#)F@u9GB;M~~liw0WjCXWP7Sa6U%2$m2oAJZ;sj1I(#T<1!VGXp# zS=%Q@aC}bos5C4jeN8)Wnj0!m}1^?Fw5Jsoe507AxT|bVJu$~oa zsJE5F${NG*yd5b?(SmsDyXi4zaU#YhXw@+kwxq|y9HVvEki}~?t3K{hX!)w^j3E4u z8v&l5jGch0QSmvgE0EVQ_R*b@3ZPz)?p1O9c+Yll6+7TQ(jth zZ1!$J(rm@C;N4o@AFOca=TA6%xe{D6n~v$B^v8Z_$n+8(nxF4&_SJe<_h?SqLU!{zSj)%%a;N5j3h?nVXwi zoLeF7&R5+^wY8eeA>&_t(h&LGx8pug$vDY2B$!F0u-AtC4Fax~RH42_#x!mPJYziEQ>q7S4(< z^ix+Pl|+OEloHF=Fr}Wos_Qr%X zuX@-q!(R1e9KVOAA%Hr(+rSsf*xvnh^&BiMsIBW7H3wZ0HpG2|5{GK_B|943#b(x2 z95MpTayFlPSiZ6`AW|Z$NZ-yxQKySMEwBfq0q&^*r5nVFlX>MP+l%VA2(p}v2{>>( zCm{PVlK#wu{Qkz&Lg6KQ)Ef}4h{0hC^x^SpMmqeAga9qMCCkYrJrSh~R&^#Ck_83R z3vN+@4huIS>Bl9^`N)l4s!ChbA@Q82cGVj7u*~W!SFi~r6vw2p6ZT<R}Op1Xt|_{WYYs3j8vb3`JBE^?st z8YYa8)^DH1B36{&=TiwtURx?`q^vnqF!+gv6;h>olG+eS@i!4KH08pT5+w<@;+9Kp z<9m#FKb&hLlhFeOIw`izzDJUvN?(QKt!9TYCe#tRbCsx^MG}^%*CCoUWvS$Kyt0N6 zT4zf$r6S$QB*!p2(5w+7Eb{3;DoyyJ>Z&*JU0nkE{zTkiX5v}hp3_JhIt{Yx46n*( zY?wcjL4^q?s!tet;A=H3WZ5@Bw7g&qE943huMK?lWMlQjQNCGN8wD?jyG$0ilsFN_ zMBrbk5GrlA$hyV>{ZS~C!gN{Yh8S*w7LbN#8+5WqU?{=EU7HQEP6xa3Ap&@cI4&@G z4HN)V=jVA?&xbX^dIAI-(P-yvXU%(zF z=WHNYT?F*V1ve5Xw6Ta6)RU)fR_e)o%1yHInNMT#&JTp!N$k5DTcoy0y)x0B508qJ8leW^fJ0Dq47Sr;e}|1yHT2nL}y;jfw|WRvUX(%e3xsj%JN+ z@*yp}UOr7G9)z~U7>p@SjLmuSm}E-T;+b;|s30$;& z&-~s4AT5Mxrbs2gTdLEe)Xkb=+*iyJi@t-OWo{Ji1f8QDJ`W_V*zj+j(7nX=W{Rc9 zkrri^D=Y4qBtq7E98FPAW};JIC-MAP|`8Fjf&3cg!a$SWu<}+IojIrQ{B?j;g#o z`jA08A|y3m!b)be*^YO`rkasa9_BMRq}`Vck81`3Fi9;7_I(4+OLDAv(IRxw*Gh(d zeheGQi7yA|%bQnrSFe&K#3*cbl;l#x%*G9aLqneC&}*{IwBbxtsZOMn&F6kIGGJCo z!{UG^3iN|AW-o*!u_rW$aTXW^9Z&TR|4l8MJ6~}D@1o^s3I*R)HdDtda&%(mf^kK6 z%eQKO>8uGD@<~Rl6}g=>^D@n|=8YpQqBsV8;GeT7#}>}iJ>{t|)zc3eWTe~{3iJK# zADcG5hd}%=u)7>$X!GG)V)}6aZeeD@Hl0rmO41c}Rd#pEm`it(V2_ zYz9m;mG$;&w9rBROv3*3MV~E`Hb(*Z9Ze8mV+1S*<0gs5y~&pnn5Rsu$a}?1`i?3B zQBD%w(!fA>9s>9GD)x&rG7k{*`1@*lyXLo3!lEh=V=*^(%Nw$OW`}%h^M`^vdyuzq z>Ucp_R366QvTTH11u!}gT!q?h*EK%kU#k?clJ?%A8*c{+-ttk@(rZ|ki()OZP|+uO z0*37XIF%anINDD2KuVM(Hiy^mp2e@91h2D(C2$bgE^Lg;75~4pf==_COe~(Y^$FeCG$@Vadl7Do9qfLFl@MgnCyh3J?Tp2a~n&njW)J4pGahD25 z=0ga?5vOBh-E!pOj3YH8KWG7+6P)!b)%nLmiHw&ZLX6IQBsuozd`VyXXt^g_E9i1mdFR=@Q9<)0@n8@sF*C zdvi&R_?V45*K#Tai0{>qxUA~HjD>>#bc%{En01CC?^2^U9fzm?+!gPg9Cz{Y7+xfcUAa8kL_)f+u08C7JKX!?R#u4*Xes;N}c$iB8)Y zB2<2b7AuKp6tZV}IMB$!Ela(9RXS5{VGFfAmVTu=7MyWh+3EKn0?#l6>eSH(R53GP(NX@vKG)rOP(x6Uq0g5${# zwRoApd|XkDt7a^#+JT^Hd>V$bGfIDBnB^-G!$~1o?PF7??^k{0eDH2CkN2cq3v;dx zvI!h+(}Ct^=4QrF!FsjSQh|vmCX=v2B{DIoW`F=y$T4YIpTZL8_{Dp%8FHMqv(u$GY-rDjJeFSnf1~Y?bN6jVp2_ zNu^lyLxsM_ZL&IH6H~3k_Nayj{yVAHLIC?--ljb+iIGow4bsaQ3k=ysqxBQr$i`Dh zF`0LUvvzwfWbp~rCE~gvSfl_9)IrabgI6b3o|+NTeN7WrAf$72Yc}k2r*xiFJkyjv zfF5EZi9b(;x1W&ZA`M1^5tzium0I9mCV(U9|YbXqt+=e^BqIv5m!FcN9nm7soy0cLok6JNP_MN*z)6A9|FrXFQIo%&n z(vF*TyJXPk1!#>j!2z`sNPT?Jgk8Fl-ln)CtxryXG9>wcmFHQal0q`H*^dIWyvuhT zQPHcbA0bTOAlHQ8Fwg43m#mGAh+S$S0-mp#CA1>w3YoIpY8ZZdLeS7xMpqaB=bl%-|0S&f#jZ^}4}&|OwWgFipWoVO zRE~+{+icL<+BFp}5lkGi5Jy!?L+3+rc}1$iesq`vBmjG;J=#2=WOM-Z8J?A^*Hk5l4@3z%5K!d9SvxQq{UJF4u<3$lu|Dx^3pN~$Mo*zGJR z#Xf;fg}^BHBUz5;Fawk{P*sfw^Gu9HLlr1IWGtGEN)FH&^>p1lvu?@OmBOpmLbs`i zKm!-Zp)@UfVt*l;{ID4ma{sc;hiYaD1l+Qv*ngK{c#P?^Zh>(c)P4RYpZ}6(YN?N_ zL!toaf(n3RUc^pi? 
zKPcO<&r*YUyh8NS_dk)I{j9TF4${I?Grz~P68cmIu%v1d7sZcrb)zhvTaomp#}?BG zQb#)W75KxW_rjV7i54}?bKvU$Usx&psjtRyXQ!J%G%|EhvxKcMc>;j^`DumPn0$EP zTFkstw*b2+712-FD@1mfa*tk6*hseyefpWbp2b~W8v2t4pvjGC8Wq*PUFxSd?PEhH z5CfnEvTKZ`KQxe1!twJA@C5zciZFOMnaL#T7p8E&Kv;t857&T?!RG|%nuypfH~BKh zNjwU7f0fQ@4??sdpJlGoeQNGXBI=J&0lHn>152du_uOT0^Fmqk3>m}@3putaA{)(? zxp;Gfu>fQo$NO~>(@v5G}++Zr_gDbnVZ+Mo^;^D`!>(~=~0CTD$QZ&wN7{~pT ztq)g6j^t{R59qSHF|CO(I&T}Qj56r4%m>(%X5T(Z2Md1OVQ3TXlPe`xjeW<&F$mS^wArc+S7dG&egc|O+*A$LmD0+m zgW4WhDzok)3iuXZ4rpfWzpK};%q~eFEF9GK5kmBH>cx)uHTfY+yzesiE{6?LZc6UY zlp9RlQL7cD4I~qN-I1N~Q64nc2^D#eEMxp8iG>@?gH#F^BHV4au(~?nneSoU;DiA= zDX5(9{d!JU$({r_Aaq*oo@DEC&?&-mMWU!bG9wpbmpd^z%qNHgfSBTU8Oxb5J{RDX zI)H6k@w&>WMiVwEj}Xs9Q->c@VIuh3Y7AMLS(#g;SQ4JuiI!lPlKtgNs<;4+Ucie; zzedA#4fUOb|0tDa=lK%pVT!ULG3)|Hy**-&E?VPe#98He8N~|s;q4e}0XW-2mzvQ& z`kfD-lskN&Oay~7&!}v>$jl5Jw9W4lCy#9cdpi%rKP$e}+z{sZOgR1@W_;yoY-JBs zOh-=44)7lz)l4qOcVkryoH9O`_G{392Qo62U+D|dGO@OUEETVUjzmM%hOd%17KsY`(CpYJm-4zg)FV^W*&nj5*)`|dFZ+39!smh_pjj$vB$m3&4Lx(C5y~Jz0FQcf?MC4bLgjgl% zhXniH3K-;lz5a<_OS`!F?ZV4U-zGmt;C|yABqCkvJepEL9PmUC^cQX%I|Ubx?WCtr z(f+nkTupevLsW=6PP3FR5gCFNJg|;ryCXO6S*JyO;jXQFj$MPP@BsIoEKw!2)iAdZ&CB}EUPq6 z!$=)l?V|k;;3cAcd?CCPlK3K|6i)R*PADd z8e1d7O&;B!f+TAJl)^aDKts{`da(d+fg^*H8wZOZ#64{&g`ggUDsP4G`7{0A$|UkR zg%%;{6DmK->60on=0gk{;-crjjDx^exv0mkWT*dg zC-9OMs$C2jtqFLcC1cc*o~yT6lY80F*=4r|>LJp*%%}C% z-4OxUL86Rh#*@O2VdHk-CcLx@Xnm9=l@=)Q<|)Dbc`D32`CGj4sx*r{>xC1!nHT}_ zJKGPFfn&1;4OJpA&Z9k-S~P}7B|h{YVL`?l`wwhthOcMcQ02CF6FL!B_`@q+sB3_L zb-z-0`A77zfAl=x-DeVkihncf%gF~O=PG1Ba6nFwTW>gkk|Mc2s*f~Q^@C%^RUi`T z^6dpTn6#tnS&%($m3Wl2xVw~sd0y(FtQVWZ^hY9e$8b*a+osPyeFE)-6Gi?8;EQ0^ zT|pj7O^)mBDsOd^djUgXo5znjZS(fY&2Evf_U)(ean_xDN04tnHHUjr>tGzZD)X>b zrehlQV-+E^z(Qi!m(7D8gXF%D@^TKO&nErCgR4^?^D}-Jy&HxZ(4^-*j5Pk>DY1kY zCpxn6C>bGxP2`<;5GV?!D;(rS)w8VndHu+!>nmc_DYPJZ$_LuZ`lzE;eb$7HH8xi7 zPm*k|;#<^lmrwW?e!R}JyFP%Tq?@V>QKnk7vj;9*17v{0d3&hk_RVWSCFg)nVb5`{ z!+SIo7pSQ+t}|F|vTo3lbQ-Elz{R8>B6ZrXa-zd*u+Ea3SnBIg7;1aGfDG6uXeuyC zOS+AOewQA)87C_{=J#zfWN4wnR1!Zb>_O;EUYz77beIT z5(o$vv4ZQQP6)8c`|v&t+BcL%JT!=shc3I+F1tdMIWi2|T$>ammIb1X$%)>RnxkW^ zKC}K*!^FH)d+UaKXqmw;soVsvpvlcB+Th7~(M3NW5ovi1TpYrL3wBf16dUBV#u8S6 z1s=yY#Tu3OrX^Vx7K=>Q(*&j*o86(G)V@D$De2O@5J?Sg<_=twm6tdQ7t1opZh7`x zOMk)wRqP7NN35lqXVtN3oG{~fsF?Puq0*OkwTap$Je6=Vh{y?_vHH&gP#GL6djbW~ z%b5<{M05*~7+ebhz@Q}W!t`8^iMF;D(tJN4@~Q4cuDOH%qD;vB7;no5P*e`5oEHmO zB4--oEe3<$0kS98ks@a9uTp(OGZO}*oAt9lm(?MVop@jA4#U)VCzl_+Vg^D5jd)Uv?(D^cWHNFj zXUsI%o>0CUuf`;30;Jz^t9m-|b%hyV3%v?#A-%za2Ja+MW{yHZGI zsXbaz$auDcWbLzbqPP(tj1aJ1+KXYc=K~R`kO2>?agmc5#m^R5Jc<-s%7%}CKjVFr zpIVU-#)d^*oi-N9LLgo#<&M}iN!4S@lG!^R7MEOp+q}|$u>KuPC=X4p!x0>YQU2t( z`egCZictvJxi*`+&BXuw9YMxse8JWAyKxc>qGto#8B5|GWuKRnNL2ENV*==SQpkBL zT&z{bP!dMuHfASv;yvPBw*(VzbvH0qc1V-IXJK@M<6Hqk0NqxIvW3Kqfc<2rt(@zt zDyYO!_0==w9#Qf4zfj!+F(#dS)XYq@_}k9^vXY%5BB`UqP}tAR3qOo{Q%xm=$O|0E z;d#3v?L8*RX58{YmeYlbe$Qe|MJPLw^acJx%&sZKJ<;oY@fCqXwh@b@*%0l#qS*}j z@0x6?UTQyo^Q}b^Rs}|0kkOjTR@jgbvT+abldqms-}jgDqLH5G^%RL5mGo(FO$h&BqrWavMVrF}lMq1wnmaVOEQ2QF z9mg1mDxTa2G~#}2wndc)0Nc={q>V|_l* z5QdUotwE<6114!?bEhcDw3mvLsFEWlLLqvz&rGUR47E0BXYn}-O5XLsn~BwPs>*;n zhJVrrvE}Ph7O{@CWIj;Ekj+ z^R#`CZ*0MEFzCp=xoK1~W4HhL``sK-nt2(V0_l;agpONj?M~E!)rhJKRxi$q0xT7; zzjCK3yP&!B8d^pDMonEhATf?J&(di@4IVrI?H&f3 z?KLx)z>M6OJ4O5R3n(-$QR$B=_~l)=0tA@gV8V!4Dv{cvQ@p^(P4XRI_8*-AzGCSa zKezRvjL~n<7kqy{*;oZvyC8n|Th%@Mmd4?ze%tD69X~_Up4{Z*-31IkX7K_Esi=6F zo)!`c9v^M;p-SB6mKJ7$P1OM9Eee)ZKP^D%(;I4IpfSZYFc>`|N{3;_xNlMMzK%}v zaasl;qMks_QULE;#MgMld=`g7)_bLzDoCg63}(?*Ww6E)n^Za6_Wk5`_K%2mn9&4w z=U7bjWCig`2xvZf$JD35pCLseI*@XRD}45#mZJBp)Z-yeUu>9vVxcj6*&Bnc<0356 
zC7Qz{@0MkB?xC!nAb_aGO=jSwvMN<2fHAHF5HNZ#xSY%@hO7 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/assets/row.template b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/assets/row.template deleted file mode 100644 index b8855dcb77a..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/assets/row.template +++ /dev/null @@ -1 +0,0 @@ -${time:Escaped} [error] client: ${ip:CSV}, server: ${host:CSV} ${request:JSON} \ No newline at end of file diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/assets/schema.capnp b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/assets/schema.capnp deleted file mode 100644 index 12859fac734..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/assets/schema.capnp +++ /dev/null @@ -1,7 +0,0 @@ -@0xec8ff1a10aa10dbe; - -struct PathStats { - path @0 :Text; - month @1 :UInt32; - hits @2 :UInt32; -} diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/assets/schema.proto b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/assets/schema.proto deleted file mode 100644 index b86c5cd42c1..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/assets/schema.proto +++ /dev/null @@ -1,7 +0,0 @@ -syntax = "proto3"; - -message MessageType { - string path = 1; - date month = 2; - uint32 hits = 3; -}; diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/assets/some_data.sql b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/assets/some_data.sql deleted file mode 100644 index d782a6c5520..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/assets/some_data.sql +++ /dev/null @@ -1,8 +0,0 @@ -CREATE TABLE some_data -( - `path` String, - `month` Date, - `hits` UInt32 -) -ENGINE = MergeTree -ORDER BY tuple() diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/assets/some_data.tsv b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/assets/some_data.tsv deleted file mode 100644 index 917a23a097f..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/assets/some_data.tsv +++ /dev/null @@ -1,2000 +0,0 @@ -Bangor_City_Forest 2015-07-01 34 -Alireza_Afzal 2017-02-01 24 -Akhaura-Laksam-Chittagong_Line 2015-09-01 30 -1973_National_500 2017-10-01 80 -Attachment 2017-09-01 1356 -Kellett_Strait 2017-01-01 5 -Ajarani_River 2018-01-01 30 -Akbarabad,_Khomeyn 2017-03-01 8 -Adriaan_Theodoor_Peperzak 2018-02-01 88 -Alucita_dryogramma 2015-09-01 1 -Brit_Med_J 2015-07-01 1 -4th_Metro_Manila_Film_Festival 2015-09-01 80 -Alialujah_Choir 2018-03-01 221 -1953-54_SM-sarja_season 2016-09-01 1 -Air_Force_Song 2018-02-01 19 -4-6_duoprism 2016-03-01 30 -Ashley_Spurlin 2017-06-01 94 -Asfaq_Kayani 2017-10-01 1 -1607_in_architecture 2016-06-01 7 -4-way_speakers 2015-10-01 2 -Blue_Heeler 2015-07-01 149 -5_Euro 2017-04-01 16 -2009_Spa-Francorchamps_GP2_Series_round 2016-04-01 12 -2015_Guru_Granth_Sahib_desecration 2015-11-01 6821 -Agriculture_Marketing_Service 2016-07-01 2 -2006_Football_League_Cup_Final 2015-11-01 1711 
-2008_Uber_Cup_group_stage 2016-02-01 40 -1923_PGA_Championship 2016-08-01 97 -Fannie_Bay 2016-04-01 6 -AlchemyAPI 2016-04-01 344 -Cinema_of_Italy 2017-01-01 1217 -Arodes 2016-11-01 36 -Damien_Marley 2015-07-01 168 -Al_Jumayl_Baladiyat 2015-08-01 5 -2015_Alabama_State_Hornets_football_team 2017-06-01 32 -Aglossa_tanya 2016-03-01 1 -73rd_Pennsylvania_Infantry 2017-01-01 12 -2015_European_Junior_and_U23_Canoe_Slalom_Championships 2018-02-01 31 -African_Leopard 2016-08-01 64 -Faverolles,_Orne 2017-01-01 5 -Aaron_Fukuhara 2015-11-01 17 -Annular_ligaments_of_trachea 2017-01-01 31 -2014_US_Open_Series 2016-11-01 35 -A_Better_Mousetrap 2018-02-01 4 -Dibaklu 2016-11-01 1 -At_Samat_District 2015-06-01 35 -Aaron_Peasley 2017-05-01 32 -Apistomology 2015-12-01 2 -Buyat_Bay 2015-07-01 54 -1942_Estonian_Football_Championship 2017-05-01 22 -Action_for_Autism 2016-06-01 346 -100_Hz 2015-06-01 72 -2003_Arizona_State_Sun_Devils_football_team 2017-05-01 82 -Antona_obscura 2016-09-01 1 -Akiko_Sugiyama 2015-12-01 32 -Elysburg 2016-11-01 8 -2017_New_South_Wales_Cup 2017-09-01 38 -2011-12_Gold_Coast_United_FC_season 2017-06-01 1 -Agency_for_the_Prohibition_of_Nuclear_Weapons_in_Latin_America_and_the_Caribbean 2016-04-01 15 -Albert_Dunn 2017-08-01 87 -Hahamakin_ang_Lahat 2017-01-01 984 -2013_Spuyten_Duyvil_derailment 2017-11-01 5 -Ayling 2017-01-01 5 -Anti-Establishment 2016-10-01 1 -1951_Spanish_motorcycle_Grand_Prix 2018-01-01 48 -2009-10_Brunei_Premier_League 2017-08-01 4 -23_Ursae_Majoris 2016-08-01 90 -1927-28_Austrian_football_championship 2017-08-01 4 -Andrew_McKeever 2017-10-01 3 -Clinocottus 2017-06-01 23 -2006_State_of_Origin 2015-11-01 7 -2013-14_Los_Angeles_Clippers_season 2015-07-01 8 -Cor_Jesu 2017-01-01 1 -Besseringen_B-Werk 2017-06-01 158 -Amy_Hempel 2017-07-01 1091 -Franc-Comtois 2016-04-01 2 -Allium_giganteum 2017-07-01 1103 -Abishai 2016-08-01 56 -Abraham_Clark_High_School 2016-04-01 88 -Baku_chronology 2015-06-01 1 -22nd_MEU 2015-10-01 39 -2015_Open_Engie_de_Touraine 2015-10-01 195 -Churchill_Bowl 2017-06-01 30 -AGMARK 2017-08-01 117 -American_standard_wire_gauge 2017-12-01 3 -Araby,_LA 2015-05-01 2 -217_BC 2016-12-01 202 -2008_Trinidad_and_Tobago_League_Cup 2016-02-01 6 -Alazan_Bay 2015-12-01 22 -Aluminum_fencing 2015-11-01 48 -Achilles_tendinitis 2016-10-01 5884 -AFP_Peacekeeping_Operations_Center 2017-01-01 64 -2013_Xinjiang_clashes 2016-01-01 1 -Arborea_Giudicato_of_Arborea 2015-09-01 3 -1941_Cleveland_Rams_season 2017-06-01 40 -Ju_Posht,_Rasht 2017-01-01 3 -Ascalenia 2016-07-01 10 -Aplectoides 2018-02-01 4 -European_Cup_1969-70 2016-11-01 14 -Armen_Mkertchian 2016-05-01 9 -2015_Aspria_Tennis_Cup_-_Singles 2018-02-01 1 -14_August_1947 2017-11-01 6 -Adobe_Creative_Suite_1 2015-05-01 1 -IC_chips 2017-01-01 2 -Austo_AE300 2016-07-01 4 -Date_palms 2015-07-01 79 -BCS_bowl_game 2017-06-01 13 -AR_Border 2017-06-01 1 -Aranda_de_Duero 2016-04-01 256 -1919_Wake_Forest_Demon_Deacons_football_team 2016-01-01 16 -All_The_Wrong_Clues_For_The_Right_Solution 2017-10-01 9 -Allan_Campbell_McLean 2015-06-01 131 -Bradford_Council_election,_2011 2017-06-01 5 -Astronomy_and_astrophysics 2015-09-01 62 -Dutch_Antillean_people 2015-07-01 57 -Army_Radio 2018-03-01 711 -BBVA_Bancomer 2016-11-01 709 -Lake_Aloha 2017-01-01 30 -Andy_Bean 2018-02-01 3092 -1941_Pittsburgh_Steelers_season 2016-05-01 147 -Aniopi_Melidoni 2016-06-01 4 -Aglossosia_fusca 2017-09-01 3 -Art_books 2017-04-01 36 -1929_Washington_Senators_season 2017-04-01 47 -Antaeotricha_congelata 2016-12-01 10 -Douglas_C-54G-5-DO_Skymaster 2017-01-01 1 
-Chris_Jamison 2016-11-01 827 -Ace_Blackwell 2015-11-01 9 -Abdul_Qadir_Fitrat 2018-02-01 32 -Arnoldo_Vizcaino 2017-10-01 1 -2012_Open_EuroEnergie_de_Quimper_-_Doubles 2017-12-01 3 -Dale_Rominski 2017-01-01 7 -ADHD_coaching 2015-06-01 50 -Claire_Yiu 2016-11-01 209 -Applicant 2015-10-01 253 -Apache_OpenOffice 2017-06-01 6031 -Abel_Kiprop_Mutai 2015-09-01 22 -Airdrome_Taube 2017-04-01 46 -Andrey_Viktorovich 2016-06-01 1 -American_Idol_controversy 2016-03-01 36 -Anthrenocerus_confertus 2018-01-01 17 -Appraisal_Subcommittee 2018-03-01 17 -Babusa 2015-07-01 3 -500_homeruns 2016-06-01 1 -Argentina_national_volleyball_team 2016-08-01 64 -Chief_prosecutor_of_Russia 2015-07-01 1 -Absolution_DVD 2015-06-01 1 -1,3-Beta-glucan_synthase 2017-05-01 440 -Dave_Sinardet 2016-04-01 26 -Adeline_Whitney 2018-03-01 10 -Allon_shvut 2016-07-01 3 -2012_Penn_State_Nittany_Lions_football_season 2017-12-01 3 -Coleman-Franklin-Cannon_Mill 2017-01-01 4 -Action_director 2015-05-01 93 -AD_547 2016-01-01 1 -Acta_germanica 2017-09-01 1 -Abu_Dhabi_Global_Market_Square 2017-01-01 35 -Kozo_Shioya 2017-01-01 7 -China_Investment_Corp 2017-01-01 2 -Dmitri_Zakharovich_Protopopov 2016-04-01 129 -Anatra_Anadis 2017-10-01 208 -Archaikum 2017-11-01 5 -2000_Webby_Awards 2017-04-01 360 -2003_BCR_Open_Romania_-_Singles 2016-08-01 2 -Abacetus_bisignatus 2016-09-01 79 -American_school_of_kinshasa 2016-01-01 1 -Anna,_7th_Duchess_of_Bedford 2016-08-01 8 -Black_majority_district 2016-11-01 3 -Dagma_Lahlum 2015-07-01 1 -Credit_Saison 2015-07-01 517 -Ariyankuppam_firka 2016-02-01 19 -Annette_Fuentes 2016-06-01 17 -Angerstein,_John 2015-12-01 2 -Annenkov_Island 2016-03-01 280 -Anne_Frank_museum 2016-06-01 67 -Annales_sancti_Amandi 2017-06-01 22 -L-FABP 2017-01-01 1 -Alvord,_TX 2017-06-01 12 -2006_World_Team_Table_Tennis_Championships 2016-05-01 119 -Angriffen 2015-12-01 9 -Anthony_Oppenheimer 2017-03-01 452 -Absamat_Masaliyevich_Masaliyev 2016-09-01 1 -Airborne_Museum_at_Aldershot 2016-03-01 41 -Aktiubinsk_Oblast 2015-08-01 7 -100_East_Wisconsin 2015-05-01 782 -7th_Bangladesh_National_Film_Awards 2017-08-01 91 -Alejandro_Reyes 2017-12-01 35 -Applied_philosophy 2018-03-01 539 -Adhemar_Pimenta 2016-06-01 146 -Break_the_fourth_wall 2016-04-01 66 -Annoushka_Ducas 2017-10-01 411 -ATC_code_J01CA01 2015-06-01 1 -Evelyn_County,_New_South_Wales 2016-11-01 7 -Elastic_scattering 2016-11-01 1374 -1032_Pafuri 2015-07-01 35 -Andrew_Bromwich 2015-08-01 26 -Ishita_Arun 2017-01-01 249 -Aspergics 2016-07-01 1 -1857_in_Chile 2018-03-01 22 -Breffni 2015-07-01 38 -845_in_poetry 2017-08-01 2 -20321_Lightdonovan 2015-10-01 12 -Arthur_Chandler 2017-12-01 27 -CsISOLatin2 2017-06-01 1 -1900_Grand_National 2016-06-01 69 -Aeritalia_AMX 2017-03-01 3 -B_Sharps 2015-06-01 11 -544_area_code 2015-09-01 2 -30th_Guldbagge_Awards 2015-06-01 37 -Agrippina 2017-08-01 315 -Ardmore 2016-02-01 433 -Amplypterus_panopus 2016-03-01 23 -Alexander_Bukharov 2017-09-01 5 -Alaska_Raceway_Park 2017-01-01 46 -Albanian_National_Road_Race_Championships 2017-03-01 31 -1968_Democratic_National_Convention_protest_activity 2017-10-01 2802 -2012_Birthday_Honours 2017-10-01 427 -2000_NHL_expansion_draft 2017-06-01 1 -A_Town_Where_You_Live 2016-11-01 2920 -Ahmed_Shahzad 2018-03-01 25 -Elisabeth_Svendsen 2016-11-01 39 -2002_FINA_Synchronised_Swimming_World_Cup 2016-08-01 30 -Akatek 2017-04-01 10 -Animation_with_DAZ_Studio 2018-02-01 78 -Fergus_Craig 2016-11-01 119 -Ancel_Nalau 2015-11-01 5 -5171_Augustesen 2017-04-01 20 -Anne_McGuire 2017-11-01 329 -Australian_Photoplay_Company 2015-12-01 6 
-1913_in_Canada 2017-04-01 137 -Arhopala_allata 2015-05-01 26 -Il_Paradiso_delle_Signore 2017-01-01 31 -Geri_Palast 2017-01-01 38 -Alan_Abela_Wadge 2017-03-01 77 -22nd_Tactical_Air_Support_Squadron 2017-10-01 7 -Avant_Stellar 2017-06-01 22 -Black_phantom_tetra 2016-11-01 205 -Billy_McCaffrey 2017-01-01 314 -Annie_Furuhjelm 2017-11-01 97 -1992_PGA_Tour 2017-12-01 307 -2008_Chilean_pork_crisis 2016-01-01 55 -2012_Currie_Cup_First_Division 2018-02-01 32 -Aleksei_Fomkin 2015-05-01 144 -Alexander_Krausnick-Groh 2016-05-01 101 -Adam_Richard_Wiles 2017-08-01 5 -ATCvet_code_QA01AD01 2015-09-01 2 -Abu_Bakr_Ibn_Bajja 2017-03-01 5 -Architecture-Studio 2016-04-01 94 -950s_BC 2016-02-01 257 -Abschwunges 2017-07-01 1 -Adonis_Geroskipou 2017-06-01 15 -2008-09_SV_Werder_Bremen_season 2016-03-01 3 -Closed_loops 2016-04-01 1 -AFC_Youth_Championship_1982 2015-12-01 10 -Aquila_Shoes 2015-08-01 209 -9842_Funakoshi 2017-12-01 11 -Educational_quotient 2016-04-01 21 -Antoni_Julian_Nowowiejski 2018-01-01 211 -Adi_Oka_Idhile 2017-11-01 16 -DEXIA-BIL_Luxembourg_Open 2016-11-01 3 -Andrew_James_Simpson 2016-03-01 43 -Alexander_Boksenberg 2017-12-01 61 -1827_in_Denmark 2017-03-01 39 -Afternoon_tea_with_suggs 2017-11-01 3 -Alpha,_MN 2017-06-01 6 -Ari_Onasis 2015-06-01 4 -1961-62_Football_League_First_Division 2015-11-01 1 -Andi_Lila 2015-06-01 2847 -A_Gathering_Of_Old_Men 2018-02-01 1 -Abul_Fazl_al-Abbas 2017-01-01 1 -Asgill,_Charles 2017-08-01 1 -Alexander_Arkhangelsky 2015-07-01 12 -1947-48_Portuguese_Liga 2015-06-01 1 -3rd_MMC_-_Varna 2016-07-01 3 -Alberts,_Wayne 2017-05-01 3 -Alois_Schickelgruber 2018-02-01 9 -Hefner_Stadium 2017-01-01 2 -410912_Lisakaroline 2018-02-01 26 -Academy_at_Mountain_State 2018-03-01 1 -617_Squadron 2016-05-01 489 -Al_Silm_Haji_Hajjaj_Awwad_Al_Hajjaji 2015-07-01 5 -Arturo_Merino_Benitez_Airport 2017-10-01 13 -AEK_Athens_Futsal 2015-06-01 10 -Aggaeus 2018-02-01 2 -Association_for_Retarded_Citizens_of_the_United_States 2017-08-01 3 -Kielce_pogrom 2017-01-01 335 -1351_in_poetry 2016-01-01 17 -1923_Princeton_Tigers_football_team 2017-11-01 41 -Auzata_semipavonaria 2017-01-01 2 -892_in_poetry 2016-01-01 6 -Anton_Krotiak 2017-12-01 2 -Arthur_Shelley 2017-12-01 23 -2003_Kyoto_Purple_Sanga_season 2018-02-01 9 -Frederic_Bowker_Terrington_Carter 2016-04-01 6 -2-orthoplex 2016-03-01 1 -Acacia_australiana 2015-09-01 4 -2012_Newcastle_Knights_season 2016-06-01 103 -Ann_Wrights_Corner,_Virginia 2017-07-01 19 -12557_Caracol 2017-03-01 5 -2001_African_Footballer_of_the_Year 2017-05-01 1 -Bass_Pyramid 2017-01-01 22 -A_noodle 2015-05-01 5 -Aed_Bennan 2018-02-01 2 -1886_Yale_Bulldogs_football_team 2017-10-01 58 -2002_Players_Championship 2016-06-01 54 -African_Skimmer 2017-07-01 2 -3rd_Guldbagge_Awards 2016-12-01 39 -Arrows_A19B 2015-10-01 1 -Archduchess_Elisabetta_of_Austria-Este 2017-08-01 1526 -America_Islands 2015-11-01 1 -1932_Olympic_Games 2016-01-01 9 -2011_Chinese_pro-democracy_protests 2015-11-01 2044 -Bank_walkaway 2016-04-01 113 -594_in_Ireland 2017-04-01 1 -Association_of_Municipal_Corporations 2016-12-01 5 -Andreas_Brantelid 2015-09-01 167 -Amarthal_urf_Unchagaon 2017-05-01 82 -3-methoxymorphinan 2017-04-01 146 -2382_BC 2016-07-01 10 -1763_in_science 2016-07-01 28 -Arvert 2017-04-01 77 -Ale_yeast 2017-12-01 19 -A_Man_Without_a_Soul 2018-03-01 17 -Air_Force_Base_Louis_Trichardt 2017-09-01 1 -Athirson_Mazzoli_de_Oliveira 2017-06-01 3 -Anthony_Chan_Yau 2017-07-01 181 -Basic_Enlisted_Submarine_School 2017-06-01 392 -Aboriginal_Lands_of_Hawaiian_Ancestry 2015-09-01 11 -Fondren_Southwest,_Houston 
2017-01-01 4 -3_World_Financial_Center 2017-07-01 64 -1971_IIHF_European_U19_Championship 2017-09-01 9 -1937-38_Allsvenskan 2015-12-01 6 -Christopher_Ashton_Kutcher 2017-06-01 2 -Australian_rules_football_in_south_australia 2016-12-01 1 -Amicable_pair 2018-01-01 7 -Alan_Tomes 2015-11-01 82 -Alexei_Petrovich,_Tsarevich_of_Russia 2015-12-01 3887 -Alexis_Damour 2015-10-01 66 -Bankruptcy_Act_of_1938 2017-06-01 76 -Amphiphyllum 2016-06-01 14 -Conway_High_School_West 2016-04-01 1 -5447_Lallement 2015-11-01 10 -Gabriel_Iddan 2017-01-01 1 -1879-80_Scottish_Cup 2017-04-01 3 -2011_Eneco_Tour 2016-10-01 31 -1471_in_England 2015-11-01 94 -Ashland_Town_Hall 2017-01-01 5 -Archduke_John 2015-05-01 20 -2000_Cameroonian_Premier_League 2017-09-01 18 -1997_flood 2017-11-01 5 -Agile_management 2015-09-01 26677 -Am_841 2017-12-01 3 -Apprentice_Mason 2018-01-01 4 -Hales-Jewett_theorem 2017-01-01 2 -Alien_Abductions 2017-10-01 14 -Arjun_Menon 2016-02-01 370 -Anthokyan 2016-01-01 4 -Automobili_Lamborghini 2016-02-01 1110 -Alain_Prost 2017-04-01 25196 -Fartein_Valen 2016-04-01 90 -Antonio_Galli_da_Bibiena 2016-05-01 5 -Al_Jawf,_Libya 2017-03-01 600 -AD_695 2018-02-01 1 -Amir_chand 2015-11-01 1 -Alcis_obliquisigna 2017-08-01 1 -Chandra_Talpade_Mohanty 2017-01-01 306 -Algerian_safe_house,_Jalalabad 2015-06-01 3 -Jake_Milner 2017-01-01 1 -Alternate_Communications_Center 2017-10-01 1 -In_the_Bleachers 2017-01-01 42 -Alex_Puodziukas 2016-04-01 7 -Altarpiece_of_Pilgrim_II 2018-02-01 2 -Cybernetical_Physics 2017-01-01 3 -Christopher_Unthank 2017-06-01 2 -1982_Independence_Bowl 2015-06-01 102 -Ascoli_Calcio_1898 2018-03-01 1115 -Briggs-Rauscher_reactions 2017-06-01 1 -Adjadja 2018-02-01 45 -Afghanistan_from_Ahmad_Shah_until_Dost_Mohammed 2016-06-01 3 -Catholic_social_doctrine 2017-01-01 6 -2833_BC 2016-11-01 1 -Bethy_Woodward 2016-04-01 38 -Bateman_polynomials 2017-06-01 22 -1966_Buenos_Aires_Grand_Prix 2015-10-01 19 -A_River_Somewhere 2015-10-01 353 -2016-17_BVIFA_National_Football_League 2017-04-01 2 -1909_Major_League_Baseball_season 2015-10-01 362 -1988_Oklahoma_Sooners_football 2017-11-01 2 -2010s_in_Chechen_fashion 2016-10-01 1 -Accademia_Olimpica 2017-08-01 17 -Air_cooling 2015-07-01 2010 -Amir_Saoud 2016-11-01 22 -Alex_Auburn 2015-05-01 52 -Apamea_impulsa 2016-11-01 6 -Australian_federal_election,_2007 2015-07-01 1794 -Ain_Sakhri 2017-10-01 76 -Belosaepiidae 2015-07-01 68 -Acts_of_Parliament_in_the_United_Kingdom 2017-10-01 4070 -Equity_Office 2016-11-01 202 -David_Bintley 2017-01-01 51 -Aksel_Schiotz 2018-03-01 3 -Appropriation_Act_2000 2017-05-01 12 -Edward_Johnson_III 2016-11-01 491 -2006_Ohio_State_Buckeyes_football_team 2016-03-01 1452 -Battle_of_Fort_Beausejour 2015-07-01 97 -Abel_Foullon 2015-12-01 82 -Apollo_VIII 2015-10-01 19 -Carry_on_up_the_jungle 2015-07-01 8 -Armour_villa 2017-05-01 4 -201_Poplar 2015-08-01 265 -Arta_prefecture 2016-08-01 1 -2015-16_Ekstraklasa 2018-02-01 13 -Alport,_Ontario 2018-02-01 2 -Bongoland 2017-06-01 62 -Alfred_Charles_Post 2016-11-01 11 -Aam_Aadmi_Party_crisis 2016-10-01 1 -Andrea_Moda 2015-07-01 143 -Abdul_Halim_Sharar 2017-08-01 545 -Apostolic_Vicariate_of_Yunnan 2016-12-01 1 -Catherine_Steadman 2016-11-01 5218 -Agastachys_odorata 2015-10-01 38 -9783_Tensho-kan 2016-03-01 2 -AFL_Cairns 2017-10-01 337 -Abomey 2015-06-01 1062 -Anne_Crofton,_1st_Baroness_Crofton 2015-12-01 42 -Cash-flow_return_on_investment 2017-01-01 137 -Alberto_Arvelo_Torrealba_Municipality 2015-08-01 56 -Abyssinian_Shorthorned_Zebu 2017-09-01 124 -Albanian_hip_hop 2016-01-01 1812 -Alphonso_IV_of_Portugal 
2016-02-01 12 -19th_The_Alberta_Mounted_Rifles 2016-10-01 1 -Chinese_shadow_theatre 2016-04-01 1 -American_Committee_of_the_Fourth_International 2017-08-01 4 -2014_Bahrain_GP2_Series_round 2016-03-01 80 -Alexandrian_orthodox 2017-09-01 2 -2010_Hurricane_Season 2015-05-01 18 -1938_All-Ireland_Senior_Camogie_Championship_Final 2017-01-01 1 -ATC_code_D01 2018-01-01 203 -Albedo 2015-08-01 23484 -Chavigny,_Meurthe-et-Moselle 2017-01-01 12 -Becky_Essex 2015-07-01 51 -Archaeological_Museum_Padre_Le_Paige 2018-02-01 2 -Abu_Bakar_Sillah 2017-01-01 5 -Back_chat 2017-01-01 2 -Anchylobela_dyseimata 2015-12-01 11 -Anthony_Overton 2017-03-01 261 -Bear_maul 2016-04-01 3 -Ambarawa,_Central_Java 2016-01-01 1 -Amber_lager 2016-11-01 87 -2nd_LAAD 2017-09-01 8 -Ashiya,_Hyogo 2018-03-01 24 -Angels_at_Risk 2018-02-01 74 -Audrey_Marie_Munson 2016-03-01 17 -1984_Australian_Football_Championships 2017-01-01 27 -Ammonia_fountain 2016-06-01 434 -Allister_Bentley 2018-03-01 11 -Alsager_Hay_Hill 2016-10-01 72 -1753_English_cricket_season 2015-05-01 51 -2009-10_New_Jersey_Devils_season 2016-10-01 1 -An_Untamed_State 2016-05-01 1109 -Beatrice_Carmichael 2016-11-01 5 -Abdul_Ghani_Ahmad 2017-12-01 115 -Arteria_suralis 2017-02-01 3 -Berzasca_River 2017-01-01 1 -Angel_Attack 2015-09-01 98 -1969_San_Francisco_49ers_football_team 2017-11-01 1 -Anthony_Beilenson 2017-09-01 114 -Crystalline_Entity 2016-04-01 180 -Granice 2017-01-01 2 -203rd_General_Hospital 2017-07-01 44 -Acrocercops_rhombiferellum 2017-12-01 20 -Ampliglossum_blanchetii 2017-05-01 1 -11553_Scheria 2017-03-01 2 -Ashkenozi 2017-02-01 1 -2010_Calder_Cup_Playoffs 2018-01-01 9 -Alice_Caymmi 2016-01-01 121 -Alfredo_Alvar 2017-04-01 44 -2006_Legends_Tour 2017-07-01 30 -Albano_Albanese 2015-10-01 53 -1943_Frankford_Junction_train_wreck 2016-08-01 510 -Evans_Court_Apartment_Building 2016-04-01 4 -Abu_al-Rayhan_Muhammad_ibn_al-Biruni 2017-11-01 1 -Abubakar_Muhammad_Rimi 2015-05-01 4 -Dostpur 2016-11-01 26 -Accessories_Council_Excellence_Awards 2016-03-01 14 -2006_North_American_heat_wave 2015-06-01 1161 -Amstelodamum 2017-09-01 12 -A_Very_Peculiar_Practice 2016-08-01 1860 -Allegorie_der_Liebe 2015-09-01 1 -Alex_Mackie 2017-02-01 95 -1812_Homestead_Farm_and_Museum 2017-09-01 29 -Argus_distribution 2016-03-01 8 -Anthony_Thomas_Stover 2017-02-01 1 -Arthur_Shallcross 2016-11-01 20 -Antoine_Francois_Fourcroy 2018-01-01 1 -Abbas_Halim 2016-11-01 21 -Akiva_Baer_ben_Joseph 2017-08-01 1 -Balatonfuered 2016-11-01 3 -Antemnae 2017-11-01 204 -Cling_Cling 2017-06-01 93 -B_flat_major 2017-01-01 28 -AirExplore 2017-12-01 930 -Auckland_Super_Sprint 2015-11-01 120 -Alfredo_De_Gasperis 2017-12-01 793 -Geoffrey_I_of_Vianden 2017-01-01 5 -Copa_de_Zaachila 2016-04-01 6 -Alboacen 2017-09-01 1 -BNH_Hospital_Bangkok 2017-06-01 2 -Agricultural_health_and_safety 2016-09-01 1 -Chiasms 2017-06-01 2 -Al_Karaana 2016-05-01 58 -Alberta_Highway_872 2016-11-01 1 -Among_the_mourners 2016-03-01 1 -Achema_Power_Plant 2015-06-01 55 -ATSE_Graz 2017-10-01 65 -Arthroscopy 2017-02-01 11721 -2010-2012_European_Nations_Cup_Second_Division 2018-01-01 7 -1967_Cincinnati_Reds 2015-08-01 4 -24th_Golden_Disc_Awards 2017-05-01 71 -Johnny_Floyd 2017-01-01 17 -Arthur_Rupin 2016-02-01 5 -Alpine_skiing_at_the_2011_Canada_Winter_Games 2015-09-01 38 -College_Press_Service 2017-01-01 8 -American_Psycho 2015-08-01 55567 -CBC_Winnipeg 2017-06-01 17 -Burning_the_process 2016-04-01 1 -2011_Stanley_Cup_playoffs 2017-01-01 1036 -Andrew_Mumford 2017-01-01 6 -1925_in_fine_arts_of_the_Soviet_Union 2018-02-01 28 -Aragvi_river 
2017-02-01 2 -Andrew_Adamson 2018-03-01 16269 -Arcides_fulvohirta 2016-10-01 1 -Araya_Selassie_Yohannes 2015-11-01 423 -Apartment_house 2016-09-01 85 -Advanced_Art 2015-12-01 171 -1028_Lydina 2015-06-01 53 -2005_July_6_United_Nations_assault_on_Cite_Soleil,_Haiti 2017-04-01 2 -Adolph_Weiss 2015-06-01 98 -Adam_Jerzy_Czartoryski 2015-09-01 1237 -1980_United_States_presidential_election 2017-05-01 56 -1956_Oscars 2016-08-01 10 -Burundian_Senate_election,_2005 2016-04-01 1 -Amarolea_floridana 2015-07-01 3 -August_Bier 2015-12-01 514 -Arbelodes_sebelensis 2018-03-01 6 -Abiah_Brown 2018-02-01 1 -A_Maceo_Smith_High_School 2016-10-01 2 -1488_in_architecture 2017-12-01 6 -2009_AMP_Energy_500 2016-04-01 45 -1921_Baylor_Bears_football_team 2017-03-01 21 -Dmitry_Akhba 2015-07-01 43 -2004_Big_12_Conference_Baseball_Tournament 2016-07-01 37 -Abdisalam_Omer 2018-02-01 116 -Alma,_son_of_Alma 2015-08-01 53 -An_Phoblacht 2016-10-01 962 -2009_Turner_Prize 2016-01-01 75 -Jack_Zajac 2017-01-01 24 -1906_Wimbledon_Championships 2016-04-01 22 -Chuckwalla_Valley 2017-06-01 22 -Alien_Quadrology 2016-02-01 1 -Chalcidoptera_contraria 2016-04-01 1 -Alaska_Republican_Gubernatorial_Primary_Election,_2006 2016-02-01 1 -333639_Yaima 2018-02-01 7 -Aquila_hastata 2015-11-01 28 -Al-Fua 2017-07-01 1 -Anihilation 2015-07-01 28 -International_Toy_Fair 2017-01-01 1 -38th_Regiment_Indiana_Infantry 2017-01-01 10 -Andrea_Stella 2017-07-01 75 -Anselmo_de_Moraes 2015-09-01 562 -Applemore 2016-05-01 3 -Akpinar,_Kirsehir 2015-06-01 3 -Ant_nest 2016-05-01 53 -Catherine_of_Siena 2016-11-01 8806 -Barbos 2015-06-01 12 -Amlaib_mac_Iduilb 2017-08-01 2 -Alice_Janowski 2018-03-01 17 -Acacia_leptocarpa 2017-03-01 48 -Al-Hadi_Yahya 2016-01-01 39 -2015_British_Figure_Skating_Championships 2017-07-01 38 -Avenues_Television 2016-03-01 214 -Dendropsophus_sartori 2015-07-01 11 -1952_in_Germany 2015-05-01 63 -Armuchee_High_School 2016-04-01 27 -April_1_RFC 2017-11-01 2 -Caroline_Bliss 2016-11-01 972 -66th_Rice_Bowl 2016-06-01 17 -Alec_Smight 2017-02-01 173 -Alexei_Panin 2017-09-01 3 -Codeword 2016-04-01 84 -Dormice 2015-07-01 63 -2105_BC 2017-11-01 6 -5th_National_Congress_of_Kuomintang 2016-06-01 5 -Caminho_das_Indias 2017-01-01 5 -Agerbo 2017-11-01 2 -Abe_Anellis 2018-01-01 86 -Aceh_Medal 2015-07-01 33 -Alltech_Arena 2016-10-01 144 -Aly_Oury 2016-06-01 260 -757th_Troop_Carrier_Squadron 2017-07-01 2 -Alec_Peters 2017-12-01 2731 -Agua_Buena_Airport 2017-09-01 12 -Alessandro_Livi 2016-08-01 104 -Andkaer 2017-04-01 3 -Cateran 2017-06-01 135 -57th_Venice_International_Film_Festival 2017-04-01 180 -Brijal_Patel 2017-06-01 98 -Cnemaspis_jerdonii 2015-07-01 6 -Aluminum_sodium_salt 2016-10-01 3 -Arnaldo_Antonio_Sanabria_Ayala 2017-09-01 4 -Angels_of_Iron 2018-02-01 83 -Bugs_Bunny_Rabbit_Rampage 2017-06-01 422 -Admiralty_Class_Destroyer 2017-10-01 2 -Atlas_Media 2017-05-01 2 -Arcesilaus_i_of_cyrene 2017-03-01 1 -2011_Tajikistan_national_football_team_results 2017-04-01 13 -Artur_Shakhnazarov 2017-12-01 22 -747_Express_Bus 2018-03-01 20 -101-in-1_Party_Megamix 2017-10-01 188 -Fastpoint_Games 2016-11-01 32 -Analog_Anthology_1 2017-07-01 1 -Archival_bond 2015-09-01 119 -1985_Air_Force_Falcons_football 2017-09-01 4 -American_Airlines_plane_diverted_to_Miami_after_landing_gear_problem 2017-06-01 3 -Adaptive_Evolution_in_the_Human_Genome 2017-08-01 2 -Arthur_Strangways 2015-11-01 5 -1583_in_poetry 2015-09-01 68 -Andrew_igoudala 2015-06-01 2 -Euonychophora 2016-11-01 37 -Catechizing 2016-04-01 4 -1960-61_ice_hockey_Bundesliga_season 2018-03-01 3 -Buk_Vlaka 
2017-06-01 10 -Arbor_Day 2018-03-01 16265 -Guan_Sheng 2017-01-01 73 -2014_Barcelona_Open_Banc_Sabadell 2016-08-01 57 -1976-77_Nationalliga_A 2016-04-01 1 -AFL_records 2015-11-01 16 -2005_Tour_Down_Under 2016-10-01 26 -92_BCE 2015-08-01 4 -Bento_Box_Animation 2017-01-01 1 -Alabama_Territory 2018-03-01 1195 -Abdul-Wasa_Al-Saqqaf 2016-07-01 21 -Archbishops_of_Semarang 2017-01-01 6 -Ambivina 2017-10-01 13 -Aghjaghala_Ulia 2017-08-01 2 -Blechnum_novae-zelandiae 2016-11-01 26 -Dictyosome 2016-04-01 19 -Arts_Council_of_Great_Britain 2016-12-01 785 -LBC_Radio 2017-01-01 3 -Ageo,_Saitama 2016-06-01 396 -Babla_Mehta 2016-12-01 674 -2012-13_Russian_Cup 2018-01-01 10 -Chandragupt 2017-06-01 6 -407th_Air_Refueling_Squadron 2016-01-01 96 -Aftermarket 2016-07-01 1253 -A_Portrait_of_New_Orleans 2016-08-01 18 -2000-01_Yemeni_League 2017-03-01 1 -Actinidia_chinensis 2015-11-01 907 -Amsterdam_Tournament_1999 2018-03-01 1 -Arthur_Iberall 2017-02-01 112 -Auricula_Meretricula 2016-02-01 103 -Archbishop_of_Lahore 2016-09-01 8 -Chippewa_Indians_of_Montana 2016-04-01 9 -Abidjan-Niger_Railway 2018-01-01 22 -29th_Annual_Grammy_Awards 2017-05-01 1087 -Ateles_geoffroyi_frontatus 2017-06-01 3 -Enrico_Cernuschi 2016-11-01 3 -A4183_road 2017-02-01 8 -Ahrayut 2016-10-01 75 -Alison_Castle 2016-03-01 55 -Automobile_aftermarket 2016-10-01 5 -2008_GAINSCO_Auto_Insurance_Indy_300 2016-07-01 51 -1937_Scottish_Cup_Final 2017-04-01 126 -2005_Clipsal_500_Adelaide 2018-02-01 22 -Farid_Farjad 2016-04-01 120 -13_Tribes_of_Long_Island 2015-12-01 11 -Afroneta_bamilekei 2017-01-01 2 -Frederick_Stuart_Greene 2017-01-01 1 -Andre_Braugher 2017-04-01 37655 -1906_International_Lawn_Tennis_Challenge 2017-10-01 73 -2009-10_NFL_Playoffs 2016-01-01 69 -Cricket_Wellington 2016-11-01 2 -Craig_Blazer 2015-07-01 21 -Aeolidiella_orientalis 2017-05-01 3 -Andre_Prokovsky 2017-06-01 4 -Angela_McKee 2017-11-01 14 -Airbase_Golubovci 2016-10-01 1 -2011_ISAF_Sailing_World_Championships 2017-05-01 89 -Bartica_Airport 2017-06-01 27 -Agusan_Dam 2016-09-01 454 -Bosque_Real_Country_Club 2015-07-01 42 -Georges_Duhamel 2017-01-01 122 -Allrounder 2017-03-01 63 -2017_Missouri_State_Bears_football_team 2017-09-01 868 -Allons_a_Lafayette 2017-11-01 17 -Agathla 2015-05-01 105 -1086_in_poetry 2015-09-01 25 -Absolute_extreme 2017-09-01 1 -Agathe_Bonitzer 2017-12-01 229 -Chinese_Red_Pine 2017-06-01 18 -Angular_dispersion 2016-02-01 11 -Jean-Sebastian_Giguere 2017-01-01 2 -Actinium-235 2018-03-01 4 -Ago,_filo_e_nodo 2017-02-01 11 -Aranea_cruentata 2016-03-01 1 -2009_Korea_National_League 2017-11-01 19 -Americom-8 2016-08-01 28 -2006_Junee_Bushfire 2018-03-01 81 -2013_Major_League_Baseball_Home_Run_Derby 2017-09-01 182 -1928_US_Presidential_Election 2016-12-01 42 -After-eighty_generation 2016-02-01 127 -1932_Hawthorn_Football_Club_season 2017-07-01 16 -Amelia_Elizabeth_Mary_Rygate 2017-05-01 2 -Aline_Khalaf 2017-12-01 465 -Akron_Junction,_New_York 2017-07-01 56 -Apollo_moon_landing_conspiracy_theories 2015-09-01 4 -1978_National_League_Championship_Series 2017-03-01 325 -1959-60_German_football_championship 2017-08-01 5 -Almost_a_Bride 2017-01-01 1 -Andrew_Lysaght,_junior 2015-10-01 20 -1902_Otani_expedition 2018-02-01 1 -1892_Currie_Cup 2016-09-01 53 -1988_USC_Trojans_football_team 2016-10-01 494 -1944_in_Northern_Ireland 2016-12-01 46 -Alfred_Acherman 2017-07-01 1 -Arcadia,_Nebraska 2017-02-01 148 -4_x_400_metre_relay 2018-03-01 1 -A4030_road 2016-07-01 1 -Chi-li 2016-11-01 3 -Aircraft_fairing 2016-11-01 1861 -Buddhism_in_Belize 2015-07-01 40 -Alameda_County_Open 
2017-02-01 33 -Area_of_countries_and_regions_of_the_United_Kingdom 2017-10-01 6 -2014_Weber_State_Wildcats_football_team 2016-10-01 47 -American_Journal_of_Comparative_Law 2016-04-01 62 -A_Teaspoon_Every_Four_Hours 2017-03-01 47 -Astasis 2016-03-01 1195 -Akhrakouaeronon 2015-11-01 62 -Annenkrone 2016-03-01 40 -Ballotine 2016-12-01 4753 -2000_Kipawa_earthquake 2015-11-01 139 -Archdiocese_of_cashel_and_emly 2017-01-01 1 -Chevrolet_SS396 2017-01-01 1 -Achyroseris 2016-03-01 1 -Daniel_Pulteney 2016-11-01 29 -2006_Major_League_Baseball_draft 2017-07-01 10637 -Adetunji_Idowu_Olurin 2016-01-01 37 -Ardatov,_Nizhny_Novgorod_Oblast 2017-04-01 18 -Andrew_Hilditch 2015-08-01 398 -A_Very_Merry_Daughter_Of_the_Bride 2017-04-01 67 -1993_in_radio 2017-08-01 85 -Deltan 2016-11-01 91 -Adnan_Custovic 2017-12-01 26 -Di_Gennaro 2017-01-01 4 -237_AD 2017-11-01 1 -Aaron_Gombar 2018-03-01 2 -Acrolophus 2017-04-01 47 -Alfred_Bergman 2017-06-01 27 -Charles_Bebb 2017-06-01 39 -Dirico 2017-01-01 24 -1982_Major_League_Baseball_Draft 2016-12-01 90 -DDT_wrestling 2016-11-01 4 -1988-89_Houston_Rockets_season 2016-02-01 10 -Acacia_loderi 2015-11-01 35 -2015_Deauville_American_Film_Festival 2016-10-01 126 -Andropadus_importunus 2016-02-01 9 -Antonio_Bacchetti 2017-04-01 52 -Ann_Trindade 2015-09-01 49 -5_x_Monk_5_x_Lacy 2016-05-01 37 -Barlochan,_Ontario 2017-06-01 2 -Achaian 2017-03-01 35 -Flow_rider 2017-01-01 1 -Antiblemma_discerpta 2018-02-01 1 -1997_Illinois_Fighting_Illini_football_team 2017-11-01 331 -Ahrntal 2016-03-01 540 -Apollo_Conference 2015-10-01 329 -Algenib_in_Perseus 2016-01-01 1 -Craig_Norgate 2016-04-01 42 -Antwerp_Zoo 2015-12-01 879 -Cold_Contagious 2017-06-01 161 -Bolito 2016-11-01 181 -Chinese_bridges 2016-11-01 1 -14th_Indiana_Infantry_Regiment 2017-04-01 115 -Bindunuwewa_massacre 2015-07-01 52 -Eastshore_Highway 2016-11-01 2 -Daemonologie 2017-01-01 1655 -Aero_Pacifico 2015-07-01 1 -Blue_Ribbon_Schools_Program 2017-06-01 557 -Ash_Township,_MI 2018-02-01 3 -Al-Hatab_Square 2018-02-01 450 -Alje_Vennema 2018-02-01 187 -1920_All-Ireland_Senior_Football_Championship_Final 2016-05-01 40 -Criss_Oliva 2016-11-01 801 -Bethlehem,_Ohio 2017-01-01 16 -1976_WHA_Amateur_Draft 2015-08-01 47 -Angela_Fimmano 2017-06-01 17 -Alexander_Bonini_of_Alexandria 2017-09-01 1 -Anarchist_faq 2015-05-01 13 -Aleksander_Benedykt_Sobieski 2016-05-01 240 -Cape_Florida_Lighthouse 2016-04-01 6 -Fernando_VI_of_Spain 2017-01-01 3 -Crossing_number 2017-06-01 29 -1984_NSL_Cup 2017-05-01 26 -Barbara_Weldon 2015-06-01 29 -Andreas_Olsen 2017-01-01 32 -Battle_of_Baima 2016-04-01 2 -Amory_Hansen 2016-05-01 26 -Akhmimic 2015-11-01 41 -Al_Awda 2018-02-01 18 -Adelheid-Marie_of_Anhalt-Dessau 2016-07-01 70 -Americans_for_Technology_Leadership 2015-10-01 90 -Belizean_diplomatic_missions 2017-06-01 3 -African_communist 2016-05-01 3 -Andosol 2016-09-01 246 -Alan_Attraction 2016-05-01 15 -A_Yank_in_Rome 2015-12-01 70 -2004_in_the_United_Arab_Emirates 2018-02-01 33 -Additionality 2017-06-01 371 -Assassination_of_Trotsky 2015-06-01 47 -Alice_Sotero 2018-02-01 27 -Agyneta_platnicki 2016-04-01 4 -Alexandra_Vasilyevna_Velyaminova 2015-07-01 30 -1881_in_Chile 2016-06-01 16 -Arterial_ischemic_stroke 2018-01-01 57 -Astro_Glacier 2015-09-01 27 -Chester_Earl_Merrow 2017-06-01 58 -Alejandro_de_la_Madrid 2015-11-01 1630 -70936_Kamen 2017-08-01 1 -AK_Steel_Holding_Corp 2015-08-01 8 -1124_Stroobantia 2017-10-01 23 -Asian_Wedding 2016-10-01 15 -23837_Matthewnanni 2015-10-01 18 -Acharya_Jagadish_Chandra_Bose_Indian_Botanic_Garden 2017-03-01 4893 -Betsy_Hodges 
2016-04-01 560 -Arthur_and_the_Invisibles 2015-08-01 14924 -Arkansas-Ole_Miss_football_rivalry 2015-05-01 7 -Asia_Cup 2015-09-01 5938 -Arginine_racemase 2016-12-01 15 -585th_Field_Company,_Royal_Engineers 2018-03-01 1 -1975_Stagg_Bowl 2017-08-01 6 -Dame_Commander_of_The_Most_Honourable_Order_of_the_Bath 2017-01-01 1 -Askajian 2016-02-01 26 -2006_Nebraska_Cornhuskers_football_team 2015-08-01 975 -Cicero_Francis_Lowe_House 2015-07-01 10 -Conan_IV,_Duke_of_Brittany 2016-11-01 252 -2005_World_Modern_Pentathlon_Championships 2016-07-01 38 -1946_Aleutian_Islands_earthquake 2017-03-01 2019 -ANKRD17 2017-09-01 19 -1970_Maryland_Terrapins_football_team 2017-11-01 42 -Ali_Dehkhoda 2017-04-01 1 -1244_in_art 2015-07-01 22 -1520s_in_Denmark 2016-01-01 20 -Abdoulaye_Gaye 2017-12-01 10 -An_Angel_Has_Arrived 2016-03-01 36 -1453_BC 2015-08-01 26 -2017_National_Games_of_China 2017-05-01 1293 -A_Night_in_Sickbay 2016-05-01 251 -Dateline_Diamonds 2017-01-01 53 -419_guestbook_spamming 2016-02-01 5 -Familiar_bluet 2017-01-01 4 -Abu_Bakr_Mirza 2017-10-01 86 -7272_Darbydyar 2017-11-01 4 -Ages_of_consent_in_Latin_America 2017-03-01 961 -1982_Japan_Soccer_League_Cup 2016-04-01 14 -2810_BC 2015-07-01 9 -Druga_Liga_Republike_Srpske 2017-01-01 1 -1998_Swedish_Rally 2017-09-01 34 -1567_in_Norway 2015-10-01 89 -126_Army_Engineer_Regiment,_Royal_Engineers 2016-03-01 5 -2017_American_League_Wild_Card_Game 2017-10-01 25120 -August_Follen 2017-01-01 2 -Ala_Gertner 2015-11-01 876 -Glenwood,_Harford_County,_Maryland 2017-01-01 3 -Applied_ecology 2017-12-01 730 -Ariarathes_V_Eusebes_Philopator 2018-03-01 5 -2006_AFC_Champions_League 2017-09-01 947 -60_minutes_2 2016-10-01 2 -Embryonic_shield 2017-01-01 2 -2001_Meath_Intermediate_Football_Championship 2015-11-01 8 -Apparition_of_Christ_to_Madonna 2017-06-01 5 -Hoosier_Road_Elementary 2017-01-01 1 -Arua_Uda 2016-12-01 29 -Array_comprehension 2015-11-01 8 -Baszki 2015-06-01 36 -Akron_Neighborhoods 2016-01-01 4 -Catholic_Church_in_Costa_Rica 2017-06-01 85 -Canada-Sweden_relations 2015-07-01 1 -Barza_Radio_Community 2016-11-01 6 -Dalhousie_Middle_School 2016-11-01 5 -Alliphis_bakeri 2017-11-01 2 -Bartica_massacre 2017-06-01 53 -30th_January 2015-11-01 10 -1920_revolution 2017-05-01 5 -Amyraldism 2017-08-01 828 -AA_Jefferson_District 2016-05-01 45 -Eunebristis_cinclidias 2017-01-01 1 -A_Scott_Connelly 2017-06-01 5 -Antony_Durose 2016-07-01 19 -Arval_Brethren 2017-11-01 579 -Anthidium_dissectum 2017-05-01 2 -Aru,_Democratic_Republic_of_the_Congo 2017-04-01 81 -1956-57_West_Indian_cricket_season 2017-04-01 2 -2014_Moscow_Film_Festival 2017-08-01 2 -Anna_Gurji 2017-06-01 27 -Allen_Memorial_Medical_Library 2016-07-01 120 -Anton_Sistermans 2017-02-01 36 -Clotheshorses 2017-06-01 1 -36_Stratagems 2017-08-01 25 -Attack_of_the_crab_monsters 2016-10-01 16 -30_rock_awards 2015-09-01 2 -Aeroflot,_Uralsk_Civil_Aviation_Directorate 2017-08-01 2 -Amblyseius_parabufortus 2017-06-01 3 -Indian_coral_tree 2017-01-01 3 -3285_Ruth_Wolfe 2016-02-01 9 -Anderson_da_Silva_Gibin 2016-08-01 73 -5001st_Composite_Group 2017-03-01 4 -Danzik 2016-04-01 8 -4810_Ruslanova 2016-03-01 2 -Arkendale,_Virginia 2016-04-01 14 -Al_Francis_Bichara 2016-09-01 239 -Cayena 2017-01-01 1 -A_Glass_of_Darkness 2017-04-01 95 -GMC_CCKW 2017-01-01 887 -Alabama_State_Route_107 2015-11-01 13 -2011_in_motorsport 2017-12-01 26 -Adecco_General_Staffing,_New_Zealand 2017-12-01 86 -Anbargah 2015-10-01 6 -1995_Asian_Cup_Winners_Cup 2016-06-01 7 -1986_Wales_rugby_union_tour_of_the_South_Pacific 2016-12-01 30 -Adya_Goud_Brahmin 2017-03-01 2 
-Akcakiraz 2015-08-01 5 -24249_Bobbiolson 2017-12-01 4 -Ahmanson_Theatre 2016-02-01 801 -Abdullah_ibn_Jahsh 2016-10-01 196 -1937_in_Chile 2015-08-01 24 -2000_in_England 2016-01-01 57 -A_Deepness_In_The_Sky 2017-08-01 2 -Area_code_678 2015-07-01 480 -Avalon_Hill 2017-01-01 880 -Anna,_Duchess_of_Prussia 2015-12-01 315 -Alexandr_Syman 2017-04-01 24 -7400_series_logic 2017-11-01 2 -Greenleaf_Township,_Minnesota 2017-01-01 1 -Acetylsal 2017-04-01 6 -Earth_and_Man_National_Museum 2016-11-01 43 -Affetside 2015-10-01 185 -1971_CFL_season 2015-08-01 202 -Beth_Bader 2016-11-01 21 -Enrolled_Nurse 2016-04-01 5 -Al-Azraq 2016-12-01 22 -4th_South_Carolina_Regiment 2015-07-01 42 -Amanda_Overmyer 2017-02-01 356 -Auto_wrap 2016-02-01 8 -Anonymous_internet_banking 2015-07-01 98 -Curatoria 2016-11-01 3 -A-roll 2016-05-01 134 -Accra_hearts_of_oak_sc 2017-10-01 4 -Apostasy_from_Judaism 2015-12-01 45 -Acantharctia_tenebrosa 2018-01-01 3 -Abigail_Keasey_Frankel 2017-11-01 25 -2008_Paraguayan_general_election 2016-01-01 1 -Adams_motor 2015-09-01 37 -Drummond_Community_High_School 2017-01-01 17 -Andrews_Nakahara 2017-10-01 474 -10th_Maccabiah 2017-04-01 30 -Ackerman,_Rick 2015-08-01 4 -Dumri,_Buxar 2016-11-01 35 -Asking_Jesus_into_your_heart 2016-09-01 1 -Adamowicz_brothers 2016-12-01 161 -Alien_Musibat 2017-12-01 2 -Ahmad_Al_Tayer 2016-04-01 39 -Analytical_phonics 2016-01-01 520 -Do_It_Good 2016-04-01 281 -2004_Kumbakonam_School_fire 2017-12-01 2114 -1977_Chattanooga_Mocs_football_team 2016-08-01 3 -Globe_valves 2017-01-01 11 -Abelmoschus_crinitus 2016-04-01 18 -1874_Yale_Bulldogs_football_team 2016-02-01 37 -Climer 2017-06-01 1 -Auchroisk 2017-06-01 37 -2010_Albirex_Niigata_season 2016-10-01 19 -Adhocracy 2017-06-01 2217 -Chios_Massacre 2015-07-01 1110 -African_Red_Slip 2017-02-01 221 -1976_Portland_Timbers_season 2016-07-01 41 -Alsace-Larraine 2015-09-01 2 -3750_Ilizarov 2017-07-01 12 -Aleksandr_Shkaev 2017-05-01 1 -32_bar_form 2016-01-01 12 -Aequatorium_jamesonii 2018-03-01 14 -Abade_neiva 2016-09-01 2 -Arakvaz 2016-08-01 23 -207_Sqn 2017-10-01 2 -Ducal_hat 2016-11-01 10 -2_Degrees 2017-03-01 19 -Ahmeddiyya_Islam 2016-03-01 4 -Amidi-ye_Kohneh 2017-11-01 13 -Contributions_to_Indian_Sociology 2016-11-01 42 -Clark_Leiblee 2016-04-01 5 -Abraham_of_Strathearn 2017-09-01 14 -Bangor_City_Forest 2015-07-01 34 -Alireza_Afzal 2017-02-01 24 -Akhaura-Laksam-Chittagong_Line 2015-09-01 30 -1973_National_500 2017-10-01 80 -Attachment 2017-09-01 1356 -Kellett_Strait 2017-01-01 5 -Ajarani_River 2018-01-01 30 -Akbarabad,_Khomeyn 2017-03-01 8 -Adriaan_Theodoor_Peperzak 2018-02-01 88 -Alucita_dryogramma 2015-09-01 1 -Brit_Med_J 2015-07-01 1 -4th_Metro_Manila_Film_Festival 2015-09-01 80 -Alialujah_Choir 2018-03-01 221 -1953-54_SM-sarja_season 2016-09-01 1 -Air_Force_Song 2018-02-01 19 -4-6_duoprism 2016-03-01 30 -Ashley_Spurlin 2017-06-01 94 -Asfaq_Kayani 2017-10-01 1 -1607_in_architecture 2016-06-01 7 -4-way_speakers 2015-10-01 2 -Blue_Heeler 2015-07-01 149 -5_Euro 2017-04-01 16 -2009_Spa-Francorchamps_GP2_Series_round 2016-04-01 12 -2015_Guru_Granth_Sahib_desecration 2015-11-01 6821 -Agriculture_Marketing_Service 2016-07-01 2 -2006_Football_League_Cup_Final 2015-11-01 1711 -2008_Uber_Cup_group_stage 2016-02-01 40 -1923_PGA_Championship 2016-08-01 97 -Fannie_Bay 2016-04-01 6 -AlchemyAPI 2016-04-01 344 -Cinema_of_Italy 2017-01-01 1217 -Arodes 2016-11-01 36 -Damien_Marley 2015-07-01 168 -Al_Jumayl_Baladiyat 2015-08-01 5 -2015_Alabama_State_Hornets_football_team 2017-06-01 32 -Aglossa_tanya 2016-03-01 1 -73rd_Pennsylvania_Infantry 
2017-01-01 12 -2015_European_Junior_and_U23_Canoe_Slalom_Championships 2018-02-01 31 -African_Leopard 2016-08-01 64 -Faverolles,_Orne 2017-01-01 5 -Aaron_Fukuhara 2015-11-01 17 -Annular_ligaments_of_trachea 2017-01-01 31 -2014_US_Open_Series 2016-11-01 35 -A_Better_Mousetrap 2018-02-01 4 -Dibaklu 2016-11-01 1 -At_Samat_District 2015-06-01 35 -Aaron_Peasley 2017-05-01 32 -Apistomology 2015-12-01 2 -Buyat_Bay 2015-07-01 54 -1942_Estonian_Football_Championship 2017-05-01 22 -Action_for_Autism 2016-06-01 346 -100_Hz 2015-06-01 72 -2003_Arizona_State_Sun_Devils_football_team 2017-05-01 82 -Antona_obscura 2016-09-01 1 -Akiko_Sugiyama 2015-12-01 32 -Elysburg 2016-11-01 8 -2017_New_South_Wales_Cup 2017-09-01 38 -2011-12_Gold_Coast_United_FC_season 2017-06-01 1 -Agency_for_the_Prohibition_of_Nuclear_Weapons_in_Latin_America_and_the_Caribbean 2016-04-01 15 -Albert_Dunn 2017-08-01 87 -Hahamakin_ang_Lahat 2017-01-01 984 -2013_Spuyten_Duyvil_derailment 2017-11-01 5 -Ayling 2017-01-01 5 -Anti-Establishment 2016-10-01 1 -1951_Spanish_motorcycle_Grand_Prix 2018-01-01 48 -2009-10_Brunei_Premier_League 2017-08-01 4 -23_Ursae_Majoris 2016-08-01 90 -1927-28_Austrian_football_championship 2017-08-01 4 -Andrew_McKeever 2017-10-01 3 -Clinocottus 2017-06-01 23 -2006_State_of_Origin 2015-11-01 7 -2013-14_Los_Angeles_Clippers_season 2015-07-01 8 -Cor_Jesu 2017-01-01 1 -Besseringen_B-Werk 2017-06-01 158 -Amy_Hempel 2017-07-01 1091 -Franc-Comtois 2016-04-01 2 -Allium_giganteum 2017-07-01 1103 -Abishai 2016-08-01 56 -Abraham_Clark_High_School 2016-04-01 88 -Baku_chronology 2015-06-01 1 -22nd_MEU 2015-10-01 39 -2015_Open_Engie_de_Touraine 2015-10-01 195 -Churchill_Bowl 2017-06-01 30 -AGMARK 2017-08-01 117 -American_standard_wire_gauge 2017-12-01 3 -Araby,_LA 2015-05-01 2 -217_BC 2016-12-01 202 -2008_Trinidad_and_Tobago_League_Cup 2016-02-01 6 -Alazan_Bay 2015-12-01 22 -Aluminum_fencing 2015-11-01 48 -Achilles_tendinitis 2016-10-01 5884 -AFP_Peacekeeping_Operations_Center 2017-01-01 64 -2013_Xinjiang_clashes 2016-01-01 1 -Arborea_Giudicato_of_Arborea 2015-09-01 3 -1941_Cleveland_Rams_season 2017-06-01 40 -Ju_Posht,_Rasht 2017-01-01 3 -Ascalenia 2016-07-01 10 -Aplectoides 2018-02-01 4 -European_Cup_1969-70 2016-11-01 14 -Armen_Mkertchian 2016-05-01 9 -2015_Aspria_Tennis_Cup_-_Singles 2018-02-01 1 -14_August_1947 2017-11-01 6 -Adobe_Creative_Suite_1 2015-05-01 1 -IC_chips 2017-01-01 2 -Austo_AE300 2016-07-01 4 -Date_palms 2015-07-01 79 -BCS_bowl_game 2017-06-01 13 -AR_Border 2017-06-01 1 -Aranda_de_Duero 2016-04-01 256 -1919_Wake_Forest_Demon_Deacons_football_team 2016-01-01 16 -All_The_Wrong_Clues_For_The_Right_Solution 2017-10-01 9 -Allan_Campbell_McLean 2015-06-01 131 -Bradford_Council_election,_2011 2017-06-01 5 -Astronomy_and_astrophysics 2015-09-01 62 -Dutch_Antillean_people 2015-07-01 57 -Army_Radio 2018-03-01 711 -BBVA_Bancomer 2016-11-01 709 -Lake_Aloha 2017-01-01 30 -Andy_Bean 2018-02-01 3092 -1941_Pittsburgh_Steelers_season 2016-05-01 147 -Aniopi_Melidoni 2016-06-01 4 -Aglossosia_fusca 2017-09-01 3 -Art_books 2017-04-01 36 -1929_Washington_Senators_season 2017-04-01 47 -Antaeotricha_congelata 2016-12-01 10 -Douglas_C-54G-5-DO_Skymaster 2017-01-01 1 -Chris_Jamison 2016-11-01 827 -Ace_Blackwell 2015-11-01 9 -Abdul_Qadir_Fitrat 2018-02-01 32 -Arnoldo_Vizcaino 2017-10-01 1 -2012_Open_EuroEnergie_de_Quimper_-_Doubles 2017-12-01 3 -Dale_Rominski 2017-01-01 7 -ADHD_coaching 2015-06-01 50 -Claire_Yiu 2016-11-01 209 -Applicant 2015-10-01 253 -Apache_OpenOffice 2017-06-01 6031 -Abel_Kiprop_Mutai 2015-09-01 22 
-Airdrome_Taube 2017-04-01 46 -Andrey_Viktorovich 2016-06-01 1 -American_Idol_controversy 2016-03-01 36 -Anthrenocerus_confertus 2018-01-01 17 -Appraisal_Subcommittee 2018-03-01 17 -Babusa 2015-07-01 3 -500_homeruns 2016-06-01 1 -Argentina_national_volleyball_team 2016-08-01 64 -Chief_prosecutor_of_Russia 2015-07-01 1 -Absolution_DVD 2015-06-01 1 -1,3-Beta-glucan_synthase 2017-05-01 440 -Dave_Sinardet 2016-04-01 26 -Adeline_Whitney 2018-03-01 10 -Allon_shvut 2016-07-01 3 -2012_Penn_State_Nittany_Lions_football_season 2017-12-01 3 -Coleman-Franklin-Cannon_Mill 2017-01-01 4 -Action_director 2015-05-01 93 -AD_547 2016-01-01 1 -Acta_germanica 2017-09-01 1 -Abu_Dhabi_Global_Market_Square 2017-01-01 35 -Kozo_Shioya 2017-01-01 7 -China_Investment_Corp 2017-01-01 2 -Dmitri_Zakharovich_Protopopov 2016-04-01 129 -Anatra_Anadis 2017-10-01 208 -Archaikum 2017-11-01 5 -2000_Webby_Awards 2017-04-01 360 -2003_BCR_Open_Romania_-_Singles 2016-08-01 2 -Abacetus_bisignatus 2016-09-01 79 -American_school_of_kinshasa 2016-01-01 1 -Anna,_7th_Duchess_of_Bedford 2016-08-01 8 -Black_majority_district 2016-11-01 3 -Dagma_Lahlum 2015-07-01 1 -Credit_Saison 2015-07-01 517 -Ariyankuppam_firka 2016-02-01 19 -Annette_Fuentes 2016-06-01 17 -Angerstein,_John 2015-12-01 2 -Annenkov_Island 2016-03-01 280 -Anne_Frank_museum 2016-06-01 67 -Annales_sancti_Amandi 2017-06-01 22 -L-FABP 2017-01-01 1 -Alvord,_TX 2017-06-01 12 -2006_World_Team_Table_Tennis_Championships 2016-05-01 119 -Angriffen 2015-12-01 9 -Anthony_Oppenheimer 2017-03-01 452 -Absamat_Masaliyevich_Masaliyev 2016-09-01 1 -Airborne_Museum_at_Aldershot 2016-03-01 41 -Aktiubinsk_Oblast 2015-08-01 7 -100_East_Wisconsin 2015-05-01 782 -7th_Bangladesh_National_Film_Awards 2017-08-01 91 -Alejandro_Reyes 2017-12-01 35 -Applied_philosophy 2018-03-01 539 -Adhemar_Pimenta 2016-06-01 146 -Break_the_fourth_wall 2016-04-01 66 -Annoushka_Ducas 2017-10-01 411 -ATC_code_J01CA01 2015-06-01 1 -Evelyn_County,_New_South_Wales 2016-11-01 7 -Elastic_scattering 2016-11-01 1374 -1032_Pafuri 2015-07-01 35 -Andrew_Bromwich 2015-08-01 26 -Ishita_Arun 2017-01-01 249 -Aspergics 2016-07-01 1 -1857_in_Chile 2018-03-01 22 -Breffni 2015-07-01 38 -845_in_poetry 2017-08-01 2 -20321_Lightdonovan 2015-10-01 12 -Arthur_Chandler 2017-12-01 27 -CsISOLatin2 2017-06-01 1 -1900_Grand_National 2016-06-01 69 -Aeritalia_AMX 2017-03-01 3 -B_Sharps 2015-06-01 11 -544_area_code 2015-09-01 2 -30th_Guldbagge_Awards 2015-06-01 37 -Agrippina 2017-08-01 315 -Ardmore 2016-02-01 433 -Amplypterus_panopus 2016-03-01 23 -Alexander_Bukharov 2017-09-01 5 -Alaska_Raceway_Park 2017-01-01 46 -Albanian_National_Road_Race_Championships 2017-03-01 31 -1968_Democratic_National_Convention_protest_activity 2017-10-01 2802 -2012_Birthday_Honours 2017-10-01 427 -2000_NHL_expansion_draft 2017-06-01 1 -A_Town_Where_You_Live 2016-11-01 2920 -Ahmed_Shahzad 2018-03-01 25 -Elisabeth_Svendsen 2016-11-01 39 -2002_FINA_Synchronised_Swimming_World_Cup 2016-08-01 30 -Akatek 2017-04-01 10 -Animation_with_DAZ_Studio 2018-02-01 78 -Fergus_Craig 2016-11-01 119 -Ancel_Nalau 2015-11-01 5 -5171_Augustesen 2017-04-01 20 -Anne_McGuire 2017-11-01 329 -Australian_Photoplay_Company 2015-12-01 6 -1913_in_Canada 2017-04-01 137 -Arhopala_allata 2015-05-01 26 -Il_Paradiso_delle_Signore 2017-01-01 31 -Geri_Palast 2017-01-01 38 -Alan_Abela_Wadge 2017-03-01 77 -22nd_Tactical_Air_Support_Squadron 2017-10-01 7 -Avant_Stellar 2017-06-01 22 -Black_phantom_tetra 2016-11-01 205 -Billy_McCaffrey 2017-01-01 314 -Annie_Furuhjelm 2017-11-01 97 -1992_PGA_Tour 2017-12-01 307 
-2008_Chilean_pork_crisis 2016-01-01 55 -2012_Currie_Cup_First_Division 2018-02-01 32 -Aleksei_Fomkin 2015-05-01 144 -Alexander_Krausnick-Groh 2016-05-01 101 -Adam_Richard_Wiles 2017-08-01 5 -ATCvet_code_QA01AD01 2015-09-01 2 -Abu_Bakr_Ibn_Bajja 2017-03-01 5 -Architecture-Studio 2016-04-01 94 -950s_BC 2016-02-01 257 -Abschwunges 2017-07-01 1 -Adonis_Geroskipou 2017-06-01 15 -2008-09_SV_Werder_Bremen_season 2016-03-01 3 -Closed_loops 2016-04-01 1 -AFC_Youth_Championship_1982 2015-12-01 10 -Aquila_Shoes 2015-08-01 209 -9842_Funakoshi 2017-12-01 11 -Educational_quotient 2016-04-01 21 -Antoni_Julian_Nowowiejski 2018-01-01 211 -Adi_Oka_Idhile 2017-11-01 16 -DEXIA-BIL_Luxembourg_Open 2016-11-01 3 -Andrew_James_Simpson 2016-03-01 43 -Alexander_Boksenberg 2017-12-01 61 -1827_in_Denmark 2017-03-01 39 -Afternoon_tea_with_suggs 2017-11-01 3 -Alpha,_MN 2017-06-01 6 -Ari_Onasis 2015-06-01 4 -1961-62_Football_League_First_Division 2015-11-01 1 -Andi_Lila 2015-06-01 2847 -A_Gathering_Of_Old_Men 2018-02-01 1 -Abul_Fazl_al-Abbas 2017-01-01 1 -Asgill,_Charles 2017-08-01 1 -Alexander_Arkhangelsky 2015-07-01 12 -1947-48_Portuguese_Liga 2015-06-01 1 -3rd_MMC_-_Varna 2016-07-01 3 -Alberts,_Wayne 2017-05-01 3 -Alois_Schickelgruber 2018-02-01 9 -Hefner_Stadium 2017-01-01 2 -410912_Lisakaroline 2018-02-01 26 -Academy_at_Mountain_State 2018-03-01 1 -617_Squadron 2016-05-01 489 -Al_Silm_Haji_Hajjaj_Awwad_Al_Hajjaji 2015-07-01 5 -Arturo_Merino_Benitez_Airport 2017-10-01 13 -AEK_Athens_Futsal 2015-06-01 10 -Aggaeus 2018-02-01 2 -Association_for_Retarded_Citizens_of_the_United_States 2017-08-01 3 -Kielce_pogrom 2017-01-01 335 -1351_in_poetry 2016-01-01 17 -1923_Princeton_Tigers_football_team 2017-11-01 41 -Auzata_semipavonaria 2017-01-01 2 -892_in_poetry 2016-01-01 6 -Anton_Krotiak 2017-12-01 2 -Arthur_Shelley 2017-12-01 23 -2003_Kyoto_Purple_Sanga_season 2018-02-01 9 -Frederic_Bowker_Terrington_Carter 2016-04-01 6 -2-orthoplex 2016-03-01 1 -Acacia_australiana 2015-09-01 4 -2012_Newcastle_Knights_season 2016-06-01 103 -Ann_Wrights_Corner,_Virginia 2017-07-01 19 -12557_Caracol 2017-03-01 5 -2001_African_Footballer_of_the_Year 2017-05-01 1 -Bass_Pyramid 2017-01-01 22 -A_noodle 2015-05-01 5 -Aed_Bennan 2018-02-01 2 -1886_Yale_Bulldogs_football_team 2017-10-01 58 -2002_Players_Championship 2016-06-01 54 -African_Skimmer 2017-07-01 2 -3rd_Guldbagge_Awards 2016-12-01 39 -Arrows_A19B 2015-10-01 1 -Archduchess_Elisabetta_of_Austria-Este 2017-08-01 1526 -America_Islands 2015-11-01 1 -1932_Olympic_Games 2016-01-01 9 -2011_Chinese_pro-democracy_protests 2015-11-01 2044 -Bank_walkaway 2016-04-01 113 -594_in_Ireland 2017-04-01 1 -Association_of_Municipal_Corporations 2016-12-01 5 -Andreas_Brantelid 2015-09-01 167 -Amarthal_urf_Unchagaon 2017-05-01 82 -3-methoxymorphinan 2017-04-01 146 -2382_BC 2016-07-01 10 -1763_in_science 2016-07-01 28 -Arvert 2017-04-01 77 -Ale_yeast 2017-12-01 19 -A_Man_Without_a_Soul 2018-03-01 17 -Air_Force_Base_Louis_Trichardt 2017-09-01 1 -Athirson_Mazzoli_de_Oliveira 2017-06-01 3 -Anthony_Chan_Yau 2017-07-01 181 -Basic_Enlisted_Submarine_School 2017-06-01 392 -Aboriginal_Lands_of_Hawaiian_Ancestry 2015-09-01 11 -Fondren_Southwest,_Houston 2017-01-01 4 -3_World_Financial_Center 2017-07-01 64 -1971_IIHF_European_U19_Championship 2017-09-01 9 -1937-38_Allsvenskan 2015-12-01 6 -Christopher_Ashton_Kutcher 2017-06-01 2 -Australian_rules_football_in_south_australia 2016-12-01 1 -Amicable_pair 2018-01-01 7 -Alan_Tomes 2015-11-01 82 -Alexei_Petrovich,_Tsarevich_of_Russia 2015-12-01 3887 -Alexis_Damour 2015-10-01 66 
-Bankruptcy_Act_of_1938 2017-06-01 76 -Amphiphyllum 2016-06-01 14 -Conway_High_School_West 2016-04-01 1 -5447_Lallement 2015-11-01 10 -Gabriel_Iddan 2017-01-01 1 -1879-80_Scottish_Cup 2017-04-01 3 -2011_Eneco_Tour 2016-10-01 31 -1471_in_England 2015-11-01 94 -Ashland_Town_Hall 2017-01-01 5 -Archduke_John 2015-05-01 20 -2000_Cameroonian_Premier_League 2017-09-01 18 -1997_flood 2017-11-01 5 -Agile_management 2015-09-01 26677 -Am_841 2017-12-01 3 -Apprentice_Mason 2018-01-01 4 -Hales-Jewett_theorem 2017-01-01 2 -Alien_Abductions 2017-10-01 14 -Arjun_Menon 2016-02-01 370 -Anthokyan 2016-01-01 4 -Automobili_Lamborghini 2016-02-01 1110 -Alain_Prost 2017-04-01 25196 -Fartein_Valen 2016-04-01 90 -Antonio_Galli_da_Bibiena 2016-05-01 5 -Al_Jawf,_Libya 2017-03-01 600 -AD_695 2018-02-01 1 -Amir_chand 2015-11-01 1 -Alcis_obliquisigna 2017-08-01 1 -Chandra_Talpade_Mohanty 2017-01-01 306 -Algerian_safe_house,_Jalalabad 2015-06-01 3 -Jake_Milner 2017-01-01 1 -Alternate_Communications_Center 2017-10-01 1 -In_the_Bleachers 2017-01-01 42 -Alex_Puodziukas 2016-04-01 7 -Altarpiece_of_Pilgrim_II 2018-02-01 2 -Cybernetical_Physics 2017-01-01 3 -Christopher_Unthank 2017-06-01 2 -1982_Independence_Bowl 2015-06-01 102 -Ascoli_Calcio_1898 2018-03-01 1115 -Briggs-Rauscher_reactions 2017-06-01 1 -Adjadja 2018-02-01 45 -Afghanistan_from_Ahmad_Shah_until_Dost_Mohammed 2016-06-01 3 -Catholic_social_doctrine 2017-01-01 6 -2833_BC 2016-11-01 1 -Bethy_Woodward 2016-04-01 38 -Bateman_polynomials 2017-06-01 22 -1966_Buenos_Aires_Grand_Prix 2015-10-01 19 -A_River_Somewhere 2015-10-01 353 -2016-17_BVIFA_National_Football_League 2017-04-01 2 -1909_Major_League_Baseball_season 2015-10-01 362 -1988_Oklahoma_Sooners_football 2017-11-01 2 -2010s_in_Chechen_fashion 2016-10-01 1 -Accademia_Olimpica 2017-08-01 17 -Air_cooling 2015-07-01 2010 -Amir_Saoud 2016-11-01 22 -Alex_Auburn 2015-05-01 52 -Apamea_impulsa 2016-11-01 6 -Australian_federal_election,_2007 2015-07-01 1794 -Ain_Sakhri 2017-10-01 76 -Belosaepiidae 2015-07-01 68 -Acts_of_Parliament_in_the_United_Kingdom 2017-10-01 4070 -Equity_Office 2016-11-01 202 -David_Bintley 2017-01-01 51 -Aksel_Schiotz 2018-03-01 3 -Appropriation_Act_2000 2017-05-01 12 -Edward_Johnson_III 2016-11-01 491 -2006_Ohio_State_Buckeyes_football_team 2016-03-01 1452 -Battle_of_Fort_Beausejour 2015-07-01 97 -Abel_Foullon 2015-12-01 82 -Apollo_VIII 2015-10-01 19 -Carry_on_up_the_jungle 2015-07-01 8 -Armour_villa 2017-05-01 4 -201_Poplar 2015-08-01 265 -Arta_prefecture 2016-08-01 1 -2015-16_Ekstraklasa 2018-02-01 13 -Alport,_Ontario 2018-02-01 2 -Bongoland 2017-06-01 62 -Alfred_Charles_Post 2016-11-01 11 -Aam_Aadmi_Party_crisis 2016-10-01 1 -Andrea_Moda 2015-07-01 143 -Abdul_Halim_Sharar 2017-08-01 545 -Apostolic_Vicariate_of_Yunnan 2016-12-01 1 -Catherine_Steadman 2016-11-01 5218 -Agastachys_odorata 2015-10-01 38 -9783_Tensho-kan 2016-03-01 2 -AFL_Cairns 2017-10-01 337 -Abomey 2015-06-01 1062 -Anne_Crofton,_1st_Baroness_Crofton 2015-12-01 42 -Cash-flow_return_on_investment 2017-01-01 137 -Alberto_Arvelo_Torrealba_Municipality 2015-08-01 56 -Abyssinian_Shorthorned_Zebu 2017-09-01 124 -Albanian_hip_hop 2016-01-01 1812 -Alphonso_IV_of_Portugal 2016-02-01 12 -19th_The_Alberta_Mounted_Rifles 2016-10-01 1 -Chinese_shadow_theatre 2016-04-01 1 -American_Committee_of_the_Fourth_International 2017-08-01 4 -2014_Bahrain_GP2_Series_round 2016-03-01 80 -Alexandrian_orthodox 2017-09-01 2 -2010_Hurricane_Season 2015-05-01 18 -1938_All-Ireland_Senior_Camogie_Championship_Final 2017-01-01 1 -ATC_code_D01 2018-01-01 203 
-Albedo 2015-08-01 23484 -Chavigny,_Meurthe-et-Moselle 2017-01-01 12 -Becky_Essex 2015-07-01 51 -Archaeological_Museum_Padre_Le_Paige 2018-02-01 2 -Abu_Bakar_Sillah 2017-01-01 5 -Back_chat 2017-01-01 2 -Anchylobela_dyseimata 2015-12-01 11 -Anthony_Overton 2017-03-01 261 -Bear_maul 2016-04-01 3 -Ambarawa,_Central_Java 2016-01-01 1 -Amber_lager 2016-11-01 87 -2nd_LAAD 2017-09-01 8 -Ashiya,_Hyogo 2018-03-01 24 -Angels_at_Risk 2018-02-01 74 -Audrey_Marie_Munson 2016-03-01 17 -1984_Australian_Football_Championships 2017-01-01 27 -Ammonia_fountain 2016-06-01 434 -Allister_Bentley 2018-03-01 11 -Alsager_Hay_Hill 2016-10-01 72 -1753_English_cricket_season 2015-05-01 51 -2009-10_New_Jersey_Devils_season 2016-10-01 1 -An_Untamed_State 2016-05-01 1109 -Beatrice_Carmichael 2016-11-01 5 -Abdul_Ghani_Ahmad 2017-12-01 115 -Arteria_suralis 2017-02-01 3 -Berzasca_River 2017-01-01 1 -Angel_Attack 2015-09-01 98 -1969_San_Francisco_49ers_football_team 2017-11-01 1 -Anthony_Beilenson 2017-09-01 114 -Crystalline_Entity 2016-04-01 180 -Granice 2017-01-01 2 -203rd_General_Hospital 2017-07-01 44 -Acrocercops_rhombiferellum 2017-12-01 20 -Ampliglossum_blanchetii 2017-05-01 1 -11553_Scheria 2017-03-01 2 -Ashkenozi 2017-02-01 1 -2010_Calder_Cup_Playoffs 2018-01-01 9 -Alice_Caymmi 2016-01-01 121 -Alfredo_Alvar 2017-04-01 44 -2006_Legends_Tour 2017-07-01 30 -Albano_Albanese 2015-10-01 53 -1943_Frankford_Junction_train_wreck 2016-08-01 510 -Evans_Court_Apartment_Building 2016-04-01 4 -Abu_al-Rayhan_Muhammad_ibn_al-Biruni 2017-11-01 1 -Abubakar_Muhammad_Rimi 2015-05-01 4 -Dostpur 2016-11-01 26 -Accessories_Council_Excellence_Awards 2016-03-01 14 -2006_North_American_heat_wave 2015-06-01 1161 -Amstelodamum 2017-09-01 12 -A_Very_Peculiar_Practice 2016-08-01 1860 -Allegorie_der_Liebe 2015-09-01 1 -Alex_Mackie 2017-02-01 95 -1812_Homestead_Farm_and_Museum 2017-09-01 29 -Argus_distribution 2016-03-01 8 -Anthony_Thomas_Stover 2017-02-01 1 -Arthur_Shallcross 2016-11-01 20 -Antoine_Francois_Fourcroy 2018-01-01 1 -Abbas_Halim 2016-11-01 21 -Akiva_Baer_ben_Joseph 2017-08-01 1 -Balatonfuered 2016-11-01 3 -Antemnae 2017-11-01 204 -Cling_Cling 2017-06-01 93 -B_flat_major 2017-01-01 28 -AirExplore 2017-12-01 930 -Auckland_Super_Sprint 2015-11-01 120 -Alfredo_De_Gasperis 2017-12-01 793 -Geoffrey_I_of_Vianden 2017-01-01 5 -Copa_de_Zaachila 2016-04-01 6 -Alboacen 2017-09-01 1 -BNH_Hospital_Bangkok 2017-06-01 2 -Agricultural_health_and_safety 2016-09-01 1 -Chiasms 2017-06-01 2 -Al_Karaana 2016-05-01 58 -Alberta_Highway_872 2016-11-01 1 -Among_the_mourners 2016-03-01 1 -Achema_Power_Plant 2015-06-01 55 -ATSE_Graz 2017-10-01 65 -Arthroscopy 2017-02-01 11721 -2010-2012_European_Nations_Cup_Second_Division 2018-01-01 7 -1967_Cincinnati_Reds 2015-08-01 4 -24th_Golden_Disc_Awards 2017-05-01 71 -Johnny_Floyd 2017-01-01 17 -Arthur_Rupin 2016-02-01 5 -Alpine_skiing_at_the_2011_Canada_Winter_Games 2015-09-01 38 -College_Press_Service 2017-01-01 8 -American_Psycho 2015-08-01 55567 -CBC_Winnipeg 2017-06-01 17 -Burning_the_process 2016-04-01 1 -2011_Stanley_Cup_playoffs 2017-01-01 1036 -Andrew_Mumford 2017-01-01 6 -1925_in_fine_arts_of_the_Soviet_Union 2018-02-01 28 -Aragvi_river 2017-02-01 2 -Andrew_Adamson 2018-03-01 16269 -Arcides_fulvohirta 2016-10-01 1 -Araya_Selassie_Yohannes 2015-11-01 423 -Apartment_house 2016-09-01 85 -Advanced_Art 2015-12-01 171 -1028_Lydina 2015-06-01 53 -2005_July_6_United_Nations_assault_on_Cite_Soleil,_Haiti 2017-04-01 2 -Adolph_Weiss 2015-06-01 98 -Adam_Jerzy_Czartoryski 2015-09-01 1237 
-1980_United_States_presidential_election 2017-05-01 56 -1956_Oscars 2016-08-01 10 -Burundian_Senate_election,_2005 2016-04-01 1 -Amarolea_floridana 2015-07-01 3 -August_Bier 2015-12-01 514 -Arbelodes_sebelensis 2018-03-01 6 -Abiah_Brown 2018-02-01 1 -A_Maceo_Smith_High_School 2016-10-01 2 -1488_in_architecture 2017-12-01 6 -2009_AMP_Energy_500 2016-04-01 45 -1921_Baylor_Bears_football_team 2017-03-01 21 -Dmitry_Akhba 2015-07-01 43 -2004_Big_12_Conference_Baseball_Tournament 2016-07-01 37 -Abdisalam_Omer 2018-02-01 116 -Alma,_son_of_Alma 2015-08-01 53 -An_Phoblacht 2016-10-01 962 -2009_Turner_Prize 2016-01-01 75 -Jack_Zajac 2017-01-01 24 -1906_Wimbledon_Championships 2016-04-01 22 -Chuckwalla_Valley 2017-06-01 22 -Alien_Quadrology 2016-02-01 1 -Chalcidoptera_contraria 2016-04-01 1 -Alaska_Republican_Gubernatorial_Primary_Election,_2006 2016-02-01 1 -333639_Yaima 2018-02-01 7 -Aquila_hastata 2015-11-01 28 -Al-Fua 2017-07-01 1 -Anihilation 2015-07-01 28 -International_Toy_Fair 2017-01-01 1 -38th_Regiment_Indiana_Infantry 2017-01-01 10 -Andrea_Stella 2017-07-01 75 -Anselmo_de_Moraes 2015-09-01 562 -Applemore 2016-05-01 3 -Akpinar,_Kirsehir 2015-06-01 3 -Ant_nest 2016-05-01 53 -Catherine_of_Siena 2016-11-01 8806 -Barbos 2015-06-01 12 -Amlaib_mac_Iduilb 2017-08-01 2 -Alice_Janowski 2018-03-01 17 -Acacia_leptocarpa 2017-03-01 48 -Al-Hadi_Yahya 2016-01-01 39 -2015_British_Figure_Skating_Championships 2017-07-01 38 -Avenues_Television 2016-03-01 214 -Dendropsophus_sartori 2015-07-01 11 -1952_in_Germany 2015-05-01 63 -Armuchee_High_School 2016-04-01 27 -April_1_RFC 2017-11-01 2 -Caroline_Bliss 2016-11-01 972 -66th_Rice_Bowl 2016-06-01 17 -Alec_Smight 2017-02-01 173 -Alexei_Panin 2017-09-01 3 -Codeword 2016-04-01 84 -Dormice 2015-07-01 63 -2105_BC 2017-11-01 6 -5th_National_Congress_of_Kuomintang 2016-06-01 5 -Caminho_das_Indias 2017-01-01 5 -Agerbo 2017-11-01 2 -Abe_Anellis 2018-01-01 86 -Aceh_Medal 2015-07-01 33 -Alltech_Arena 2016-10-01 144 -Aly_Oury 2016-06-01 260 -757th_Troop_Carrier_Squadron 2017-07-01 2 -Alec_Peters 2017-12-01 2731 -Agua_Buena_Airport 2017-09-01 12 -Alessandro_Livi 2016-08-01 104 -Andkaer 2017-04-01 3 -Cateran 2017-06-01 135 -57th_Venice_International_Film_Festival 2017-04-01 180 -Brijal_Patel 2017-06-01 98 -Cnemaspis_jerdonii 2015-07-01 6 -Aluminum_sodium_salt 2016-10-01 3 -Arnaldo_Antonio_Sanabria_Ayala 2017-09-01 4 -Angels_of_Iron 2018-02-01 83 -Bugs_Bunny_Rabbit_Rampage 2017-06-01 422 -Admiralty_Class_Destroyer 2017-10-01 2 -Atlas_Media 2017-05-01 2 -Arcesilaus_i_of_cyrene 2017-03-01 1 -2011_Tajikistan_national_football_team_results 2017-04-01 13 -Artur_Shakhnazarov 2017-12-01 22 -747_Express_Bus 2018-03-01 20 -101-in-1_Party_Megamix 2017-10-01 188 -Fastpoint_Games 2016-11-01 32 -Analog_Anthology_1 2017-07-01 1 -Archival_bond 2015-09-01 119 -1985_Air_Force_Falcons_football 2017-09-01 4 -American_Airlines_plane_diverted_to_Miami_after_landing_gear_problem 2017-06-01 3 -Adaptive_Evolution_in_the_Human_Genome 2017-08-01 2 -Arthur_Strangways 2015-11-01 5 -1583_in_poetry 2015-09-01 68 -Andrew_igoudala 2015-06-01 2 -Euonychophora 2016-11-01 37 -Catechizing 2016-04-01 4 -1960-61_ice_hockey_Bundesliga_season 2018-03-01 3 -Buk_Vlaka 2017-06-01 10 -Arbor_Day 2018-03-01 16265 -Guan_Sheng 2017-01-01 73 -2014_Barcelona_Open_Banc_Sabadell 2016-08-01 57 -1976-77_Nationalliga_A 2016-04-01 1 -AFL_records 2015-11-01 16 -2005_Tour_Down_Under 2016-10-01 26 -92_BCE 2015-08-01 4 -Bento_Box_Animation 2017-01-01 1 -Alabama_Territory 2018-03-01 1195 -Abdul-Wasa_Al-Saqqaf 2016-07-01 21 
-Archbishops_of_Semarang 2017-01-01 6 -Ambivina 2017-10-01 13 -Aghjaghala_Ulia 2017-08-01 2 -Blechnum_novae-zelandiae 2016-11-01 26 -Dictyosome 2016-04-01 19 -Arts_Council_of_Great_Britain 2016-12-01 785 -LBC_Radio 2017-01-01 3 -Ageo,_Saitama 2016-06-01 396 -Babla_Mehta 2016-12-01 674 -2012-13_Russian_Cup 2018-01-01 10 -Chandragupt 2017-06-01 6 -407th_Air_Refueling_Squadron 2016-01-01 96 -Aftermarket 2016-07-01 1253 -A_Portrait_of_New_Orleans 2016-08-01 18 -2000-01_Yemeni_League 2017-03-01 1 -Actinidia_chinensis 2015-11-01 907 -Amsterdam_Tournament_1999 2018-03-01 1 -Arthur_Iberall 2017-02-01 112 -Auricula_Meretricula 2016-02-01 103 -Archbishop_of_Lahore 2016-09-01 8 -Chippewa_Indians_of_Montana 2016-04-01 9 -Abidjan-Niger_Railway 2018-01-01 22 -29th_Annual_Grammy_Awards 2017-05-01 1087 -Ateles_geoffroyi_frontatus 2017-06-01 3 -Enrico_Cernuschi 2016-11-01 3 -A4183_road 2017-02-01 8 -Ahrayut 2016-10-01 75 -Alison_Castle 2016-03-01 55 -Automobile_aftermarket 2016-10-01 5 -2008_GAINSCO_Auto_Insurance_Indy_300 2016-07-01 51 -1937_Scottish_Cup_Final 2017-04-01 126 -2005_Clipsal_500_Adelaide 2018-02-01 22 -Farid_Farjad 2016-04-01 120 -13_Tribes_of_Long_Island 2015-12-01 11 -Afroneta_bamilekei 2017-01-01 2 -Frederick_Stuart_Greene 2017-01-01 1 -Andre_Braugher 2017-04-01 37655 -1906_International_Lawn_Tennis_Challenge 2017-10-01 73 -2009-10_NFL_Playoffs 2016-01-01 69 -Cricket_Wellington 2016-11-01 2 -Craig_Blazer 2015-07-01 21 -Aeolidiella_orientalis 2017-05-01 3 -Andre_Prokovsky 2017-06-01 4 -Angela_McKee 2017-11-01 14 -Airbase_Golubovci 2016-10-01 1 -2011_ISAF_Sailing_World_Championships 2017-05-01 89 -Bartica_Airport 2017-06-01 27 -Agusan_Dam 2016-09-01 454 -Bosque_Real_Country_Club 2015-07-01 42 -Georges_Duhamel 2017-01-01 122 -Allrounder 2017-03-01 63 -2017_Missouri_State_Bears_football_team 2017-09-01 868 -Allons_a_Lafayette 2017-11-01 17 -Agathla 2015-05-01 105 -1086_in_poetry 2015-09-01 25 -Absolute_extreme 2017-09-01 1 -Agathe_Bonitzer 2017-12-01 229 -Chinese_Red_Pine 2017-06-01 18 -Angular_dispersion 2016-02-01 11 -Jean-Sebastian_Giguere 2017-01-01 2 -Actinium-235 2018-03-01 4 -Ago,_filo_e_nodo 2017-02-01 11 -Aranea_cruentata 2016-03-01 1 -2009_Korea_National_League 2017-11-01 19 -Americom-8 2016-08-01 28 -2006_Junee_Bushfire 2018-03-01 81 -2013_Major_League_Baseball_Home_Run_Derby 2017-09-01 182 -1928_US_Presidential_Election 2016-12-01 42 -After-eighty_generation 2016-02-01 127 -1932_Hawthorn_Football_Club_season 2017-07-01 16 -Amelia_Elizabeth_Mary_Rygate 2017-05-01 2 -Aline_Khalaf 2017-12-01 465 -Akron_Junction,_New_York 2017-07-01 56 -Apollo_moon_landing_conspiracy_theories 2015-09-01 4 -1978_National_League_Championship_Series 2017-03-01 325 -1959-60_German_football_championship 2017-08-01 5 -Almost_a_Bride 2017-01-01 1 -Andrew_Lysaght,_junior 2015-10-01 20 -1902_Otani_expedition 2018-02-01 1 -1892_Currie_Cup 2016-09-01 53 -1988_USC_Trojans_football_team 2016-10-01 494 -1944_in_Northern_Ireland 2016-12-01 46 -Alfred_Acherman 2017-07-01 1 -Arcadia,_Nebraska 2017-02-01 148 -4_x_400_metre_relay 2018-03-01 1 -A4030_road 2016-07-01 1 -Chi-li 2016-11-01 3 -Aircraft_fairing 2016-11-01 1861 -Buddhism_in_Belize 2015-07-01 40 -Alameda_County_Open 2017-02-01 33 -Area_of_countries_and_regions_of_the_United_Kingdom 2017-10-01 6 -2014_Weber_State_Wildcats_football_team 2016-10-01 47 -American_Journal_of_Comparative_Law 2016-04-01 62 -A_Teaspoon_Every_Four_Hours 2017-03-01 47 -Astasis 2016-03-01 1195 -Akhrakouaeronon 2015-11-01 62 -Annenkrone 2016-03-01 40 -Ballotine 2016-12-01 4753 
-2000_Kipawa_earthquake 2015-11-01 139 -Archdiocese_of_cashel_and_emly 2017-01-01 1 -Chevrolet_SS396 2017-01-01 1 -Achyroseris 2016-03-01 1 -Daniel_Pulteney 2016-11-01 29 -2006_Major_League_Baseball_draft 2017-07-01 10637 -Adetunji_Idowu_Olurin 2016-01-01 37 -Ardatov,_Nizhny_Novgorod_Oblast 2017-04-01 18 -Andrew_Hilditch 2015-08-01 398 -A_Very_Merry_Daughter_Of_the_Bride 2017-04-01 67 -1993_in_radio 2017-08-01 85 -Deltan 2016-11-01 91 -Adnan_Custovic 2017-12-01 26 -Di_Gennaro 2017-01-01 4 -237_AD 2017-11-01 1 -Aaron_Gombar 2018-03-01 2 -Acrolophus 2017-04-01 47 -Alfred_Bergman 2017-06-01 27 -Charles_Bebb 2017-06-01 39 -Dirico 2017-01-01 24 -1982_Major_League_Baseball_Draft 2016-12-01 90 -DDT_wrestling 2016-11-01 4 -1988-89_Houston_Rockets_season 2016-02-01 10 -Acacia_loderi 2015-11-01 35 -2015_Deauville_American_Film_Festival 2016-10-01 126 -Andropadus_importunus 2016-02-01 9 -Antonio_Bacchetti 2017-04-01 52 -Ann_Trindade 2015-09-01 49 -5_x_Monk_5_x_Lacy 2016-05-01 37 -Barlochan,_Ontario 2017-06-01 2 -Achaian 2017-03-01 35 -Flow_rider 2017-01-01 1 -Antiblemma_discerpta 2018-02-01 1 -1997_Illinois_Fighting_Illini_football_team 2017-11-01 331 -Ahrntal 2016-03-01 540 -Apollo_Conference 2015-10-01 329 -Algenib_in_Perseus 2016-01-01 1 -Craig_Norgate 2016-04-01 42 -Antwerp_Zoo 2015-12-01 879 -Cold_Contagious 2017-06-01 161 -Bolito 2016-11-01 181 -Chinese_bridges 2016-11-01 1 -14th_Indiana_Infantry_Regiment 2017-04-01 115 -Bindunuwewa_massacre 2015-07-01 52 -Eastshore_Highway 2016-11-01 2 -Daemonologie 2017-01-01 1655 -Aero_Pacifico 2015-07-01 1 -Blue_Ribbon_Schools_Program 2017-06-01 557 -Ash_Township,_MI 2018-02-01 3 -Al-Hatab_Square 2018-02-01 450 -Alje_Vennema 2018-02-01 187 -1920_All-Ireland_Senior_Football_Championship_Final 2016-05-01 40 -Criss_Oliva 2016-11-01 801 -Bethlehem,_Ohio 2017-01-01 16 -1976_WHA_Amateur_Draft 2015-08-01 47 -Angela_Fimmano 2017-06-01 17 -Alexander_Bonini_of_Alexandria 2017-09-01 1 -Anarchist_faq 2015-05-01 13 -Aleksander_Benedykt_Sobieski 2016-05-01 240 -Cape_Florida_Lighthouse 2016-04-01 6 -Fernando_VI_of_Spain 2017-01-01 3 -Crossing_number 2017-06-01 29 -1984_NSL_Cup 2017-05-01 26 -Barbara_Weldon 2015-06-01 29 -Andreas_Olsen 2017-01-01 32 -Battle_of_Baima 2016-04-01 2 -Amory_Hansen 2016-05-01 26 -Akhmimic 2015-11-01 41 -Al_Awda 2018-02-01 18 -Adelheid-Marie_of_Anhalt-Dessau 2016-07-01 70 -Americans_for_Technology_Leadership 2015-10-01 90 -Belizean_diplomatic_missions 2017-06-01 3 -African_communist 2016-05-01 3 -Andosol 2016-09-01 246 -Alan_Attraction 2016-05-01 15 -A_Yank_in_Rome 2015-12-01 70 -2004_in_the_United_Arab_Emirates 2018-02-01 33 -Additionality 2017-06-01 371 -Assassination_of_Trotsky 2015-06-01 47 -Alice_Sotero 2018-02-01 27 -Agyneta_platnicki 2016-04-01 4 -Alexandra_Vasilyevna_Velyaminova 2015-07-01 30 -1881_in_Chile 2016-06-01 16 -Arterial_ischemic_stroke 2018-01-01 57 -Astro_Glacier 2015-09-01 27 -Chester_Earl_Merrow 2017-06-01 58 -Alejandro_de_la_Madrid 2015-11-01 1630 -70936_Kamen 2017-08-01 1 -AK_Steel_Holding_Corp 2015-08-01 8 -1124_Stroobantia 2017-10-01 23 -Asian_Wedding 2016-10-01 15 -23837_Matthewnanni 2015-10-01 18 -Acharya_Jagadish_Chandra_Bose_Indian_Botanic_Garden 2017-03-01 4893 -Betsy_Hodges 2016-04-01 560 -Arthur_and_the_Invisibles 2015-08-01 14924 -Arkansas-Ole_Miss_football_rivalry 2015-05-01 7 -Asia_Cup 2015-09-01 5938 -Arginine_racemase 2016-12-01 15 -585th_Field_Company,_Royal_Engineers 2018-03-01 1 -1975_Stagg_Bowl 2017-08-01 6 -Dame_Commander_of_The_Most_Honourable_Order_of_the_Bath 2017-01-01 1 -Askajian 2016-02-01 26 
-2006_Nebraska_Cornhuskers_football_team 2015-08-01 975 -Cicero_Francis_Lowe_House 2015-07-01 10 -Conan_IV,_Duke_of_Brittany 2016-11-01 252 -2005_World_Modern_Pentathlon_Championships 2016-07-01 38 -1946_Aleutian_Islands_earthquake 2017-03-01 2019 -ANKRD17 2017-09-01 19 -1970_Maryland_Terrapins_football_team 2017-11-01 42 -Ali_Dehkhoda 2017-04-01 1 -1244_in_art 2015-07-01 22 -1520s_in_Denmark 2016-01-01 20 -Abdoulaye_Gaye 2017-12-01 10 -An_Angel_Has_Arrived 2016-03-01 36 -1453_BC 2015-08-01 26 -2017_National_Games_of_China 2017-05-01 1293 -A_Night_in_Sickbay 2016-05-01 251 -Dateline_Diamonds 2017-01-01 53 -419_guestbook_spamming 2016-02-01 5 -Familiar_bluet 2017-01-01 4 -Abu_Bakr_Mirza 2017-10-01 86 -7272_Darbydyar 2017-11-01 4 -Ages_of_consent_in_Latin_America 2017-03-01 961 -1982_Japan_Soccer_League_Cup 2016-04-01 14 -2810_BC 2015-07-01 9 -Druga_Liga_Republike_Srpske 2017-01-01 1 -1998_Swedish_Rally 2017-09-01 34 -1567_in_Norway 2015-10-01 89 -126_Army_Engineer_Regiment,_Royal_Engineers 2016-03-01 5 -2017_American_League_Wild_Card_Game 2017-10-01 25120 -August_Follen 2017-01-01 2 -Ala_Gertner 2015-11-01 876 -Glenwood,_Harford_County,_Maryland 2017-01-01 3 -Applied_ecology 2017-12-01 730 -Ariarathes_V_Eusebes_Philopator 2018-03-01 5 -2006_AFC_Champions_League 2017-09-01 947 -60_minutes_2 2016-10-01 2 -Embryonic_shield 2017-01-01 2 -2001_Meath_Intermediate_Football_Championship 2015-11-01 8 -Apparition_of_Christ_to_Madonna 2017-06-01 5 -Hoosier_Road_Elementary 2017-01-01 1 -Arua_Uda 2016-12-01 29 -Array_comprehension 2015-11-01 8 -Baszki 2015-06-01 36 -Akron_Neighborhoods 2016-01-01 4 -Catholic_Church_in_Costa_Rica 2017-06-01 85 -Canada-Sweden_relations 2015-07-01 1 -Barza_Radio_Community 2016-11-01 6 -Dalhousie_Middle_School 2016-11-01 5 -Alliphis_bakeri 2017-11-01 2 -Bartica_massacre 2017-06-01 53 -30th_January 2015-11-01 10 -1920_revolution 2017-05-01 5 -Amyraldism 2017-08-01 828 -AA_Jefferson_District 2016-05-01 45 -Eunebristis_cinclidias 2017-01-01 1 -A_Scott_Connelly 2017-06-01 5 -Antony_Durose 2016-07-01 19 -Arval_Brethren 2017-11-01 579 -Anthidium_dissectum 2017-05-01 2 -Aru,_Democratic_Republic_of_the_Congo 2017-04-01 81 -1956-57_West_Indian_cricket_season 2017-04-01 2 -2014_Moscow_Film_Festival 2017-08-01 2 -Anna_Gurji 2017-06-01 27 -Allen_Memorial_Medical_Library 2016-07-01 120 -Anton_Sistermans 2017-02-01 36 -Clotheshorses 2017-06-01 1 -36_Stratagems 2017-08-01 25 -Attack_of_the_crab_monsters 2016-10-01 16 -30_rock_awards 2015-09-01 2 -Aeroflot,_Uralsk_Civil_Aviation_Directorate 2017-08-01 2 -Amblyseius_parabufortus 2017-06-01 3 -Indian_coral_tree 2017-01-01 3 -3285_Ruth_Wolfe 2016-02-01 9 -Anderson_da_Silva_Gibin 2016-08-01 73 -5001st_Composite_Group 2017-03-01 4 -Danzik 2016-04-01 8 -4810_Ruslanova 2016-03-01 2 -Arkendale,_Virginia 2016-04-01 14 -Al_Francis_Bichara 2016-09-01 239 -Cayena 2017-01-01 1 -A_Glass_of_Darkness 2017-04-01 95 -GMC_CCKW 2017-01-01 887 -Alabama_State_Route_107 2015-11-01 13 -2011_in_motorsport 2017-12-01 26 -Adecco_General_Staffing,_New_Zealand 2017-12-01 86 -Anbargah 2015-10-01 6 -1995_Asian_Cup_Winners_Cup 2016-06-01 7 -1986_Wales_rugby_union_tour_of_the_South_Pacific 2016-12-01 30 -Adya_Goud_Brahmin 2017-03-01 2 -Akcakiraz 2015-08-01 5 -24249_Bobbiolson 2017-12-01 4 -Ahmanson_Theatre 2016-02-01 801 -Abdullah_ibn_Jahsh 2016-10-01 196 -1937_in_Chile 2015-08-01 24 -2000_in_England 2016-01-01 57 -A_Deepness_In_The_Sky 2017-08-01 2 -Area_code_678 2015-07-01 480 -Avalon_Hill 2017-01-01 880 -Anna,_Duchess_of_Prussia 2015-12-01 315 -Alexandr_Syman 
2017-04-01 24 -7400_series_logic 2017-11-01 2 -Greenleaf_Township,_Minnesota 2017-01-01 1 -Acetylsal 2017-04-01 6 -Earth_and_Man_National_Museum 2016-11-01 43 -Affetside 2015-10-01 185 -1971_CFL_season 2015-08-01 202 -Beth_Bader 2016-11-01 21 -Enrolled_Nurse 2016-04-01 5 -Al-Azraq 2016-12-01 22 -4th_South_Carolina_Regiment 2015-07-01 42 -Amanda_Overmyer 2017-02-01 356 -Auto_wrap 2016-02-01 8 -Anonymous_internet_banking 2015-07-01 98 -Curatoria 2016-11-01 3 -A-roll 2016-05-01 134 -Accra_hearts_of_oak_sc 2017-10-01 4 -Apostasy_from_Judaism 2015-12-01 45 -Acantharctia_tenebrosa 2018-01-01 3 -Abigail_Keasey_Frankel 2017-11-01 25 -2008_Paraguayan_general_election 2016-01-01 1 -Adams_motor 2015-09-01 37 -Drummond_Community_High_School 2017-01-01 17 -Andrews_Nakahara 2017-10-01 474 -10th_Maccabiah 2017-04-01 30 -Ackerman,_Rick 2015-08-01 4 -Dumri,_Buxar 2016-11-01 35 -Asking_Jesus_into_your_heart 2016-09-01 1 -Adamowicz_brothers 2016-12-01 161 -Alien_Musibat 2017-12-01 2 -Ahmad_Al_Tayer 2016-04-01 39 -Analytical_phonics 2016-01-01 520 -Do_It_Good 2016-04-01 281 -2004_Kumbakonam_School_fire 2017-12-01 2114 -1977_Chattanooga_Mocs_football_team 2016-08-01 3 -Globe_valves 2017-01-01 11 -Abelmoschus_crinitus 2016-04-01 18 -1874_Yale_Bulldogs_football_team 2016-02-01 37 -Climer 2017-06-01 1 -Auchroisk 2017-06-01 37 -2010_Albirex_Niigata_season 2016-10-01 19 -Adhocracy 2017-06-01 2217 -Chios_Massacre 2015-07-01 1110 -African_Red_Slip 2017-02-01 221 -1976_Portland_Timbers_season 2016-07-01 41 -Alsace-Larraine 2015-09-01 2 -3750_Ilizarov 2017-07-01 12 -Aleksandr_Shkaev 2017-05-01 1 -32_bar_form 2016-01-01 12 -Aequatorium_jamesonii 2018-03-01 14 -Abade_neiva 2016-09-01 2 -Arakvaz 2016-08-01 23 -207_Sqn 2017-10-01 2 -Ducal_hat 2016-11-01 10 -2_Degrees 2017-03-01 19 -Ahmeddiyya_Islam 2016-03-01 4 -Amidi-ye_Kohneh 2017-11-01 13 -Contributions_to_Indian_Sociology 2016-11-01 42 -Clark_Leiblee 2016-04-01 5 -Abraham_of_Strathearn 2017-09-01 14 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/assets/time.parquet b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/assets/time.parquet deleted file mode 100644 index 37402b87f1a2adb1b432dde3ffa88fb453748f35..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 663 zcmWG=3^EjD5e*Qv@e$<`Wf5Xv&|qNTWn^IF#J`>x)EXJkAA+~^71el(b3KEM7OH)g9 tlM4zI%2JDpGxPHl4E0R)40VJ390P(q{6i#lWEdELh(Uyrfgu2xBmsN;T|WQ- diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/binary.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/binary.md deleted file mode 100644 index 306c611527a..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/binary.md +++ /dev/null @@ -1,241 +0,0 @@ ---- -sidebar_label: 'バイナリとネイティブ' -slug: '/integrations/data-formats/binary-native' -title: 'ClickHouse でのネイティブおよびバイナリ形式の使用' -description: 'ClickHouse でのネイティブおよびバイナリ形式の使用方法について説明したページ' ---- - -import CloudNotSupportedBadge from '@theme/badges/CloudNotSupportedBadge'; - - -# ClickHouseでのネイティブおよびバイナリ形式の使用 - -ClickHouseは複数のバイナリ形式をサポートしており、これによりパフォーマンスとスペース効率が向上します。バイナリ形式は、データがバイナリ形式で保存されるため、文字エンコーディングにおいても安全です。 - -デモ用に、some_data [テーブル](assets/some_data.sql)と[data](assets/some_data.tsv)を使用しますので、あなたのClickHouseインスタンスで再現してみてください。 - -## ネイティブClickHouse形式でのエクスポート {#exporting-in-a-native-clickhouse-format} - 
-ClickHouseノード間でデータをエクスポートおよびインポートするのに最も効率的なデータ形式は[Native](/interfaces/formats.md/#native)形式です。エクスポートは`INTO OUTFILE`句を使用して実行します: - -```sql -SELECT * FROM some_data -INTO OUTFILE 'data.clickhouse' FORMAT Native -``` - -これにより、ネイティブ形式の[data.clickhouse](assets/data.clickhouse)ファイルが作成されます。 - -### ネイティブ形式からのインポート {#importing-from-a-native-format} - -データをインポートするには、[file()](/sql-reference/table-functions/file.md)を使用して小さなファイルや探索目的の場合の操作を行います: - -```sql -DESCRIBE file('data.clickhouse', Native); -``` -```response -┌─name──┬─type───┬─default_type─┬─default_expression─┬─comment─┬─codec_expression─┬─ttl_expression─┐ -│ path │ String │ │ │ │ │ │ -│ month │ Date │ │ │ │ │ │ -│ hits │ UInt32 │ │ │ │ │ │ -└───────┴────────┴──────────────┴────────────────────┴─────────┴──────────────────┴────────────────┘ -``` - -:::tip -`file()`関数を使用する場合、ClickHouse Cloudではファイルが存在するマシン上で`clickhouse client`のコマンドを実行する必要があります。もう1つのオプションは、[`clickhouse-local`](/operations/utilities/clickhouse-local.md)を使用してローカルでファイルを探索することです。 -::: - -プロダクション環境では、`FROM INFILE`を使用してデータをインポートします: - -```sql -INSERT INTO sometable -FROM INFILE 'data.clickhouse' -FORMAT Native -``` - -### ネイティブ形式の圧縮 {#native-format-compression} - -データをネイティブ形式にエクスポートする際に圧縮を有効にすることもできます(ほとんどの他の形式と同様)し、`COMPRESSION`句を使用します: - -```sql -SELECT * FROM some_data -INTO OUTFILE 'data.clickhouse' -COMPRESSION 'lz4' -FORMAT Native -``` - -エクスポート時にLZ4圧縮を使用しました。データをインポートする際にも指定する必要があります: - -```sql -INSERT INTO sometable -FROM INFILE 'data.clickhouse' -COMPRESSION 'lz4' -FORMAT Native -``` - -## RowBinary形式へのエクスポート {#exporting-to-rowbinary} - -もう一つのサポートされているバイナリ形式は[RowBinary](/interfaces/formats.md/#rowbinary)で、バイナリで表現された行でデータをインポートおよびエクスポートできます: - -```sql -SELECT * FROM some_data -INTO OUTFILE 'data.binary' FORMAT RowBinary -``` - -これにより、[data.binary](assets/data.binary)ファイルがバイナリ行形式で生成されます。 - -### RowBinaryファイルの探索 {#exploring-rowbinary-files} -この形式では自動スキーマ推論はサポートされていないため、ロードする前にスキーマを明示的に定義する必要があります: - -```sql -SELECT * -FROM file('data.binary', RowBinary, 'path String, month Date, hits UInt32') -LIMIT 5 -``` -```response -┌─path───────────────────────────┬──────month─┬─hits─┐ -│ Bangor_City_Forest │ 2015-07-01 │ 34 │ -│ Alireza_Afzal │ 2017-02-01 │ 24 │ -│ Akhaura-Laksam-Chittagong_Line │ 2015-09-01 │ 30 │ -│ 1973_National_500 │ 2017-10-01 │ 80 │ -│ Attachment │ 2017-09-01 │ 1356 │ -└────────────────────────────────┴────────────┴──────┘ -``` - -[RowBinaryWithNames](/interfaces/formats.md/#rowbinarywithnames)の使用を検討してください。これはカラムリストのヘッダー行も追加します。[RowBinaryWithNamesAndTypes](/interfaces/formats.md/#rowbinarywithnamesandtypes)はカラム型を含む追加のヘッダー行も追加します。 - -### RowBinaryファイルからのインポート {#importing-from-rowbinary-files} -RowBinaryファイルからデータをロードするには、`FROM INFILE`句を使用します: - -```sql -INSERT INTO sometable -FROM INFILE 'data.binary' -FORMAT RowBinary -``` - -## RawBLOBを使用した単一バイナリ値のインポート {#importing-single-binary-value-using-rawblob} - -ファイル全体を読み込み、テーブルのフィールドに保存したいとしましょう。 -この場合、[RawBLOB形式](/interfaces/formats.md/#rawblob)を使用できます。この形式は単一カラムのテーブルとのみ直接使用できます: - -```sql -CREATE TABLE images(data String) Engine = Memory -``` - -ここでは、`images`テーブルに画像ファイルを保存します: - -```bash -cat image.jpg | clickhouse-client -q "INSERT INTO images FORMAT RawBLOB" -``` - -`data`フィールドの長さをチェックすると、元のファイルサイズと等しくなります: - -```sql -SELECT length(data) FROM images -``` -```response -┌─length(data)─┐ -│ 6121 │ -└──────────────┘ -``` - -### RawBLOBデータのエクスポート {#exporting-rawblob-data} - -この形式は、`INTO OUTFILE`句を使用してデータをエクスポートするためにも使用できます: - -```sql -SELECT * FROM images LIMIT 1 -INTO OUTFILE 'out.jpg' -FORMAT RawBLOB -``` - 
-1つの値以上をエクスポートするとファイルが破損するため、`LIMIT 1`を使用する必要があることに注意してください。 - -## MessagePack {#messagepack} - -ClickHouseは、[MessagePack](https://msgpack.org/)へのインポートおよびエクスポートを、[MsgPack](/interfaces/formats.md/#msgpack)を使用してサポートしています。MessagePack形式へエクスポートするには: - -```sql -SELECT * -FROM some_data -INTO OUTFILE 'data.msgpk' -FORMAT MsgPack -``` - -[MessagePackファイル](assets/data.msgpk)からデータをインポートするには: - -```sql -INSERT INTO sometable -FROM INFILE 'data.msgpk' -FORMAT MsgPack -``` - -## プロトコルバッファ {#protocol-buffers} - - - -[Protocol Buffers](/interfaces/formats.md/#protobuf)を使用するには、最初に[スキーマファイル](assets/schema.proto)を定義する必要があります: - -```protobuf -syntax = "proto3"; - -message MessageType { - string path = 1; - date month = 2; - uint32 hits = 3; -}; -``` - -このスキーマファイルへのパス(この場合`schema.proto`)は、[Protobuf](/interfaces/formats.md/#protobuf)形式の`format_schema`設定オプションに設定します: - -```sql -SELECT * FROM some_data -INTO OUTFILE 'proto.bin' -FORMAT Protobuf -SETTINGS format_schema = 'schema:MessageType' -``` - -これにより、[proto.bin](assets/proto.bin)ファイルにデータが保存されます。ClickHouseは、Protobufデータのインポートとネストされたメッセージもサポートしています。単一のProtocol Bufferメッセージで作業するには、[ProtobufSingle](/interfaces/formats.md/#protobufsingle)を使用してください(この場合、長さ区切り子は省略されます)。 - -## Cap'n Proto {#capn-proto} - - - -ClickHouseがサポートするもう一つの人気のバイナリシリアル化形式は[Cap'n Proto](https://capnproto.org/)です。`Protobuf`形式と同様に、私たちの例ではスキーマファイル([`schema.capnp`](assets/schema.capnp))を定義する必要があります: - -```response -@0xec8ff1a10aa10dbe; - -struct PathStats { - path @0 :Text; - month @1 :UInt32; - hits @2 :UInt32; -} -``` - -このスキーマを使用して、[CapnProto](/interfaces/formats.md/#capnproto)形式でデータをインポートおよびエクスポートできます: - -```sql -SELECT - path, - CAST(month, 'UInt32') AS month, - hits -FROM some_data -INTO OUTFILE 'capnp.bin' -FORMAT CapnProto -SETTINGS format_schema = 'schema:PathStats' -``` - -`Date`カラムを`UInt32`にキャストする必要があったことに注意してください。これは[対応する型の一致](/interfaces/formats/CapnProto#data_types-matching-capnproto)が必要だからです。 - -## その他の形式 {#other-formats} - -ClickHouseは、さまざまなシナリオやプラットフォームに対応するために、テキスト形式とバイナリ形式の両方をサポートします。さまざまな形式やそれらとの作業方法については、以下の記事を参照してください: - -- [CSVおよびTSV形式](csv-tsv.md) -- [Parquet](parquet.md) -- [JSON形式](/integrations/data-ingestion/data-formats/json/intro.md) -- [正規表現とテンプレート](templates-regex.md) -- **ネイティブおよびバイナリ形式** -- [SQL形式](sql.md) - -また、[clickhouse-local](https://clickhouse.com/blog/extracting-converting-querying-local-files-with-sql-clickhouse-local)もチェックしてください。これは、ClickHouseサーバーを起動せずにローカルやリモートのファイルで作業するための、持ち運び可能なフル機能ツールです。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/binary.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/binary.md.hash deleted file mode 100644 index 4f97e0b4e9c..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/binary.md.hash +++ /dev/null @@ -1 +0,0 @@ -17788adf6ed3b52c diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/csv-tsv.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/csv-tsv.md deleted file mode 100644 index c2896c076c5..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/csv-tsv.md +++ /dev/null @@ -1,380 +0,0 @@ ---- -sidebar_label: 'CSV and TSV' -slug: '/integrations/data-formats/csv-tsv' -title: 'Working with CSV and TSV data in ClickHouse' -description: 'Page describing how to work with CSV and TSV data in ClickHouse' ---- - - - - -# 
ClickHouseにおけるCSVおよびTSVデータの操作 - -ClickHouseはCSVからのデータのインポートとCSVへのデータのエクスポートをサポートしています。CSVファイルはヘッダ行、カスタム区切り文字、エスケープ記号など、異なるフォーマットの仕様で提供されることがあるため、ClickHouseでは各ケースに効率的に対処するためのフォーマットと設定が用意されています。 - -## CSVファイルからのデータのインポート {#importing-data-from-a-csv-file} - -データをインポートする前に、関連する構造のテーブルを作成しましょう: - -```sql -CREATE TABLE sometable -( - `path` String, - `month` Date, - `hits` UInt32 -) -ENGINE = MergeTree -ORDER BY tuple(month, path) -``` - -[CSVファイル](assets/data_small.csv)から`sometable`テーブルにデータをインポートするには、ファイルを直接clickhouse-clientにパイプします: - -```bash -clickhouse-client -q "INSERT INTO sometable FORMAT CSV" < data_small.csv -``` - -ここでは、ClickHouseにCSV形式のデータを取り込んでいることを通知するために[FORMAT CSV](/interfaces/formats.md/#csv)を使用しています。あるいは、[FROM INFILE](/sql-reference/statements/insert-into.md/#inserting-data-from-a-file)句を使ってローカルファイルからデータをロードすることもできます: - -```sql -INSERT INTO sometable -FROM INFILE 'data_small.csv' -FORMAT CSV -``` - -ここでは、ClickHouseがファイル形式を理解できるように`FORMAT CSV`句を使用しています。また、[url()](/sql-reference/table-functions/url.md)関数を使用してURLから直接データをロードしたり、[s3()](/sql-reference/table-functions/s3.md)関数を使用してS3ファイルからデータをロードすることも可能です。 - -:::tip -`file()`および`INFILE`/`OUTFILE`に対しては明示的なフォーマット設定をスキップできます。 -その場合、ClickHouseは自動的にファイル拡張子に基づいてフォーマットを検出します。 -::: - -### ヘッダ付きのCSVファイル {#csv-files-with-headers} - -私たちの[CSVファイルにヘッダがあると仮定しましょう](assets/data_small_headers.csv): - -```bash -head data-small-headers.csv -``` -```response -"path","month","hits" -"Akiba_Hebrew_Academy","2017-08-01",241 -"Aegithina_tiphia","2018-02-01",34 -``` - -このファイルからデータをインポートするには、[CSVWithNames](/interfaces/formats.md/#csvwithnames)フォーマットを使用します: - -```bash -clickhouse-client -q "INSERT INTO sometable FORMAT CSVWithNames" < data_small_headers.csv -``` - -この場合、ClickHouseはファイルからデータをインポートする際に最初の行をスキップします。 - -:::tip -23.1 [バージョン](https://github.com/ClickHouse/ClickHouse/releases)以降、ClickHouseは`CSV`タイプが使用されているときにCSVファイルのヘッダを自動的に検出するため、`CSVWithNames`や`CSVWithNamesAndTypes`を使用する必要はありません。 -::: - -### カスタム区切り文字のあるCSVファイル {#csv-files-with-custom-delimiters} - -CSVファイルがカンマ以外の区切り文字を使用している場合、[format_csv_delimiter](/operations/settings/settings-formats.md/#format_csv_delimiter)オプションを使用して関連する記号を設定することができます: - -```sql -SET format_csv_delimiter = ';' -``` - -これで、CSVファイルからインポートする際に、カンマの代わりに`;`記号が区切り文字として使用されるようになります。 - -### CSVファイル内の行のスキップ {#skipping-lines-in-a-csv-file} - -時には、CSVファイルからデータをインポートする際に特定の行数をスキップしたい場合があります。これは、[input_format_csv_skip_first_lines](/operations/settings/settings-formats.md/#input_format_csv_skip_first_lines)オプションを使用して行うことができます: - -```sql -SET input_format_csv_skip_first_lines = 10 -``` - -この場合、CSVファイルの最初の10行をスキップします: - -```sql -SELECT count(*) FROM file('data-small.csv', CSV) -``` -```response -┌─count()─┐ -│ 990 │ -└─────────┘ -``` - -[ファイル](assets/data_small.csv)には1k行がありますが、最初の10行をスキップするように指定したため、ClickHouseは990行のみを読み込みました。 - -:::tip -`file()`関数を使用する場合、ClickHouse Cloudでは、ファイルが存在するマシンで`clickhouse client`のコマンドを実行する必要があります。別のオプションは、ローカルファイルを探索するために[`clickhouse-local`](/operations/utilities/clickhouse-local.md)を使用することです。 -::: - -### CSVファイル内のNULL値の扱い {#treating-null-values-in-csv-files} - -NULL値は、ファイルを生成したアプリケーションによって異なる方法でエンコードされることがあります。デフォルトでは、ClickHouseはCSV内のNULL値として`\N`を使用します。しかし、[format_csv_null_representation](/operations/settings/settings-formats.md/#format_tsv_null_representation)オプションを使用してこれを変更できます。 - -以下のCSVファイルを考えてみましょう: - -```bash -> cat nulls.csv -Donald,90 -Joe,Nothing -Nothing,70 -``` - -このファイルからデータをロードすると、ClickHouseは`Nothing`を文字列として扱います(これは正しいです): - -```sql -SELECT * FROM file('nulls.csv') -``` -```response 
-┌─c1──────┬─c2──────┐ -│ Donald │ 90 │ -│ Joe │ Nothing │ -│ Nothing │ 70 │ -└─────────┴─────────┘ -``` - -ClickHouseに`Nothing`を`NULL`として扱わせたい場合、次のオプションを定義できます: - -```sql -SET format_csv_null_representation = 'Nothing' -``` - -これで、期待される場所に`NULL`があります: - -```sql -SELECT * FROM file('nulls.csv') -``` -```response -┌─c1─────┬─c2───┐ -│ Donald │ 90 │ -│ Joe │ ᴺᵁᴸᴸ │ -│ ᴺᵁᴸᴸ │ 70 │ -└────────┴──────┘ -``` - -## TSV(タブ区切り)ファイル {#tsv-tab-separated-files} - -タブ区切りデータフォーマットは、データ交換フォーマットとして広く使用されています。[TSVファイル](assets/data_small.tsv)からClickHouseにデータをロードするには、[TabSeparated](/interfaces/formats.md/#tabseparated)フォーマットが使用されます: - -```bash -clickhouse-client -q "INSERT INTO sometable FORMAT TabSeparated" < data_small.tsv -``` - -ヘッダのあるTSVファイルを操作するための[TabSeparatedWithNames](/interfaces/formats.md/#tabseparatedwithnames)フォーマットもあります。また、CSVと同様に、[input_format_tsv_skip_first_lines](/operations/settings/settings-formats.md/#input_format_tsv_skip_first_lines)オプションを使用して最初のX行をスキップすることができます。 - -### 生TSV {#raw-tsv} - -時には、TSVファイルがタブや行の改行をエスケープせずに保存されていることがあります。そのようなファイルを扱うには[TabSeparatedRaw](/interfaces/formats.md/#tabseparatedraw)を使用します。 - -## CSVへのエクスポート {#exporting-to-csv} - -前の例に示した任意のフォーマットを使ってデータをエクスポートすることもできます。テーブル(またはクエリ)からCSV形式にデータをエクスポートするには、同じ`FORMAT`句を使用します: - -```sql -SELECT * -FROM sometable -LIMIT 5 -FORMAT CSV -``` -```response -"Akiba_Hebrew_Academy","2017-08-01",241 -"Aegithina_tiphia","2018-02-01",34 -"1971-72_Utah_Stars_season","2016-10-01",1 -"2015_UEFA_European_Under-21_Championship_qualification_Group_8","2015-12-01",73 -"2016_Greater_Western_Sydney_Giants_season","2017-05-01",86 -``` - -CSVファイルにヘッダを追加するには、[CSVWithNames](/interfaces/formats.md/#csvwithnames)フォーマットを使用します: - -```sql -SELECT * -FROM sometable -LIMIT 5 -FORMAT CSVWithNames -``` -```response -"path","month","hits" -"Akiba_Hebrew_Academy","2017-08-01",241 -"Aegithina_tiphia","2018-02-01",34 -"1971-72_Utah_Stars_season","2016-10-01",1 -"2015_UEFA_European_Under-21_Championship_qualification_Group_8","2015-12-01",73 -"2016_Greater_Western_Sydney_Giants_season","2017-05-01",86 -``` - -### エクスポートしたデータをCSVファイルに保存する {#saving-exported-data-to-a-csv-file} - -エクスポートしたデータをファイルに保存するには、[INTO...OUTFILE](/sql-reference/statements/select/into-outfile.md)句を使用します: - -```sql -SELECT * -FROM sometable -INTO OUTFILE 'out.csv' -FORMAT CSVWithNames -``` -```response -36838935 rows in set. Elapsed: 1.304 sec. Processed 36.84 million rows, 1.42 GB (28.24 million rows/s., 1.09 GB/s.) 
-``` - -ClickHouseが36m行をCSVファイルに保存するのに**約1**秒かかったことに注意してください。 - -### カスタム区切り文字でのCSVエクスポート {#exporting-csv-with-custom-delimiters} - -カンマ以外の区切り文字を使用したい場合は、[format_csv_delimiter](/operations/settings/settings-formats.md/#format_csv_delimiter)設定オプションを使用します: - -```sql -SET format_csv_delimiter = '|' -``` - -これでClickHouseはCSV形式の区切り文字として`|`を使用します: - -```sql -SELECT * -FROM sometable -LIMIT 5 -FORMAT CSV -``` -```response -"Akiba_Hebrew_Academy"|"2017-08-01"|241 -"Aegithina_tiphia"|"2018-02-01"|34 -"1971-72_Utah_Stars_season"|"2016-10-01"|1 -"2015_UEFA_European_Under-21_Championship_qualification_Group_8"|"2015-12-01"|73 -"2016_Greater_Western_Sydney_Giants_season"|"2017-05-01"|86 -``` - -### Windows向けのCSVエクスポート {#exporting-csv-for-windows} - -Windows環境でCSVファイルを正しく動作させるには、[output_format_csv_crlf_end_of_line](/operations/settings/settings-formats.md/#output_format_csv_crlf_end_of_line)オプションを有効にする必要があります。これにより、行の改行として`\n`の代わりに`\r\n`が使用されます: - -```sql -SET output_format_csv_crlf_end_of_line = 1; -``` - -## CSVファイルのスキーマ推測 {#schema-inference-for-csv-files} - -不明なCSVファイルを扱う場合が多いため、カラムに使用するタイプを調べる必要があります。ClickHouseはデフォルトで、与えられたCSVファイルの分析に基づいてデータフォーマットを推測しようとします。これを「スキーマ推測」と呼びます。検出されたデータ型は、`DESCRIBE`ステートメントを`[file()](/sql-reference/table-functions/file.md)`関数と組み合わせて調べることができます: - -```sql -DESCRIBE file('data-small.csv', CSV) -``` -```response -┌─name─┬─type─────────────┬─default_type─┬─default_expression─┬─comment─┬─codec_expression─┬─ttl_expression─┐ -│ c1 │ Nullable(String) │ │ │ │ │ │ -│ c2 │ Nullable(Date) │ │ │ │ │ │ -│ c3 │ Nullable(Int64) │ │ │ │ │ │ -└──────┴──────────────────┴──────────────┴────────────────────┴─────────┴──────────────────┴────────────────┘ -``` - -ここで、ClickHouseはCSVファイルのカラムタイプを効率的に推測しました。ClickHouseに推測させたくない場合は、次のオプションでこれを無効にできます: - -```sql -SET input_format_csv_use_best_effort_in_schema_inference = 0 -``` - -この場合、すべてのカラムタイプは`String`として扱われます。 - -### 明示的なカラムタイプを使用したCSVのエクスポートとインポート {#exporting-and-importing-csv-with-explicit-column-types} - -ClickHouseは、データをエクスポートする際にカラムタイプを明示的に設定することも許可しています。[CSVWithNamesAndTypes](/interfaces/formats.md/#csvwithnamesandtypes)(および他の*WithNames形式ファミリー)を使用します: - -```sql -SELECT * -FROM sometable -LIMIT 5 -FORMAT CSVWithNamesAndTypes -``` -```response -"path","month","hits" -"String","Date","UInt32" -"Akiba_Hebrew_Academy","2017-08-01",241 -"Aegithina_tiphia","2018-02-01",34 -"1971-72_Utah_Stars_season","2016-10-01",1 -"2015_UEFA_European_Under-21_Championship_qualification_Group_8","2015-12-01",73 -"2016_Greater_Western_Sydney_Giants_season","2017-05-01",86 -``` - -このフォーマットには2つのヘッダ行が含まれます。一つはカラム名で、もう一つはカラムタイプです。これにより、ClickHouse(および他のアプリケーション)は[そのようなファイル](assets/data_csv_types.csv)からデータを読み込む際にカラムタイプを識別できます: - -```sql -DESCRIBE file('data_csv_types.csv', CSVWithNamesAndTypes) -``` -```response -┌─name──┬─type───┬─default_type─┬─default_expression─┬─comment─┬─codec_expression─┬─ttl_expression─┐ -│ path │ String │ │ │ │ │ │ -│ month │ Date │ │ │ │ │ │ -│ hits │ UInt32 │ │ │ │ │ │ -└───────┴────────┴──────────────┴────────────────────┴─────────┴──────────────────┴────────────────┘ -``` - -これでClickHouseは、推測するのではなく第(2)ヘッダ行に基づいてカラムタイプを識別します。 - -## カスタム区切り文字、セパレーター、およびエスケープルール {#custom-delimiters-separators-and-escaping-rules} - -複雑なケースでは、テキストデータが非常にカスタムな方法でフォーマットされている場合でも、構造を持つことがあります。ClickHouseには、そのような場合のために特別な[CustomSeparated](/interfaces/formats.md/#format-customseparated)フォーマットがあり、カスタムエスケープルール、区切り文字、行セパレーター、開始/終了シンボルを設定できます。 - -以下のデータがファイルにあるとします: - -```text -row('Akiba_Hebrew_Academy';'2017-08-01';241),row('Aegithina_tiphia';'2018-02-01';34),... 
-``` - -各行が`row()`でラップされており、行は`,`で区切られ、個々の値は`;`で区切られていることがわかります。この場合、次の設定を使用してこのファイルからデータを読み取ることができます: - -```sql -SET format_custom_row_before_delimiter = 'row('; -SET format_custom_row_after_delimiter = ')'; -SET format_custom_field_delimiter = ';'; -SET format_custom_row_between_delimiter = ','; -SET format_custom_escaping_rule = 'Quoted'; -``` - -これで、カスタムフォーマットの[ファイル](assets/data_small_custom.txt)からデータをロードできます: - -```sql -SELECT * -FROM file('data_small_custom.txt', CustomSeparated) -LIMIT 3 -``` -```response -┌─c1────────────────────────┬─────────c2─┬──c3─┐ -│ Akiba_Hebrew_Academy │ 2017-08-01 │ 241 │ -│ Aegithina_tiphia │ 2018-02-01 │ 34 │ -│ 1971-72_Utah_Stars_season │ 2016-10-01 │ 1 │ -└───────────────────────────┴────────────┴─────┘ -``` - -[CustomSeparatedWithNames](/interfaces/formats.md/#customseparatedwithnames)を使用して、ヘッダを正しくエクスポートおよびインポートすることもできます。さらに複雑なケースには[regexおよびテンプレート](templates-regex.md)フォーマットを探索してください。 - -## 大きなCSVファイルの操作 {#working-with-large-csv-files} - -CSVファイルは大きくなることがあり、ClickHouseは任意のサイズのファイルで効率的に動作します。大きなファイルは通常圧縮されて提供され、ClickHouseは処理前に圧縮を解く必要がありません。挿入時に`COMPRESSION`句を使用できます: - -```sql -INSERT INTO sometable -FROM INFILE 'data_csv.csv.gz' -COMPRESSION 'gzip' FORMAT CSV -``` - -`COMPRESSION`句を省略した場合、ClickHouseは拡張子に基づいてファイルの圧縮を推測しようとします。同様のアプローチを使用して、直接圧縮されたフォーマットでファイルをエクスポートすることができます: - -```sql -SELECT * -FROM for_csv -INTO OUTFILE 'data_csv.csv.gz' -COMPRESSION 'gzip' FORMAT CSV -``` - -これにより、圧縮された`data_csv.csv.gz`ファイルが作成されます。 - -## その他のフォーマット {#other-formats} - -ClickHouseは、さまざまなシナリオやプラットフォームをカバーするために多くのフォーマット(テキストとバイナリの両方)をサポートしています。以下の記事でさらに多くのフォーマットやそれらとの作業方法を探ってみてください: - -- **CSVおよびTSVフォーマット** -- [Parquet](parquet.md) -- [JSONフォーマット](/integrations/data-ingestion/data-formats/json/intro.md) -- [Regexおよびテンプレート](templates-regex.md) -- [ネイティブおよびバイナリフォーマット](binary.md) -- [SQLフォーマット](sql.md) - -また、[clickhouse-local](https://clickhouse.com/blog/extracting-converting-querying-local-files-with-sql-clickhouse-local)を確認してください。Clickhouseサーバーを必要とせずに、ローカル/リモートファイルで作業するためのポータブルでフル機能のツールです。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/csv-tsv.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/csv-tsv.md.hash deleted file mode 100644 index 0c9d2454e8e..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/csv-tsv.md.hash +++ /dev/null @@ -1 +0,0 @@ -3242afae8d353526 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/intro.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/intro.md deleted file mode 100644 index 505110261f1..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/intro.md +++ /dev/null @@ -1,47 +0,0 @@ ---- -slug: '/integrations/data-formats' -sidebar_label: '概要' -sidebar_position: 1 -keywords: -- 'clickhouse' -- 'CSV' -- 'TSV' -- 'Parquet' -- 'clickhouse-client' -- 'clickhouse-local' -title: 'ClickHouseへのさまざまなデータ形式からのインポート' -description: 'さまざまなデータ形式をClickHouseにインポートする方法について説明するページ' ---- - - - - -# ClickHouseへのさまざまなデータ形式からのインポート - -このドキュメントのセクションでは、さまざまなファイルタイプからのロードの例を見つけることができます。 - -### [**バイナリ**](/integrations/data-ingestion/data-formats/binary.md) {#binary} - -ClickHouse Native、MessagePack、Protocol Buffers、Cap'n Protoなどのバイナリ形式をエクスポートおよびロードします。 - -### [**CSVおよびTSV**](/integrations/data-ingestion/data-formats/csv-tsv.md) {#csv-and-tsv} - 
-カスタムヘッダーと区切り文字を使用して、TSVを含むCSVファミリーをインポートおよびエクスポートします。 - -### [**JSON**](/integrations/data-ingestion/data-formats/json/intro.md) {#json} - -オブジェクトとしておよび行区切りのNDJSONとしてさまざまな形式のJSONをロードおよびエクスポートします。 - -### [**Parquetデータ**](/integrations/data-ingestion/data-formats/parquet.md) {#parquet-data} - -ParquetやArrowなどの一般的なApache形式を扱います。 - -### [**SQLデータ**](/integrations/data-ingestion/data-formats/sql.md) {#sql-data} - -MySQLやPostgresqlにインポートするためのSQLダンプが必要ですか?他を探す必要はありません。 - -Grafana、TableauなどのBIツールを接続したい場合は、ドキュメントの[可視化カテゴリ](../../data-visualization/index.md)をチェックしてください。 - -## 関連コンテンツ {#related-content} - -- ブログ: [ClickHouseにおけるデータ形式の紹介](https://clickhouse.com/blog/data-formats-clickhouse-csv-tsv-parquet-native) diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/intro.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/intro.md.hash deleted file mode 100644 index 31bc0d7996e..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/intro.md.hash +++ /dev/null @@ -1 +0,0 @@ -871c8848307dc474 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/json/exporting.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/json/exporting.md deleted file mode 100644 index 0b6cc7db1dd..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/json/exporting.md +++ /dev/null @@ -1,180 +0,0 @@ ---- -title: 'JSONのエクスポート' -slug: '/integrations/data-formats/json/exporting' -description: 'ClickHouseからJSONデータをエクスポートする' -keywords: -- 'json' -- 'clickhouse' -- 'formats' -- 'exporting' ---- - - - - -# JSONのエクスポート - -インポートに使用されるほぼすべてのJSON形式は、エクスポートにも使用できます。最も一般的なのは[`JSONEachRow`](/interfaces/formats.md/#jsoneachrow)です: - -```sql -SELECT * FROM sometable FORMAT JSONEachRow -``` -```response -{"path":"Bob_Dolman","month":"2016-11-01","hits":245} -{"path":"1-krona","month":"2017-01-01","hits":4} -{"path":"Ahmadabad-e_Kalij-e_Sofla","month":"2017-01-01","hits":3} -``` - -または、[`JSONCompactEachRow`](/interfaces/formats#jsoncompacteachrow)を使用して、カラム名をスキップすることでディスクスペースを節約できます: - -```sql -SELECT * FROM sometable FORMAT JSONCompactEachRow -``` -```response -["Bob_Dolman", "2016-11-01", 245] -["1-krona", "2017-01-01", 4] -["Ahmadabad-e_Kalij-e_Sofla", "2017-01-01", 3] -``` - -## 文字列としてのデータ型のオーバーライド {#overriding-data-types-as-strings} - -ClickHouseはデータ型を尊重し、基準に従ってJSONをエクスポートします。しかし、すべての値を文字列としてエンコードする必要がある場合は、[JSONStringsEachRow](/interfaces/formats.md/#jsonstringseachrow)形式を使用できます: - -```sql -SELECT * FROM sometable FORMAT JSONStringsEachRow -``` -```response -{"path":"Bob_Dolman","month":"2016-11-01","hits":"245"} -{"path":"1-krona","month":"2017-01-01","hits":"4"} -{"path":"Ahmadabad-e_Kalij-e_Sofla","month":"2017-01-01","hits":"3"} -``` - -これで、`hits`の数値カラムが文字列としてエンコードされます。文字列としてのエクスポートはすべてのJSON*形式でサポートされており、`JSONStrings\*`および`JSONCompactStrings\*`形式を探ることができます: - -```sql -SELECT * FROM sometable FORMAT JSONCompactStringsEachRow -``` -```response -["Bob_Dolman", "2016-11-01", "245"] -["1-krona", "2017-01-01", "4"] -["Ahmadabad-e_Kalij-e_Sofla", "2017-01-01", "3"] -``` - -## データと共にメタデータをエクスポート {#exporting-metadata-together-with-data} - -一般的な[JSON](/interfaces/formats.md/#json)形式は、アプリで人気があり、結果データだけでなくカラムの型やクエリの統計もエクスポートします: - -```sql -SELECT * FROM sometable FORMAT JSON -``` -```response -{ - "meta": - [ - { - "name": "path", - "type": "String" - }, - ... 
- ], - - "data": - [ - { - "path": "Bob_Dolman", - "month": "2016-11-01", - "hits": 245 - }, - ... - ], - - "rows": 3, - - "statistics": - { - "elapsed": 0.000497457, - "rows_read": 3, - "bytes_read": 87 - } -} -``` - -[JSONCompact](/interfaces/formats.md/#jsoncompact)形式は、同じメタデータを印刷しますが、データ自体はコンパクトな形式を使用します: - -```sql -SELECT * FROM sometable FORMAT JSONCompact -``` -```response -{ - "meta": - [ - { - "name": "path", - "type": "String" - }, - ... - ], - - "data": - [ - ["Bob_Dolman", "2016-11-01", 245], - ["1-krona", "2017-01-01", 4], - ["Ahmadabad-e_Kalij-e_Sofla", "2017-01-01", 3] - ], - - "rows": 3, - - "statistics": - { - "elapsed": 0.00074981, - "rows_read": 3, - "bytes_read": 87 - } -} -``` - -すべての値を文字列としてエンコードするために、[`JSONStrings`](/interfaces/formats.md/#jsonstrings)や[`JSONCompactStrings`](/interfaces/formats.md/#jsoncompactstrings)のバリアントを考慮してください。 - -## JSONデータと構造をコンパクトにエクスポートする方法 {#compact-way-to-export-json-data-and-structure} - -データとその構造の両方を効率的に持つ方法は、[`JSONCompactEachRowWithNamesAndTypes`](/interfaces/formats.md/#jsoncompacteachrowwithnamesandtypes)形式を使用することです: - -```sql -SELECT * FROM sometable FORMAT JSONCompactEachRowWithNamesAndTypes -``` -```response -["path", "month", "hits"] -["String", "Date", "UInt32"] -["Bob_Dolman", "2016-11-01", 245] -["1-krona", "2017-01-01", 4] -["Ahmadabad-e_Kalij-e_Sofla", "2017-01-01", 3] -``` - -これにより、カラム名と型のヘッダー行2つが先頭にあるコンパクトなJSON形式が使用されます。この形式は、別のClickHouseインスタンス(または他のアプリ)にデータを取り込むために使用できます。 - -## JSONをファイルにエクスポートする {#exporting-json-to-a-file} - -エクスポートされたJSONデータをファイルに保存するには、[INTO OUTFILE](/sql-reference/statements/select/into-outfile.md)句を使用できます: - -```sql -SELECT * FROM sometable INTO OUTFILE 'out.json' FORMAT JSONEachRow -``` -```response -36838935 rows in set. Elapsed: 2.220 sec. Processed 36.84 million rows, 1.27 GB (16.60 million rows/s., 572.47 MB/s.) -``` - -ClickHouseはわずか2秒でほぼ3700万のレコードをJSONファイルにエクスポートしました。また、`COMPRESSION`句を使用して、オンザフライで圧縮を有効にしてエクスポートすることもできます: - -```sql -SELECT * FROM sometable INTO OUTFILE 'out.json.gz' FORMAT JSONEachRow -``` -```response -36838935 rows in set. Elapsed: 22.680 sec. Processed 36.84 million rows, 1.27 GB (1.62 million rows/s., 56.02 MB/s.) 
-``` - -それを達成するのに時間がかかりますが、はるかに小さな圧縮ファイルが生成されます: - -```bash -2.2G out.json -576M out.json.gz -``` diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/json/exporting.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/json/exporting.md.hash deleted file mode 100644 index 8bb667ebdf7..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/json/exporting.md.hash +++ /dev/null @@ -1 +0,0 @@ -2fb664d5f5dc0517 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/json/formats.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/json/formats.md deleted file mode 100644 index 1cca19b3036..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/json/formats.md +++ /dev/null @@ -1,462 +0,0 @@ ---- -title: '他のJSON形式の取り扱い' -slug: '/integrations/data-formats/json/other-formats' -description: '他のJSON形式の取り扱い' -sidebar_label: '他の形式の取り扱い' -keywords: -- 'json' -- 'formats' -- 'json formats' ---- - - - - - -# その他のJSONフォーマットの取り扱い - -以前のJSONデータの読み込みの例では、[`JSONEachRow`](/interfaces/formats/JSONEachRow) (`NDJSON`) の使用を前提としています。このフォーマットは、各JSON行のキーをカラムとして読み込みます。例えば: - -```sql -SELECT * -FROM s3('https://datasets-documentation.s3.eu-west-3.amazonaws.com/pypi/json/*.json.gz', JSONEachRow) -LIMIT 5 - -┌───────date─┬─country_code─┬─project────────────┬─type────────┬─installer────┬─python_minor─┬─system─┬─version─┐ -│ 2022-11-15 │ CN │ clickhouse-connect │ bdist_wheel │ bandersnatch │ │ │ 0.2.8 │ -│ 2022-11-15 │ CN │ clickhouse-connect │ bdist_wheel │ bandersnatch │ │ │ 0.2.8 │ -│ 2022-11-15 │ CN │ clickhouse-connect │ bdist_wheel │ bandersnatch │ │ │ 0.2.8 │ -│ 2022-11-15 │ CN │ clickhouse-connect │ bdist_wheel │ bandersnatch │ │ │ 0.2.8 │ -│ 2022-11-15 │ CN │ clickhouse-connect │ bdist_wheel │ bandersnatch │ │ │ 0.2.8 │ -└────────────┴──────────────┴────────────────────┴─────────────┴──────────────┴──────────────┴────────┴─────────┘ - -5 rows in set. Elapsed: 0.449 sec. 
-``` - -このフォーマットは一般的にJSONで最もよく使用される形式ですが、ユーザーは他の形式に出会ったり、JSONを単一オブジェクトとして読み取る必要があります。 - -以下に、他の一般的な形式でのJSONの読み込みとロードの例を示します。 - -## JSONをオブジェクトとして読み込む {#reading-json-as-an-object} - -これまでの例は、`JSONEachRow` が改行区切りのJSONを読み込み、各行がテーブルの行にマッピングされ、各キーがカラムに対応する方法を示しています。これは、JSONが予測可能で各カラムのタイプが単一である場合に理想的です。 - -対照的に、`JSONAsObject` は各行を単一の `JSON` オブジェクトとして扱い、それを [`JSON`](/sql-reference/data-types/newjson) 型の単一カラムに保存します。これにより、ネストされたJSONペイロードや、キーが動的で潜在的に複数のタイプを持つ場合により適しています。 - -`JSONEachRow` を行単位の挿入用として使用し、柔軟または動的なJSONデータを格納する際には [`JSONAsObject`](/interfaces/formats/JSONAsObject) を使用してください。 - -前述の例と対照的に、以下のクエリでは同じデータを1行のJSONオブジェクトとして読み取ります: - -```sql -SELECT * -FROM s3('https://datasets-documentation.s3.eu-west-3.amazonaws.com/pypi/json/*.json.gz', JSONAsObject) -LIMIT 5 - -┌─json─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┐ -│ {"country_code":"CN","date":"2022-11-15","installer":"bandersnatch","project":"clickhouse-connect","python_minor":"","system":"","type":"bdist_wheel","version":"0.2.8"} │ -│ {"country_code":"CN","date":"2022-11-15","installer":"bandersnatch","project":"clickhouse-connect","python_minor":"","system":"","type":"bdist_wheel","version":"0.2.8"} │ -│ {"country_code":"CN","date":"2022-11-15","installer":"bandersnatch","project":"clickhouse-connect","python_minor":"","system":"","type":"bdist_wheel","version":"0.2.8"} │ -│ {"country_code":"CN","date":"2022-11-15","installer":"bandersnatch","project":"clickhouse-connect","python_minor":"","system":"","type":"bdist_wheel","version":"0.2.8"} │ -│ {"country_code":"CN","date":"2022-11-15","installer":"bandersnatch","project":"clickhouse-connect","python_minor":"","system":"","type":"bdist_wheel","version":"0.2.8"} │ -└──────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┘ - -5 rows in set. Elapsed: 0.338 sec. -``` - -`JSONAsObject` フォーマットは、単一のJSONオブジェクトカラムを使用してテーブルに行を挿入するのに便利です。例: - -```sql -CREATE TABLE pypi -( - `json` JSON -) -ENGINE = MergeTree -ORDER BY tuple(); - -INSERT INTO pypi SELECT * -FROM s3('https://datasets-documentation.s3.eu-west-3.amazonaws.com/pypi/json/*.json.gz', JSONAsObject) -LIMIT 5; - -SELECT * -FROM pypi -LIMIT 2; - -┌─json─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┐ -│ {"country_code":"CN","date":"2022-11-15","installer":"bandersnatch","project":"clickhouse-connect","python_minor":"","system":"","type":"bdist_wheel","version":"0.2.8"} │ -│ {"country_code":"CN","date":"2022-11-15","installer":"bandersnatch","project":"clickhouse-connect","python_minor":"","system":"","type":"bdist_wheel","version":"0.2.8"} │ -└──────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┘ - -2 rows in set. Elapsed: 0.003 sec. -``` - -`JSONAsObject` フォーマットは、オブジェクトの構造が不一致な場合の改行区切りJSONを読み取るのにも役立ちます。例えば、キーが行ごとに型が変わる(時には文字列、時にはオブジェクトになる)場合です。そのような場合、ClickHouseは `JSONEachRow` を使用して安定したスキーマを推測できず、`JSONAsObject` により厳密な型制約なしでデータを取り込むことができ、各JSON行を単一のカラムに全体として保存します。以下の例では `JSONEachRow` が失敗することに注意してください: - -```sql -SELECT count() -FROM s3('https://clickhouse-public-datasets.s3.amazonaws.com/bluesky/file_0001.json.gz', 'JSONEachRow') - -Elapsed: 1.198 sec. 
- -Received exception from server (version 24.12.1): -Code: 636. DB::Exception: Received from sql-clickhouse.clickhouse.com:9440. DB::Exception: The table structure cannot be extracted from a JSONEachRow format file. Error: -Code: 117. DB::Exception: JSON objects have ambiguous data: in some objects path 'record.subject' has type 'String' and in some - 'Tuple(`$type` String, cid String, uri String)'. You can enable setting input_format_json_use_string_type_for_ambiguous_paths_in_named_tuples_inference_from_objects to use String type for path 'record.subject'. (INCORRECT_DATA) (version 24.12.1.18239 (official build)) -To increase the maximum number of rows/bytes to read for structure determination, use setting input_format_max_rows_to_read_for_schema_inference/input_format_max_bytes_to_read_for_schema_inference. -You can specify the structure manually: (in file/uri bluesky/file_0001.json.gz). (CANNOT_EXTRACT_TABLE_STRUCTURE) -``` - -逆に、`JSONAsObject` はこの場合に使用でき、`JSON` 型は同じサブカラムに対して複数の型をサポートします。 - -```sql -SELECT count() -FROM s3('https://clickhouse-public-datasets.s3.amazonaws.com/bluesky/file_0001.json.gz', 'JSONAsObject') - -┌─count()─┐ -│ 1000000 │ -└─────────┘ - -1 row in set. Elapsed: 0.480 sec. Processed 1.00 million rows, 256.00 B (2.08 million rows/s., 533.76 B/s.) -``` - -## JSONオブジェクトの配列 {#array-of-json-objects} - -JSONデータの最も一般的な形式の一つは、JSON配列内にJSONオブジェクトのリストを持つことです。この例を見てみましょう。[この例](../assets/list.json): - -```bash -> cat list.json -[ - { - "path": "Akiba_Hebrew_Academy", - "month": "2017-08-01", - "hits": 241 - }, - { - "path": "Aegithina_tiphia", - "month": "2018-02-01", - "hits": 34 - }, - ... -] -``` - -このようなデータのためのテーブルを作成しましょう: - -```sql -CREATE TABLE sometable -( - `path` String, - `month` Date, - `hits` UInt32 -) -ENGINE = MergeTree -ORDER BY tuple(month, path) -``` - -JSONオブジェクトのリストをインポートするには、[`JSONEachRow`](/interfaces/formats.md/#jsoneachrow) フォーマットを使用します([list.json](../assets/list.json) ファイルからデータを挿入します): - -```sql -INSERT INTO sometable -FROM INFILE 'list.json' -FORMAT JSONEachRow -``` - -ローカルファイルからデータをロードするために [FROM INFILE](/sql-reference/statements/insert-into.md/#inserting-data-from-a-file) 節を使用しました。インポートが成功したことが確認できます: - -```sql -SELECT * -FROM sometable -``` -```response -┌─path──────────────────────┬──────month─┬─hits─┐ -│ 1971-72_Utah_Stars_season │ 2016-10-01 │ 1 │ -│ Akiba_Hebrew_Academy │ 2017-08-01 │ 241 │ -│ Aegithina_tiphia │ 2018-02-01 │ 34 │ -└───────────────────────────┴────────────┴──────┘ -``` - -## JSONオブジェクトのキー {#json-object-keys} - -場合によっては、JSONオブジェクトのリストが配列要素ではなくオブジェクトプロパティとしてエンコードされることがあります(例えば、[objects.json](../assets/objects.json) を見てください): - -```bash -cat objects.json -``` - -```response -{ - "a": { - "path":"April_25,_2017", - "month":"2018-01-01", - "hits":2 - }, - "b": { - "path":"Akahori_Station", - "month":"2016-06-01", - "hits":11 - }, - ... 
-} -``` - -ClickHouseは、この種のデータから読み込むために[`JSONObjectEachRow`](/interfaces/formats.md/#jsonobjecteachrow) フォーマットを使用できます: - -```sql -INSERT INTO sometable FROM INFILE 'objects.json' FORMAT JSONObjectEachRow; -SELECT * FROM sometable; -``` -```response -┌─path────────────┬──────month─┬─hits─┐ -│ Abducens_palsy │ 2016-05-01 │ 28 │ -│ Akahori_Station │ 2016-06-01 │ 11 │ -│ April_25,_2017 │ 2018-01-01 │ 2 │ -└─────────────────┴────────────┴──────┘ -``` - -### 親オブジェクトキーの値を指定する {#specifying-parent-object-key-values} - -親オブジェクトキーの値もテーブルに保存したいとします。その場合、以下のオプションを使用してキーの値を保存するカラムの名前を定義できます:[以下のオプション](/operations/settings/settings-formats.md/#format_json_object_each_row_column_for_object_name): - -```sql -SET format_json_object_each_row_column_for_object_name = 'id' -``` - -現在、[`file()`](/sql-reference/functions/files.md/#file) 関数を使用して元のJSONファイルから読み込まれるデータを確認できます: - -```sql -SELECT * FROM file('objects.json', JSONObjectEachRow) -``` -```response -┌─id─┬─path────────────┬──────month─┬─hits─┐ -│ a │ April_25,_2017 │ 2018-01-01 │ 2 │ -│ b │ Akahori_Station │ 2016-06-01 │ 11 │ -│ c │ Abducens_palsy │ 2016-05-01 │ 28 │ -└────┴─────────────────┴────────────┴──────┘ -``` - -`id` カラムがキー値で正しく埋め込まれていることに注意してください。 - -## JSON配列 {#json-arrays} - -時には、スペースを節約するために、JSONファイルがオブジェクトの代わりに配列でエンコードされます。この場合、[JSON配列のリスト](../assets/arrays.json)を扱います: - -```bash -cat arrays.json -``` -```response -["Akiba_Hebrew_Academy", "2017-08-01", 241], -["Aegithina_tiphia", "2018-02-01", 34], -["1971-72_Utah_Stars_season", "2016-10-01", 1] -``` - -この場合、ClickHouseはこのデータをロードし、配列内の順序に基づいて各値を対応するカラムに割り当てます。これには[`JSONCompactEachRow`](/interfaces/formats.md/#jsoncompacteachrow)フォーマットを使用します: - -```sql -SELECT * FROM sometable -``` -```response -┌─c1────────────────────────┬─────────c2─┬──c3─┐ -│ Akiba_Hebrew_Academy │ 2017-08-01 │ 241 │ -│ Aegithina_tiphia │ 2018-02-01 │ 34 │ -│ 1971-72_Utah_Stars_season │ 2016-10-01 │ 1 │ -└───────────────────────────┴────────────┴─────┘ -``` - -### JSON配列から個別カラムをインポートする {#importing-individual-columns-from-json-arrays} - -場合によっては、データが行単位ではなくカラム単位でエンコードされることがあります。この場合、親JSONオブジェクトには値を持つカラムが含まれています。[以下のファイル](../assets/columns.json)を見てみましょう: - -```bash -cat columns.json -``` -```response -{ - "path": ["2007_Copa_America", "Car_dealerships_in_the_USA", "Dihydromyricetin_reductase"], - "month": ["2016-07-01", "2015-07-01", "2015-07-01"], - "hits": [178, 11, 1] -} -``` - -ClickHouseは[`JSONColumns`](/interfaces/formats.md/#jsoncolumns)フォーマットを使用してそのようなデータを解析します: - -```sql -SELECT * FROM file('columns.json', JSONColumns) -``` -```response -┌─path───────────────────────┬──────month─┬─hits─┐ -│ 2007_Copa_America │ 2016-07-01 │ 178 │ -│ Car_dealerships_in_the_USA │ 2015-07-01 │ 11 │ -│ Dihydromyricetin_reductase │ 2015-07-01 │ 1 │ -└────────────────────────────┴────────────┴──────┘ -``` - -カラムの配列を扱う際には、[`JSONCompactColumns`](/interfaces/formats.md/#jsoncompactcolumns)フォーマットを使用することもできます: - -```sql -SELECT * FROM file('columns-array.json', JSONCompactColumns) -``` -```response -┌─c1──────────────┬─────────c2─┬─c3─┐ -│ Heidenrod │ 2017-01-01 │ 10 │ -│ Arthur_Henrique │ 2016-11-01 │ 12 │ -│ Alan_Ebnother │ 2015-11-01 │ 66 │ -└─────────────────┴────────────┴────┘ -``` - -## JSONオブジェクトを解析するのではなく保存する {#saving-json-objects-instead-of-parsing} - -JSONオブジェクトを解析するのではなく、単一の `String` (または `JSON`) カラムに保存したい場合があります。これは、異なる構造のJSONオブジェクトのリストを扱う際に便利です。[このファイル](../assets/custom.json)を例に取りますが、親リスト内に複数の異なるJSONオブジェクトがあります: - -```bash -cat custom.json -``` -```response -[ - {"name": "Joe", "age": 99, "type": "person"}, - {"url": "/my.post.MD", 
"hits": 1263, "type": "post"}, - {"message": "Warning on disk usage", "type": "log"} -] -``` - -次のテーブルに元のJSONオブジェクトを保存したいとします: - -```sql -CREATE TABLE events -( - `data` String -) -ENGINE = MergeTree -ORDER BY () -``` - -このテーブルにファイルからデータをロードするために、[`JSONAsString`](/interfaces/formats.md/#jsonasstring)フォーマットを使用してJSONオブジェクトを解析せずに保持します: - -```sql -INSERT INTO events (data) -FROM INFILE 'custom.json' -FORMAT JSONAsString -``` - -そして、保存されたオブジェクトをクエリするために[JSON関数](/sql-reference/functions/json-functions.md)を使用できます: - -```sql -SELECT - JSONExtractString(data, 'type') AS type, - data -FROM events -``` -```response -┌─type───┬─data─────────────────────────────────────────────────┐ -│ person │ {"name": "Joe", "age": 99, "type": "person"} │ -│ post │ {"url": "/my.post.MD", "hits": 1263, "type": "post"} │ -│ log │ {"message": "Warning on disk usage", "type": "log"} │ -└────────┴──────────────────────────────────────────────────────┘ -``` - -`JSONAsString` は、通常 `JSONEachRow` フォーマットで使用されるJSONオブジェクト・パー・ライン形式のファイルにおいても問題なく機能することに注意してください。 - -## ネストされたオブジェクトのスキーマ {#schema-for-nested-objects} - -ネストされたJSONオブジェクト(例:[list-nested.json](../assets/list-nested.json))を扱う場合、明示的なスキーマを定義し、複雑な型 ([`Array`](/sql-reference/data-types/array.md)、[`Object Data Type`](/sql-reference/data-types/object-data-type)または [`Tuple`](/sql-reference/data-types/tuple.md))を使用してデータをロードできます: - -```sql -SELECT * -FROM file('list-nested.json', JSONEachRow, 'page Tuple(path String, title String, owner_id UInt16), month Date, hits UInt32') -LIMIT 1 -``` -```response -┌─page───────────────────────────────────────────────┬──────month─┬─hits─┐ -│ ('Akiba_Hebrew_Academy','Akiba Hebrew Academy',12) │ 2017-08-01 │ 241 │ -└────────────────────────────────────────────────────┴────────────┴──────┘ -``` - -## ネストされたJSONオブジェクトへのアクセス {#accessing-nested-json-objects} - -[ネストされたJSONキー](../assets/list-nested.json)に参照するには、[以下の設定オプション](/operations/settings/settings-formats.md/#input_format_import_nested_json)を有効にします: - -```sql -SET input_format_import_nested_json = 1 -``` - -これにより、ドット記法を使用してネストされたJSONオブジェクトキーにアクセスできるようになります(機能させるためにはバックティック記号で囲むことを忘れないでください): - -```sql -SELECT * -FROM file('list-nested.json', JSONEachRow, '`page.owner_id` UInt32, `page.title` String, month Date, hits UInt32') -LIMIT 1 -``` -```results -┌─page.owner_id─┬─page.title───────────┬──────month─┬─hits─┐ -│ 12 │ Akiba Hebrew Academy │ 2017-08-01 │ 241 │ -└───────────────┴──────────────────────┴────────────┴──────┘ -``` - -これにより、ネストされたJSONオブジェクトをフラット化したり、いくつかのネストされた値を別のカラムとして保存したりできます。 - -## 不明なカラムのスキップ {#skipping-unknown-columns} - -デフォルトでは、ClickHouseはJSONデータをインポートする際に不明なカラムを無視します。`month` カラムなしで元のファイルをテーブルにインポートしてみましょう: - -```sql -CREATE TABLE shorttable -( - `path` String, - `hits` UInt32 -) -ENGINE = MergeTree -ORDER BY path -``` - -3カラムの[元のJSONデータ](../assets/list.json)をこのテーブルに挿入できます: - -```sql -INSERT INTO shorttable FROM INFILE 'list.json' FORMAT JSONEachRow; -SELECT * FROM shorttable -``` -```response -┌─path──────────────────────┬─hits─┐ -│ 1971-72_Utah_Stars_season │ 1 │ -│ Aegithina_tiphia │ 34 │ -│ Akiba_Hebrew_Academy │ 241 │ -└───────────────────────────┴──────┘ -``` - -ClickHouseはインポート時に不明なカラムを無視します。この挙動は、[input_format_skip_unknown_fields](/operations/settings/settings-formats.md/#input_format_skip_unknown_fields) 設定オプションで無効にできます: - -```sql -SET input_format_skip_unknown_fields = 0; -INSERT INTO shorttable FROM INFILE 'list.json' FORMAT JSONEachRow; -``` -```response -Ok. -Exception on client: -Code: 117. 
DB::Exception: Unknown field found while parsing JSONEachRow format: month: (in file/uri /data/clickhouse/user_files/list.json): (at row 1) -``` - -ClickHouseは不一致なJSONとテーブルカラム構造のケースで例外をスローします。 - -## BSON {#bson} - -ClickHouseは、[BSON](https://bsonspec.org/) エンコードファイルからのエクスポートとインポートをサポートしています。このフォーマットは、[MongoDB](https://github.com/mongodb/mongo) データベースなど、一部のDBMSで使用されます。 - -BSONデータをインポートするには、[BSONEachRow](/interfaces/formats.md/#bsoneachrow)フォーマットを使用します。以下の[BSONファイル](../assets/data.bson)からデータをインポートします: - -```sql -SELECT * FROM file('data.bson', BSONEachRow) -``` -```response -┌─path──────────────────────┬─month─┬─hits─┐ -│ Bob_Dolman │ 17106 │ 245 │ -│ 1-krona │ 17167 │ 4 │ -│ Ahmadabad-e_Kalij-e_Sofla │ 17167 │ 3 │ -└───────────────────────────┴───────┴──────┘ -``` - -同じフォーマットを使用してBSONファイルへのエクスポートも行えます: - -```sql -SELECT * -FROM sometable -INTO OUTFILE 'out.bson' -FORMAT BSONEachRow -``` - -その後、データは `out.bson` ファイルにエクスポートされます。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/json/formats.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/json/formats.md.hash deleted file mode 100644 index a5cf4762999..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/json/formats.md.hash +++ /dev/null @@ -1 +0,0 @@ -908565680797f79e diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/json/inference.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/json/inference.md deleted file mode 100644 index c9a63526ee6..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/json/inference.md +++ /dev/null @@ -1,399 +0,0 @@ ---- -title: 'JSON schema inference' -slug: '/integrations/data-formats/json/inference' -description: 'How to use JSON schema inference' -keywords: -- 'json' -- 'schema' -- 'inference' -- 'schema inference' ---- - -import PrivatePreviewBadge from '@theme/badges/PrivatePreviewBadge'; - -ClickHouseは、JSONデータの構造を自動的に特定できます。これにより、`clickhouse-local`やS3バケットを介してディスク上のJSONデータを直接クエリすることができ、また、ClickHouseにデータを読み込む前にスキーマを自動的に作成することも可能です。 - -## 型推論を使用するタイミング {#when-to-use-type-inference} - -* **一貫した構造** - タイプを推測するためのデータには、興味のあるすべてのキーが含まれています。タイプ推論は、[最大行数](/operations/settings/formats#input_format_max_rows_to_read_for_schema_inference)または[バイト数](/operations/settings/formats#input_format_max_bytes_to_read_for_schema_inference)までのデータをサンプリングすることに基づいています。サンプル後のデータで追加のカラムがある場合、それらは無視され、クエリすることはできません。 -* **一貫した型** - 特定のキーのデータ型は互換性がある必要があります。つまり、一方の型を他方に自動的に強制変換できる必要があります。 - -もし、新しいキーが追加される動的なJSONがある場合や、同じパスに対して複数の型が可能な場合は、["非構造化データと動的データの扱い"](/integrations/data-formats/json/inference#working-with-semi-structured-data)を参照してください。 - -## 型の検出 {#detecting-types} - -以下の内容は、JSONが一貫した構造を持ち、各パスに対して単一の型を持つと仮定しています。 - -前述の例では、`NDJSON`形式の[Python PyPIデータセット](https://clickpy.clickhouse.com/)のシンプルなバージョンを使用しました。このセクションでは、ネストされた構造を持つより複雑なデータセット-2.5百万の学術論文を含む[arXivデータセット](https://www.kaggle.com/datasets/Cornell-University/arxiv?resource=download)を探ります。このデータセットの各行は、公開された学術論文を表しています。以下に例を示します: - -```json -{ - "id": "2101.11408", - "submitter": "Daniel Lemire", - "authors": "Daniel Lemire", - "title": "Number Parsing at a Gigabyte per Second", - "comments": "Software at https://github.com/fastfloat/fast_float and\n https://github.com/lemire/simple_fastfloat_benchmark/", - "journal-ref": "Software: Practice and Experience 51 (8), 2021", - "doi": 
"10.1002/spe.2984", - "report-no": null, - "categories": "cs.DS cs.MS", - "license": "http://creativecommons.org/licenses/by/4.0/", - "abstract": "With disks and networks providing gigabytes per second ....\n", - "versions": [ - { - "created": "Mon, 11 Jan 2021 20:31:27 GMT", - "version": "v1" - }, - { - "created": "Sat, 30 Jan 2021 23:57:29 GMT", - "version": "v2" - } - ], - "update_date": "2022-11-07", - "authors_parsed": [ - [ - "Lemire", - "Daniel", - "" - ] - ] -} -``` - -このデータには、前の例よりも遥かに複雑なスキーマが必要です。以下にこのスキーマの定義プロセスを概説し、`Tuple`や`Array`などの複雑な型を紹介します。 - -このデータセットは、`s3://datasets-documentation/arxiv/arxiv.json.gz`というパブリックS3バケットに保存されています。 - -上記のデータセットにはネストされたJSONオブジェクトが含まれていることがわかります。ユーザーはスキーマをドラフトし、バージョン管理する必要がありますが、推論によりデータから型を推測できます。これにより、スキーマのDDLが自動生成され、手動で作成する必要がなくなり、開発プロセスが加速します。 - -:::note 自動フォーマット検出 -スキーマを検出するだけでなく、JSONスキーマ推論はファイル拡張子と内容から自動的にデータのフォーマットを推測します。上記のファイルは、その結果としてNDJSONとして自動的に検出されます。 -::: - -[s3関数](/sql-reference/table-functions/s3)を使用した`DESCRIBE`コマンドは、推測される型を示します。 - -```sql -DESCRIBE TABLE s3('https://datasets-documentation.s3.eu-west-3.amazonaws.com/arxiv/arxiv.json.gz') -SETTINGS describe_compact_output = 1 -``` -```response -┌─name───────────┬─type────────────────────────────────────────────────────────────────────┐ -│ id │ Nullable(String) │ -│ submitter │ Nullable(String) │ -│ authors │ Nullable(String) │ -│ title │ Nullable(String) │ -│ comments │ Nullable(String) │ -│ journal-ref │ Nullable(String) │ -│ doi │ Nullable(String) │ -│ report-no │ Nullable(String) │ -│ categories │ Nullable(String) │ -│ license │ Nullable(String) │ -│ abstract │ Nullable(String) │ -│ versions │ Array(Tuple(created Nullable(String),version Nullable(String))) │ -│ update_date │ Nullable(Date) │ -│ authors_parsed │ Array(Array(Nullable(String))) │ -└────────────────┴─────────────────────────────────────────────────────────────────────────┘ -``` -:::note Nullの回避 -多くのカラムがNullableとして検出されていることがわかります。私たちは[Nullable](/sql-reference/data-types/nullable#storage-features)型の使用を必要な場合を除いて推奨していません。[schema_inference_make_columns_nullable](/operations/settings/formats#schema_inference_make_columns_nullable)を使用して、Nullableが適用される場合の動作を制御できます。 -::: - -ほとんどのカラムは自動的に`String`として検出され、`update_date`カラムは正しく`Date`として検出されました。`versions`カラムは`Array(Tuple(created String, version String))`として生成され、オブジェクトのリストを保存します。`authors_parsed`はネストされた配列のために`Array(Array(String))`として定義されています。 - -:::note 型検出の制御 -日付や日時の自動検出は、それぞれ[`input_format_try_infer_dates`](/operations/settings/formats#input_format_try_infer_dates)および[`input_format_try_infer_datetimes`](/operations/settings/formats#input_format_try_infer_datetimes)の設定で制御できます(両方ともデフォルトで有効)。オブジェクトをタプルとして推測することは、[`input_format_json_try_infer_named_tuples_from_objects`](/operations/settings/formats#input_format_json_try_infer_named_tuples_from_objects)の設定で制御されます。他のJSONのスキーマ推論を制御する設定、数値の自動検出などは、[こちら](/interfaces/schema-inference#text-formats)で見つけることができます。 -::: - -## JSONのクエリ {#querying-json} - -以下の内容は、JSONが一貫した構造を持ち、各パスに対して単一の型を持つと仮定しています。 - -スキーマ推論に依存して、JSONデータをその場でクエリできます。以下では、日付と配列が自動的に検出されるという事実を利用して、各年のトップ著者を見つけます。 - -```sql -SELECT - toYear(update_date) AS year, - authors, - count() AS c -FROM s3('https://datasets-documentation.s3.eu-west-3.amazonaws.com/arxiv/arxiv.json.gz') -GROUP BY - year, - authors -ORDER BY - year ASC, - c DESC -LIMIT 1 BY year - -┌─year─┬─authors────────────────────────────────────┬───c─┐ -│ 2007 │ The BABAR Collaboration, B. Aubert, et al │ 98 │ -│ 2008 │ The OPAL collaboration, G. 
Abbiendi, et al │ 59 │ -│ 2009 │ Ashoke Sen │ 77 │ -│ 2010 │ The BABAR Collaboration, B. Aubert, et al │ 117 │ -│ 2011 │ Amelia Carolina Sparavigna │ 21 │ -│ 2012 │ ZEUS Collaboration │ 140 │ -│ 2013 │ CMS Collaboration │ 125 │ -│ 2014 │ CMS Collaboration │ 87 │ -│ 2015 │ ATLAS Collaboration │ 118 │ -│ 2016 │ ATLAS Collaboration │ 126 │ -│ 2017 │ CMS Collaboration │ 122 │ -│ 2018 │ CMS Collaboration │ 138 │ -│ 2019 │ CMS Collaboration │ 113 │ -│ 2020 │ CMS Collaboration │ 94 │ -│ 2021 │ CMS Collaboration │ 69 │ -│ 2022 │ CMS Collaboration │ 62 │ -│ 2023 │ ATLAS Collaboration │ 128 │ -│ 2024 │ ATLAS Collaboration │ 120 │ -└──────┴────────────────────────────────────────────┴─────┘ - -18行の結果がセットに含まれています。経過時間: 20.172秒。処理された行数: 252万、サイズ: 1.39 GB (124.72千行/秒、68.76 MB/秒) -``` - -スキーマ推論により、スキーマを指定することなくJSONファイルをクエリでき、アドホックなデータ分析タスクを加速することができます。 - -## テーブルの作成 {#creating-tables} - -スキーマ推論に依存して、テーブルのスキーマを作成できます。以下の`CREATE AS EMPTY`コマンドは、テーブルのDDLを推論させ、テーブルを作成します。これはデータを読み込むことはありません: - -```sql -CREATE TABLE arxiv -ENGINE = MergeTree -ORDER BY update_date EMPTY -AS SELECT * -FROM s3('https://datasets-documentation.s3.eu-west-3.amazonaws.com/arxiv/arxiv.json.gz') -SETTINGS schema_inference_make_columns_nullable = 0 -``` - -テーブルスキーマを確認するために、`SHOW CREATE TABLE`コマンドを使用します: - -```sql -SHOW CREATE TABLE arxiv - -CREATE TABLE arxiv -( - `id` String, - `submitter` String, - `authors` String, - `title` String, - `comments` String, - `journal-ref` String, - `doi` String, - `report-no` String, - `categories` String, - `license` String, - `abstract` String, - `versions` Array(Tuple(created String, version String)), - `update_date` Date, - `authors_parsed` Array(Array(String)) -) -ENGINE = MergeTree -ORDER BY update_date -``` - -上記がこのデータの正しいスキーマです。スキーマ推論はデータをサンプリングして読み取り、行ごとにデータを読み取ります。カラムの値はフォーマットに従って抽出され、型を決定するために再帰的なパーサーとヒューリスティクスが使用されます。スキーマ推論において読み取る最大行数とバイト数は、設定[`input_format_max_rows_to_read_for_schema_inference`](/operations/settings/formats#input_format_max_rows_to_read_for_schema_inference)(デフォルト25000行)および[`input_format_max_bytes_to_read_for_schema_inference`](/operations/settings/formats#input_format_max_bytes_to_read_for_schema_inference)(デフォルト32MB)で制御されます。検出が正しくない場合、ユーザーは[こちら]( /operations/settings/formats#schema_inference_make_columns_nullable)に記載されているようにヒントを提供できます。 - -### スニペットからのテーブル作成 {#creating-tables-from-snippets} - -上記の例では、S3上のファイルを使用してテーブルスキーマを作成しました。ユーザーは単一の行スニペットからスキーマを作成したいかもしれません。これは、以下のように[format](/sql-reference/table-functions/format)関数を使用して達成できます: - -```sql -CREATE TABLE arxiv -ENGINE = MergeTree -ORDER BY update_date EMPTY -AS SELECT * -FROM format(JSONEachRow, '{"id":"2101.11408","submitter":"Daniel Lemire","authors":"Daniel Lemire","title":"Number Parsing at a Gigabyte per Second","comments":"Software at https://github.com/fastfloat/fast_float and","doi":"10.1002/spe.2984","report-no":null,"categories":"cs.DS cs.MS","license":"http://creativecommons.org/licenses/by/4.0/","abstract":"Withdisks and networks providing gigabytes per second ","versions":[{"created":"Mon, 11 Jan 2021 20:31:27 GMT","version":"v1"},{"created":"Sat, 30 Jan 2021 23:57:29 GMT","version":"v2"}],"update_date":"2022-11-07","authors_parsed":[["Lemire","Daniel",""]]}') SETTINGS schema_inference_make_columns_nullable = 0 - -SHOW CREATE TABLE arxiv - -CREATE TABLE arxiv -( - `id` String, - `submitter` String, - `authors` String, - `title` String, - `comments` String, - `doi` String, - `report-no` String, - `categories` String, - `license` String, - `abstract` String, - `versions` Array(Tuple(created String, version 
String)), - `update_date` Date, - `authors_parsed` Array(Array(String)) -) -ENGINE = MergeTree -ORDER BY update_date -``` - -## JSONデータの読み込み {#loading-json-data} - -以下の内容は、JSONが一貫した構造を持ち、各パスに対して単一の型を持つと仮定しています。 - -前述のコマンドで、データを読み込むことができるテーブルが作成されました。次に、以下のように`INSERT INTO SELECT`を使用してデータをテーブルに挿入できます: - -```sql -INSERT INTO arxiv SELECT * -FROM s3('https://datasets-documentation.s3.eu-west-3.amazonaws.com/arxiv/arxiv.json.gz') - -0行の結果がセットに含まれています。経過時間: 38.498秒。処理された行数: 252万、サイズ: 1.39 GB (65.35千行/秒、36.03 MB/秒) -ピークメモリ使用量: 870.67 MiB. -``` - -他のソースからのデータの読み込みの例(例:ファイル)については、[こちら]( /sql-reference/statements/insert-into)を参照してください。 - -データが読み込まれたら、元の構造で行を表示するために形式`PrettyJSONEachRow`を使用してデータをクエリできます: - -```sql -SELECT * -FROM arxiv -LIMIT 1 -FORMAT PrettyJSONEachRow - -{ - "id": "0704.0004", - "submitter": "David Callan", - "authors": "David Callan", - "title": "A determinant of Stirling cycle numbers counts unlabeled acyclic", - "comments": "11 pages", - "journal-ref": "", - "doi": "", - "report-no": "", - "categories": "math.CO", - "license": "", - "abstract": " We show that a determinant of Stirling cycle numbers counts unlabeled acyclic\nsingle-source automata.", - "versions": [ - { - "created": "Sat, 31 Mar 2007 03:16:14 GMT", - "version": "v1" - } - ], - "update_date": "2007-05-23", - "authors_parsed": [ - [ - "Callan", - "David" - ] - ] -} - -1行の結果がセットに含まれています。経過時間: 0.009秒。 -``` - -## エラーの処理 {#handling-errors} - -時には、不正なデータを持つことがあります。特定のカラムが正しい型でない場合や、不正にフォーマットされたJSONオブジェクトが考えられます。その場合、設定[`input_format_allow_errors_num`](/operations/settings/formats#input_format_allow_errors_num)および[`input_format_allow_errors_ratio`](/operations/settings/formats#input_format_allow_errors_ratio)を使用して、データが挿入エラーを引き起こす場合に無視できる行の数を許可できます。また、推論を補助するために[ヒント](/operations/settings/formats#schema_inference_hints)を提供することができます。 - -## 非構造化データと動的データの扱い {#working-with-semi-structured-data} - - - -前述の例では、静的でよく知られたキー名と型を持つJSONを使用しました。しかし、これはしばしば当てはまりません。キーが追加されたり、型が変更されたりすることがあります。これは、可観測性データなどのユースケースで一般的です。 - -ClickHouseは、専用の[`JSON`](/sql-reference/data-types/newjson)型を通じてこれに対応します。 - -もしあなたのJSONが非常に動的で、ユニークなキーが多数あり、同じキーに対して複数の型がある場合、`JSONEachRow`でスキーマ推論を使用して各キーのカラムを推測することはお勧めしません – たとえデータが改行区切りJSON形式であっても。 - -以下は、前述の[Python PyPIデータセット](https://clickpy.clickhouse.com/)の拡張バージョンの例です。ここでは、ランダムなキー値ペアを持つ任意の`tags`カラムを追加しました。 - -```json -{ - "date": "2022-09-22", - "country_code": "IN", - "project": "clickhouse-connect", - "type": "bdist_wheel", - "installer": "bandersnatch", - "python_minor": "", - "system": "", - "version": "0.2.8", - "tags": { - "5gTux": "f3to*PMvaTYZsz!*rtzX1", - "nD8CV": "value" - } -} -``` - -このデータのサンプルは改行区切りJSON形式で公開されています。このファイルでスキーマ推論を試みると、パフォーマンスが悪く、非常に冗長な応答が得られることがわかります: - -```sql -DESCRIBE s3('https://datasets-documentation.s3.eu-west-3.amazonaws.com/pypi/pypi_with_tags/sample_rows.json.gz') - --- 結果は簡略化のため省略 - -9行の結果がセットに含まれています。経過時間: 127.066秒。 -``` - -ここでの主な問題は、スキーマ推論のために`JSONEachRow`フォーマットが使用されていることです。これは、JSONの**各キーに対してカラム型を推測しようとします** – つまり、[`JSON`](/sql-reference/data-types/newjson)型を使用せずにデータに静的なスキーマを適用しようとすることです。 - -ユニークなカラムが何千もあるため、この推論のアプローチは遅くなります。代わりに、ユーザーは`JSONAsObject`フォーマットを使用できます。 - -`JSONAsObject`は、入力全体を単一のJSONオブジェクトとして扱い、それを[`JSON`](/sql-reference/data-types/newjson)型の単一カラムに保存します。これにより、非常に動的またはネストされたJSONペイロードに適しています。 - -```sql -DESCRIBE TABLE s3('https://datasets-documentation.s3.eu-west-3.amazonaws.com/pypi/pypi_with_tags/sample_rows.json.gz', 'JSONAsObject') -SETTINGS describe_compact_output = 1 - -┌─name─┬─type─┐ -│ json │ JSON │ -└──────┴──────┘ - -1行の結果がセットに含まれています。経過時間: 0.005秒。 
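-
--- 補足(想定例): 推論された単一の JSON カラムをそのまま使って、動的データ用のテーブルを
--- 作成することもできるはずです。テーブル名 pypi_dynamic は説明用の仮の名前です。
-CREATE TABLE pypi_dynamic
-ENGINE = MergeTree
-ORDER BY tuple() EMPTY
-AS SELECT *
-FROM s3('https://datasets-documentation.s3.eu-west-3.amazonaws.com/pypi/pypi_with_tags/sample_rows.json.gz', 'JSONAsObject')
-SETTINGS enable_json_type = 1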
-``` - -このフォーマットは、カラムに複数の型があり、それらが調和できない場合にも重要です。たとえば、次のような改行区切りJSONを持つ`sample.json`ファイルを考えてください: - -```json -{"a":1} -{"a":"22"} -``` - -この場合、ClickHouseは型の衝突を強制変換し、カラム`a`を`Nullable(String)`として解決できます。 - -```sql -DESCRIBE TABLE s3('https://datasets-documentation.s3.eu-west-3.amazonaws.com/json/sample.json') -SETTINGS describe_compact_output = 1 - -┌─name─┬─type─────────────┐ -│ a │ Nullable(String) │ -└──────┴──────────────────┘ - -1行の結果がセットに含まれています。経過時間: 0.081秒。 -``` - -:::note 型強制変換 -この型の強制変換は、いくつかの設定を通じて制御できます。上記の例は、設定[`input_format_json_read_numbers_as_strings`](/operations/settings/formats#input_format_json_read_numbers_as_strings)に依存しています。 -::: - -しかし、互換性のない型も存在します。次の例を考えてみてください: - -```json -{"a":1} -{"a":{"b":2}} -``` - -この場合、ここでの型変換は不可能です。したがって、`DESCRIBE`コマンドは失敗します: - -```sql -DESCRIBE s3('https://datasets-documentation.s3.eu-west-3.amazonaws.com/json/conflict_sample.json') - -経過時間: 0.755秒。 - -サーバーから受け取った例外 (バージョン 24.12.1): -コード: 636. DB::Exception: sql-clickhouse.clickhouse.com:9440 から受信しました。DB::Exception: JSON形式ファイルからテーブル構造を抽出できません。エラー: -コード: 53. DB::Exception: 行1のカラム'a'に対して自動的に定義された型Tuple(b Int64)が、前の行で定義された型: Int64 と異なります。このカラムの型を設定schema_inference_hintsを使用して指定できます。 -``` - -この場合、`JSONAsObject`は各行を単一の[`JSON`](/sql-reference/data-types/newjson)型としてみなします(同じカラムが複数の型を持つことをサポートします)。これは不可欠です: - -```sql -DESCRIBE TABLE s3('https://datasets-documentation.s3.eu-west-3.amazonaws.com/json/conflict_sample.json', JSONAsObject) -SETTINGS enable_json_type = 1, describe_compact_output = 1 - -┌─name─┬─type─┐ -│ json │ JSON │ -└──────┴──────┘ - -1行の結果がセットに含まれています。経過時間: 0.010秒。 -``` - -## さらなる情報 {#further-reading} - -データ型の推論についてもっと知りたい場合は、[こちら](/interfaces/schema-inference)のドキュメントページを参照してください。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/json/inference.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/json/inference.md.hash deleted file mode 100644 index f939b715ed9..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/json/inference.md.hash +++ /dev/null @@ -1 +0,0 @@ -7dfb6c0a9aa1b6fc diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/json/intro.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/json/intro.md deleted file mode 100644 index c4b4fc88834..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/json/intro.md +++ /dev/null @@ -1,38 +0,0 @@ ---- -sidebar_label: 'Overview' -sidebar_position: 10 -title: 'Working with JSON' -slug: '/integrations/data-formats/json/overview' -description: 'Working with JSON in ClickHouse' -keywords: -- 'json' -- 'clickhouse' -score: 10 ---- - - - - -# JSON の概要 - -
- -
- -
-ClickHouse は、JSON を扱うためのいくつかのアプローチを提供しており、それぞれの利点と欠点、および使用法があります。このガイドでは、JSON をロードし、スキーマを最適に設計する方法を説明します。このガイドは、以下のセクションで構成されています。 - -- [JSON のロード](/integrations/data-formats/json/loading) - シンプルなスキーマを使用して、ClickHouse で構造化されたおよび半構造化された JSON をロードおよびクエリする方法。 -- [JSON スキーマの推論](/integrations/data-formats/json/inference) - JSON スキーマの推論を使用して JSON をクエリし、テーブルスキーマを作成する方法。 -- [JSON スキーマの設計](/integrations/data-formats/json/schema) - JSON スキーマを設計および最適化するための手順。 -- [JSON のエクスポート](/integrations/data-formats/json/exporting) - JSON をエクスポートする方法。 -- [他の JSON フォーマットの扱い](/integrations/data-formats/json/other-formats) - ニューライン区切り (NDJSON) 以外の JSON フォーマットを扱うためのヒント。 -- [JSON モデリングの他のアプローチ](/integrations/data-formats/json/other-approaches) - JSON モデリングの古いアプローチ。**推奨されません。** diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/json/intro.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/json/intro.md.hash deleted file mode 100644 index 0d03cd23f11..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/json/intro.md.hash +++ /dev/null @@ -1 +0,0 @@ -a42322e99453c2d0 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/json/loading.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/json/loading.md deleted file mode 100644 index 166e6514225..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/json/loading.md +++ /dev/null @@ -1,218 +0,0 @@ ---- -sidebar_label: 'Loading JSON' -sidebar_position: 20 -title: 'Working with JSON' -slug: '/integrations/data-formats/json/loading' -description: 'Loading JSON' -keywords: -- 'json' -- 'clickhouse' -- 'inserting' -- 'loading' -- 'inserting' -score: 15 ---- - -import PrivatePreviewBadge from '@theme/badges/PrivatePreviewBadge'; - - -# JSONの読み込み {#loading-json} - -以下の例は、構造化データおよび半構造化データのJSONの読み込みについての非常にシンプルな例を提供します。ネストされた構造を含むより複雑なJSONについては、ガイド[**JSONスキーマの設計**](/integrations/data-formats/json/schema)を参照してください。 - -## 構造化JSONの読み込み {#loading-structured-json} - -このセクションでは、JSONデータが[`NDJSON`](https://github.com/ndjson/ndjson-spec)(Newline Delimited JSON)フォーマットであり、ClickHouseでは[`JSONEachRow`](/interfaces/formats#jsoneachrow)として知られている、また、よく構造化されている、つまりカラム名とタイプが固定されていますと仮定します。`NDJSON`は、その簡潔さと効率的なスペース利用のため、JSONの読み込みには推奨されるフォーマットですが、その他のフォーマットも[入力と出力](/interfaces/formats#json)の両方でサポートされています。 - -次のJSONサンプルを考えてみましょう。これは[Python PyPIデータセット](https://clickpy.clickhouse.com/)からの行を表しています。 - -```json -{ - "date": "2022-11-15", - "country_code": "ES", - "project": "clickhouse-connect", - "type": "bdist_wheel", - "installer": "pip", - "python_minor": "3.9", - "system": "Linux", - "version": "0.3.0" -} -``` - -このJSONオブジェクトをClickHouseに読み込むためには、テーブルスキーマを定義する必要があります。 - -このシンプルなケースでは、構造は静的で、カラム名は既知であり、そのタイプも明確に定義されています。 - -ClickHouseは、キー名とそのタイプが動的であるJSONタイプを通じて半構造化データをサポートしていますが、ここでは必要ありません。 - -:::note 静的スキーマの優先 -カラムに固定名とタイプがあり、新しいカラムが期待されない場合は、必ず生産環境では静的に定義されたスキーマを優先してください。 - -JSONタイプは、カラムの名前やタイプが変更される可能性のある高い動的データに好まれます。このタイプは、プロトタイピングやデータ探索にも便利です。 -::: - -以下に示すのは、**JSONキーをカラム名にマッピングする**シンプルなスキーマです。 - -```sql -CREATE TABLE pypi ( - `date` Date, - `country_code` String, - `project` String, - `type` String, - `installer` String, - `python_minor` String, - `system` String, - `version` String -) -ENGINE = MergeTree -ORDER BY (project, date) -``` - -:::note オーダリングキー -ここで、`ORDER 
BY`句を通じてオーダリングキーを選択しました。オーダリングキーの詳細や選択方法については、[こちら](/data-modeling/schema-design#choosing-an-ordering-key)を参照してください。 -::: - -ClickHouseは、拡張子と内容から型を自動的に推測して、さまざまなフォーマットのJSONデータを読み込むことができます。上記のテーブルのためにJSONファイルを[システム関数](/sql-reference/table-functions/s3)を使用して読み取ることができます。 - -```sql -SELECT * -FROM s3('https://datasets-documentation.s3.eu-west-3.amazonaws.com/pypi/json/*.json.gz') -LIMIT 1 -┌───────date─┬─country_code─┬─project────────────┬─type────────┬─installer────┬─python_minor─┬─system─┬─version─┐ -│ 2022-11-15 │ CN │ clickhouse-connect │ bdist_wheel │ bandersnatch │ │ │ 0.2.8 │ -└────────────┴──────────────┴────────────────────┴─────────────┴──────────────┴──────────────┴────────┴─────────┘ - -1行の結果が返されました。経過時間: 1.232秒。 -``` - -ファイルフォーマットを指定する必要がないことに注意してください。代わりに、バケット内のすべての`*.json.gz`ファイルを読み取るためにグロブパターンを使用しています。ClickHouseは、自動的にファイル拡張子と内容から形式を`JSONEachRow`(ndjson)であると推測します。ClickHouseが形式を検出できない場合は、パラメータ関数を通じて手動で形式を指定できます。 - -```sql -SELECT * FROM s3('https://datasets-documentation.s3.eu-west-3.amazonaws.com/pypi/json/*.json.gz', JSONEachRow) -``` - -:::note 圧縮ファイル -上記のファイルは圧縮されています。これはClickHouseによって自動的に検出され、処理されます。 -::: - -これらのファイル内の行を読み込むには、[`INSERT INTO SELECT`](/sql-reference/statements/insert-into#inserting-the-results-of-select)を使用できます。 - -```sql -INSERT INTO pypi SELECT * FROM s3('https://datasets-documentation.s3.eu-west-3.amazonaws.com/pypi/json/*.json.gz') -Ok. - -0行の結果が返されました。経過時間: 10.445秒。処理した行数: 19.49百万行、サイズ: 35.71 MB(1.87百万行/秒、3.42 MB/秒)。 - -SELECT * FROM pypi LIMIT 2 - -┌───────date─┬─country_code─┬─project────────────┬─type──┬─installer────┬─python_minor─┬─system─┬─version─┐ -│ 2022-05-26 │ CN │ clickhouse-connect │ sdist │ bandersnatch │ │ │ 0.0.7 │ -│ 2022-05-26 │ CN │ clickhouse-connect │ sdist │ bandersnatch │ │ │ 0.0.7 │ -└────────────┴──────────────┴────────────────────┴───────┴──────────────┴──────────────┴────────┴─────────┘ - -2行の結果が返されました。経過時間: 0.005秒。処理した行数: 8.19千行、サイズ: 908.03 KB(1.63百万行/秒、180.38 MB/秒)。 -``` - -行は、`[`FORMAT`句](/sql-reference/statements/select/format)を使用してインラインで読み込むこともできます。例えば: - -```sql -INSERT INTO pypi -FORMAT JSONEachRow -{"date":"2022-11-15","country_code":"CN","project":"clickhouse-connect","type":"bdist_wheel","installer":"bandersnatch","python_minor":"","system":"","version":"0.2.8"} -``` - -これらの例は`JSONEachRow`形式の使用を前提としています。他の一般的なJSON形式もサポートされており、これらの読み込みに関する例は[こちら](/integrations/data-formats/json/other-formats)で提供されています。 - -## 半構造化JSONの読み込み {#loading-semi-structured-json} - - - -前の例では、既知のキー名とタイプを持つ静的なJSONを読み込みました。これはしばしば当てはまりません—キーが追加されたり、キーのタイプが変わることがあります。これは、Observabilityデータなどのユースケースで一般的です。 - -ClickHouseは、専用の[`JSON`](/sql-reference/data-types/newjson)タイプを通じてこれに対応しています。 - -以下の例は、上記の[Python PyPIデータセット](https://clickpy.clickhouse.com/)の拡張バージョンからのものです。ここでは、ランダムなキー値ペアを持つ任意の`tags`カラムを追加しました。 - - -```json -{ - "date": "2022-09-22", - "country_code": "IN", - "project": "clickhouse-connect", - "type": "bdist_wheel", - "installer": "bandersnatch", - "python_minor": "", - "system": "", - "version": "0.2.8", - "tags": { - "5gTux": "f3to*PMvaTYZsz!*rtzX1", - "nD8CV": "value" - } -} -``` - -この`tags`カラムは予測できず、したがってモデリングが不可能です。このデータを読み込むには、上記のスキーマを使用しますが、[`JSON`](/sql-reference/data-types/newjson)タイプの追加の`tags`カラムを提供します: - -```sql -SET enable_json_type = 1; - -CREATE TABLE pypi_with_tags -( - `date` Date, - `country_code` String, - `project` String, - `type` String, - `installer` String, - `python_minor` String, - `system` String, - `version` String, - `tags` JSON -) -ENGINE = MergeTree -ORDER BY (project, date); -``` - 
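-:::note サブカラムアクセス(想定例)
-ロード後は、`tags` に格納された動的パスをドット記法のサブカラムとして直接参照できるはずです。以下は概念を示すためのスケッチで、キー名 `nD8CV` は上記のサンプル行に含まれるものを仮定しています。該当パスを持たない行では値は NULL になります。
-:::
-
-```sql
--- 仮定: tags に {"nD8CV": "value"} のようなキーを持つ行が存在する
-SELECT
-    project,
-    tags.nD8CV AS nD8CV_value
-FROM pypi_with_tags
-LIMIT 3
-```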
-元のデータセットと同じアプローチを使用してテーブルをポピュレートします: - -```sql -INSERT INTO pypi_with_tags SELECT * FROM s3('https://datasets-documentation.s3.eu-west-3.amazonaws.com/pypi/pypi_with_tags/sample.json.gz') -``` - -```sql -INSERT INTO pypi_with_tags SELECT * -FROM s3('https://datasets-documentation.s3.eu-west-3.amazonaws.com/pypi/pypi_with_tags/sample.json.gz') - -Ok. - -0行の結果が返されました。経過時間: 255.679秒。処理した行数: 1.00百万行、サイズ: 29.00 MB(3.91千行/秒、113.43 KB/秒)。 -ピークメモリ使用量: 2.00 GiB。 - -SELECT * -FROM pypi_with_tags -LIMIT 2 - -┌───────date─┬─country_code─┬─project────────────┬─type──┬─installer────┬─python_minor─┬─system─┬─version─┬─tags─────────────────────────────────────────────────────┐ -│ 2022-05-26 │ CN │ clickhouse-connect │ sdist │ bandersnatch │ │ │ 0.0.7 │ {"nsBM":"5194603446944555691"} │ -│ 2022-05-26 │ CN │ clickhouse-connect │ sdist │ bandersnatch │ │ │ 0.0.7 │ {"4zD5MYQz4JkP1QqsJIS":"0","name":"8881321089124243208"} │ -└────────────┴──────────────┴────────────────────┴───────┴──────────────┴──────────────┴────────┴─────────┴──────────────────────────────────────────────────────────┘ - -2行の結果が返されました。経過時間: 0.149秒。 -``` - -ここでのデータの読み込みのパフォーマンスの違いに注意してください。JSONカラムは、挿入時に型推論を必要とし、1つの型より多くの型を持つカラムが存在する場合、追加のストレージも必要です。JSONタイプは構成可能ですが([JSONスキーマの設計](/integrations/data-formats/json/schema)を参照)、明示的にカラムを宣言する場合と同等のパフォーマンスを提供しますが、初期設定では意図的に柔軟です。この柔軟性は、ある程度のコストを伴います。 - -### JSONタイプを使用する場合 {#when-to-use-the-json-type} - -データに次のような特性がある場合は、JSONタイプを使用してください: - -* **予測できないキー**があり、時間の経過とともに変更される可能性がある。 -* **異なるタイプの値**を含む(例:パスが文字列のこともあれば、数値のこともある)。 -* 厳密なタイプ付けが実行できない場合にスキーマの柔軟性が必要です。 - -データ構造が既知で一貫している場合、データがJSON形式であっても、JSONタイプが必要となることはほとんどありません。特に、データが次のようである場合: - -* **知られたキーを持つフラットな構造**:標準のカラムタイプ(例:String)を使用します。 -* **予測可能なネスト**:Tuple、Array、またはNestedタイプをこれらの構造に使用します。 -* **異なるタイプを持つ予測可能な構造**:DynamicまたはVariantタイプなどを検討してください。 - -上記の例のように、静的カラムを予測可能なトップレベルキーに使用し、ペイロードの動的セクションに対して単一のJSONカラムを使用するという方法のミックスも可能です。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/json/loading.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/json/loading.md.hash deleted file mode 100644 index f70da9d6560..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/json/loading.md.hash +++ /dev/null @@ -1 +0,0 @@ -967c6f95ca7ea957 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/json/other.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/json/other.md deleted file mode 100644 index 6b4a8dc0144..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/json/other.md +++ /dev/null @@ -1,664 +0,0 @@ ---- -title: 'Other JSON approaches' -slug: '/integrations/data-formats/json/other-approaches' -description: 'Other approaches to modeling JSON' -keywords: -- 'json' -- 'formats' ---- - - - - -# JSONモデリングの他のアプローチ - -**以下はClickHouseにおけるJSONのモデリングの代替手法です。これらは、JSON型の開発前に適用できたものであり、完全性のために文書化されています。そのため、多くのユースケースにおいては一般的には推奨されず、適用されません。** - -:::note オブジェクトレベルアプローチを適用する -異なるテクニックは、同じスキーマ内の異なるオブジェクトに対して適用される場合があります。例えば、一部のオブジェクトは`String`型を使用するのが最適であり、他のものは`Map`型を使用するのが最適です。`String`型が一度使用されると、それ以上のスキーマの決定を行う必要はありません。一方で、`Map`のキー内にサブオブジェクトをネストすることも可能です - JSONを表す`String`を含む形で、以下に示す通りです。 -::: - -## Stringを使用する {#using-string} - -オブジェクトが非常に動的で、予測できない構造を持ち、任意のネストされたオブジェクトが含まれている場合、ユーザーは`String`型を使用するべきです。値は、以下に示すようにJSON関数を使用してクエリ時に抽出できます。 - 
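-感覚をつかむための最小のスケッチを先に示します。以下は定数のJSON文字列に対する説明用の例で(値は仮のものです)、実際には同じ関数を`String`カラムに対して適用します。詳細な手順はこの後で説明します。
-
-```sql
-SELECT
-    JSONExtractString('{"hobby":"Databases","car":{"model":"Tesla","year":2023}}', 'hobby') AS hobby,
-    JSON_VALUE('{"hobby":"Databases","car":{"model":"Tesla","year":2023}}', '$.car.model') AS car_model
-```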
-上記のように構造化アプローチでデータを扱うことは、動的JSONを持つユーザーにとっては実行可能でないことがしばしばあります。これは、変更が加えられる可能性があるか、スキーマが十分に理解されていない場合です。絶対的な柔軟性のために、ユーザーは単にJSONを`String`として保存し、必要に応じてフィールドを抽出するための関数を使用できます。これは、JSONを構造化されたオブジェクトとして扱うことの真逆を表しています。この柔軟性には、重要な欠点が伴い、主にクエリの構文の複雑さの増加やパフォーマンスの劣化をもたらします。 - -前述の通り、[元の人オブジェクト](/integrations/data-formats/json/schema#static-vs-dynamic-json)については、`tags`カラムの構造を保証することはできません。元の行(`company.labels`を含むが、ここでは無視します)を挿入し、`Tags`カラムを`String`として宣言します: - -```sql -CREATE TABLE people -( - `id` Int64, - `name` String, - `username` String, - `email` String, - `address` Array(Tuple(city String, geo Tuple(lat Float32, lng Float32), street String, suite String, zipcode String)), - `phone_numbers` Array(String), - `website` String, - `company` Tuple(catchPhrase String, name String), - `dob` Date, - `tags` String -) -ENGINE = MergeTree -ORDER BY username - -INSERT INTO people FORMAT JSONEachRow -{"id":1,"name":"Clicky McCliickHouse","username":"Clicky","email":"clicky@clickhouse.com","address":[{"street":"Victor Plains","suite":"Suite 879","city":"Wisokyburgh","zipcode":"90566-7771","geo":{"lat":-43.9509,"lng":-34.4618}}],"phone_numbers":["010-692-6593","020-192-3333"],"website":"clickhouse.com","company":{"name":"ClickHouse","catchPhrase":"The real-time data warehouse for analytics","labels":{"type":"database systems","founded":"2021"}},"dob":"2007-03-31","tags":{"hobby":"Databases","holidays":[{"year":2024,"location":"Azores, Portugal"}],"car":{"model":"Tesla","year":2023}}} - -Ok. -1 row in set. Elapsed: 0.002 sec. -``` - -`tags`カラムを選択すると、JSONが文字列として挿入されたことがわかります: - -```sql -SELECT tags -FROM people - -┌─tags───────────────────────────────────────────────────────────────────────────────────────────────────────────────┐ -│ {"hobby":"Databases","holidays":[{"year":2024,"location":"Azores, Portugal"}],"car":{"model":"Tesla","year":2023}} │ -└────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┘ - -1 row in set. Elapsed: 0.001 sec. -``` - -[`JSONExtract`](/sql-reference/functions/json-functions#jsonextract-functions)関数を使用して、このJSONから値を取得できます。以下の簡単な例を考えてみましょう: - -```sql -SELECT JSONExtractString(tags, 'holidays') as holidays FROM people - -┌─holidays──────────────────────────────────────┐ -│ [{"year":2024,"location":"Azores, Portugal"}] │ -└───────────────────────────────────────────────┘ - -1 row in set. Elapsed: 0.002 sec. -``` - -関数は、`String`カラム`tags`への参照と、抽出するためのJSON内のパスの両方を必要とすることに注意してください。ネストされたパスは、関数をネストさせる必要があります。例えば、`JSONExtractUInt(JSONExtractString(tags, 'car'), 'year')`は、カラム`tags.car.year`を抽出します。ネストされたパスの抽出は、関数[`JSON_QUERY`](/sql-reference/functions/json-functions#json_query)および[`JSON_VALUE`](/sql-reference/functions/json-functions#json_value)を通じて簡素化できます。 - -`arxiv`データセットの極端なケースを考えてみましょう。このデータセットでは、本文全体を`String`として扱います。 - -```sql -CREATE TABLE arxiv ( - body String -) -ENGINE = MergeTree ORDER BY () -``` - -このスキーマに挿入するには、`JSONAsString`形式を使用する必要があります: - -```sql -INSERT INTO arxiv SELECT * -FROM s3('https://datasets-documentation.s3.eu-west-3.amazonaws.com/arxiv/arxiv.json.gz', 'JSONAsString') - -0 rows in set. Elapsed: 25.186 sec. Processed 2.52 million rows, 1.38 GB (99.89 thousand rows/s., 54.79 MB/s.) 
-``` - -年ごとにリリースされた論文の数をカウントしたい場合、以下のクエリを考えてみましょう。単なる文字列を使用した場合と[構造化バージョン](/integrations/data-formats/json/inference#creating-tables)のスキーマを対比させます: - -```sql --- 構造化スキーマを使用 -SELECT - toYear(parseDateTimeBestEffort(versions.created[1])) AS published_year, - count() AS c -FROM arxiv_v2 -GROUP BY published_year -ORDER BY c ASC -LIMIT 10 - -┌─published_year─┬─────c─┐ -│ 1986 │ 1 │ -│ 1988 │ 1 │ -│ 1989 │ 6 │ -│ 1990 │ 26 │ -│ 1991 │ 353 │ -│ 1992 │ 3190 │ -│ 1993 │ 6729 │ -│ 1994 │ 10078 │ -│ 1995 │ 13006 │ -│ 1996 │ 15872 │ -└────────────────┴───────┘ - -10 rows in set. Elapsed: 0.264 sec. Processed 2.31 million rows, 153.57 MB (8.75 million rows/s., 582.58 MB/s.) - --- 非構造化Stringを使用 - -SELECT - toYear(parseDateTimeBestEffort(JSON_VALUE(body, '$.versions[0].created'))) AS published_year, - count() AS c -FROM arxiv -GROUP BY published_year -ORDER BY published_year ASC -LIMIT 10 - -┌─published_year─┬─────c─┐ -│ 1986 │ 1 │ -│ 1988 │ 1 │ -│ 1989 │ 6 │ -│ 1990 │ 26 │ -│ 1991 │ 353 │ -│ 1992 │ 3190 │ -│ 1993 │ 6729 │ -│ 1994 │ 10078 │ -│ 1995 │ 13006 │ -│ 1996 │ 15872 │ -└────────────────┴───────┘ - -10 rows in set. Elapsed: 1.281 sec. Processed 2.49 million rows, 4.22 GB (1.94 million rows/s., 3.29 GB/s.) -Peak memory usage: 205.98 MiB. -``` - -ここでのXPath式の使用に注目してください。これはメソッドによってJSONをフィルタリングします。すなわち、`JSON_VALUE(body, '$.versions[0].created')`です。 - -String関数は明らかに遅く(> 10倍)、インデックスを用いた明示的な型変換に比べてパフォーマンスが劣ります。上記のクエリは常に全表スキャンと各行の処理を要求します。このようなクエリは、このような小規模なデータセットでは依然として速いですが、大規模なデータセットではパフォーマンスが劣化します。 - -このアプローチの柔軟性は、明確なパフォーマンスと構文コストを伴い、スキーマ内の非常に動的なオブジェクトにのみ使用すべきです。 - -### シンプルなJSON関数 {#simple-json-functions} - -上記の例では、JSON*ファミリーの関数が使用されています。これらは、[simdjson](https://github.com/simdjson/simdjson)に基づくフルJSONパーサーを利用しており、厳密に解析され、異なるレベルでネストされた同じフィールドを区別します。これらの関数は、文法的には正しいが適切にフォーマットされていないJSON(例えば、キー間に二重スペースがある場合)も処理できます。 - -より高速で厳密な関数セットも利用可能です。これらの`simpleJSON*`関数は、主にJSONの構造とフォーマットについて厳密な仮定を行うことにより、潜在的に優れたパフォーマンスを提供します。具体的には: - -- フィールド名は定数でなければなりません -- フィールド名の一貫したエンコーディング(例:`simpleJSONHas('{"abc":"def"}', 'abc') = 1`)が必要ですが、`visitParamHas('{"\\u0061\\u0062\\u0063":"def"}', 'abc') = 0`は無効です -- フィールド名は、すべてのネストされた構造の間で一意である必要があります。ネストのレベル間での区別は行われず、マッチは無差別に行われます。複数のフィールドが一致する場合、最初に現れたものが使用されます。 -- 文字列リテラル以外の特殊文字はありません。これにはスペースも含まれます。以下は無効であり、解析されません。 - - ```json - {"@timestamp": 893964617, "clientip": "40.135.0.0", "request": {"method": "GET", - "path": "/images/hm_bg.jpg", "version": "HTTP/1.0"}, "status": 200, "size": 24736} - ``` - -次の例は正しく解析されます: - -```json -{"@timestamp":893964617,"clientip":"40.135.0.0","request":{"method":"GET", - "path":"/images/hm_bg.jpg","version":"HTTP/1.0"},"status":200,"size":24736} - -パフォーマンスが重要で、JSONが上記の要件を満たす場合、これらの関数が適切であることがあります。前述のクエリの例を`simpleJSON*`関数を使用するように再記述すると、以下のようになります: - -```sql -SELECT - toYear(parseDateTimeBestEffort(simpleJSONExtractString(simpleJSONExtractRaw(body, 'versions'), 'created'))) AS published_year, - count() AS c -FROM arxiv -GROUP BY published_year -ORDER BY published_year ASC -LIMIT 10 - -┌─published_year─┬─────c─┐ -│ 1986 │ 1 │ -│ 1988 │ 1 │ -│ 1989 │ 6 │ -│ 1990 │ 26 │ -│ 1991 │ 353 │ -│ 1992 │ 3190 │ -│ 1993 │ 6729 │ -│ 1994 │ 10078 │ -│ 1995 │ 13006 │ -│ 1996 │ 15872 │ -└────────────────┴───────┘ - -10 rows in set. Elapsed: 0.964 sec. Processed 2.48 million rows, 4.21 GB (2.58 million rows/s., 4.36 GB/s.) 
-``` - -上記のクエリは、`simpleJSONExtractString`を使用して`created`キーを抽出し、公開日の日付のみを必要とするという事実を利用しています。この場合、`simpleJSON*`関数の制限は、パフォーマンスの向上を伴って許容されるものとなります。 - -## Mapを使用する {#using-map} - -任意のキーを格納するためにオブジェクトが使用される場合、主に1つのタイプの値を持つ場合は、`Map`型を使用することを検討してください。理想的には、ユニークなキーの数は数百を超えないべきです。`Map`型は、サブオブジェクトを持つオブジェクトにも使用できますが、その場合は一貫性が必要です。一般的に、`Map`型はラベルやタグに使用することが推奨されます。例えば、ログデータのKubernetesポッドラベルなどです。 - -`Map`はネストされた構造を表現する簡単な方法を提供しますが、いくつかの顕著な制限があります: - -- フィールドはすべて同じ型でなければなりません。 -- サブカラムにアクセスするには特別なマップ構文が必要です。なぜなら、フィールドはカラムとして存在しないからです。オブジェクト全体が**カラム**です。 -- サブカラムにアクセスする際には、全ての兄弟とそれぞれの値を含む`Map`値をロードします。大きなマップの場合、これには重大なパフォーマンスペナルティが伴う可能性があります。 - -:::note 文字列キー -オブジェクトを`Map`としてモデリングする際には、`String`キーを使用してJSONキー名を格納します。したがって、マップは常に`Map(String, T)`となり、ここで`T`はデータに依存します。 -::: - -#### プリミティブ値 {#primitive-values} - -`Map`の最もシンプルな適用は、オブジェクトが同じプリミティブ型の値を含む場合です。ほとんどの場合、これは`String`型の値`T`を使用することを含みます。 - -先ほどの[人物JSON](/integrations/data-formats/json/schema#static-vs-dynamic-json)を考えてみましょう。ここでは、`company.labels`オブジェクトが動的であることが決定されていました。重要なことに、このオブジェクトには文字列型のキーと値のペアのみが追加されることが期待されています。したがって、これを`Map(String, String)`として宣言できます: - -```sql -CREATE TABLE people -( - `id` Int64, - `name` String, - `username` String, - `email` String, - `address` Array(Tuple(city String, geo Tuple(lat Float32, lng Float32), street String, suite String, zipcode String)), - `phone_numbers` Array(String), - `website` String, - `company` Tuple(catchPhrase String, name String, labels Map(String,String)), - `dob` Date, - `tags` String -) -ENGINE = MergeTree -ORDER BY username -``` - -元の完全なJSONオブジェクトを挿入します: - -```sql -INSERT INTO people FORMAT JSONEachRow -{"id":1,"name":"Clicky McCliickHouse","username":"Clicky","email":"clicky@clickhouse.com","address":[{"street":"Victor Plains","suite":"Suite 879","city":"Wisokyburgh","zipcode":"90566-7771","geo":{"lat":-43.9509,"lng":-34.4618}}],"phone_numbers":["010-692-6593","020-192-3333"],"website":"clickhouse.com","company":{"name":"ClickHouse","catchPhrase":"The real-time data warehouse for analytics","labels":{"type":"database systems","founded":"2021"}},"dob":"2007-03-31","tags":{"hobby":"Databases","holidays":[{"year":2024,"location":"Azores, Portugal"}],"car":{"model":"Tesla","year":2023}}} - -Ok. - -1 row in set. Elapsed: 0.002 sec. -``` - -リクエストオブジェクト内のこれらのフィールドをクエリするには、マップ構文が必要です。例えば: - -```sql -SELECT company.labels FROM people - -┌─company.labels───────────────────────────────┐ -│ {'type':'database systems','founded':'2021'} │ -└──────────────────────────────────────────────┘ - -1 row in set. Elapsed: 0.001 sec. - -SELECT company.labels['type'] AS type FROM people - -┌─type─────────────┐ -│ database systems │ -└──────────────────┘ - -1 row in set. Elapsed: 0.001 sec. 
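-
--- 補足(想定例): Map 型のカラムにはマップ関数も利用できるはずです(キー一覧の取得や存在確認など)
-SELECT
-    mapKeys(company.labels) AS label_keys,
-    mapContains(company.labels, 'founded') AS has_founded
-FROM people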
-``` - -マップ関数の完全なセットが、クエリするために利用可能であり、[こちら](/sql-reference/functions/tuple-map-functions.md)に説明されています。データが一貫した型でない場合、[必要な型変換](/sql-reference/functions/type-conversion-functions)を実行するための関数が存在します。 - -#### オブジェクト値 {#object-values} - -`Map`型は、サブオブジェクトがあるオブジェクトにも考慮できる場合がありますが、その場合は一貫性が必要です。 - -例えば、`persons`オブジェクトの`tags`キーが一貫した構造を要求する場合、各`tag`のサブオブジェクトは`name`と`time`カラムを持つ必要があります。このようなJSONドキュメントの簡易例は以下のようになります: - -```json -{ - "id": 1, - "name": "Clicky McCliickHouse", - "username": "Clicky", - "email": "clicky@clickhouse.com", - "tags": { - "hobby": { - "name": "Diving", - "time": "2024-07-11 14:18:01" - }, - "car": { - "name": "Tesla", - "time": "2024-07-11 15:18:23" - } - } -} -``` - -これは、`Map(String, Tuple(name String, time DateTime))`を使用してモデル化できます。以下を参照してください: - -```sql -CREATE TABLE people -( - `id` Int64, - `name` String, - `username` String, - `email` String, - `tags` Map(String, Tuple(name String, time DateTime)) -) -ENGINE = MergeTree -ORDER BY username - -INSERT INTO people FORMAT JSONEachRow -{"id":1,"name":"Clicky McCliickHouse","username":"Clicky","email":"clicky@clickhouse.com","tags":{"hobby":{"name":"Diving","time":"2024-07-11 14:18:01"},"car":{"name":"Tesla","time":"2024-07-11 15:18:23"}}} - -Ok. - -1 row in set. Elapsed: 0.002 sec. - -SELECT tags['hobby'] AS hobby -FROM people -FORMAT JSONEachRow - -{"hobby":{"name":"Diving","time":"2024-07-11 14:18:01"}} - -1 row in set. Elapsed: 0.001 sec. -``` - -この場合のマップの適用は通常稀であり、動的キー名を持つデータをサブオブジェクトなしで再設計する必要があることを示唆しています。たとえば、上記は次のように再設計され、`Array(Tuple(key String, name String, time DateTime))`の使用を許可します。 - -```json -{ - "id": 1, - "name": "Clicky McCliickHouse", - "username": "Clicky", - "email": "clicky@clickhouse.com", - "tags": [ - { - "key": "hobby", - "name": "Diving", - "time": "2024-07-11 14:18:01" - }, - { - "key": "car", - "name": "Tesla", - "time": "2024-07-11 15:18:23" - } - ] -} -``` - -## Nestedを使用する {#using-nested} - -[Nested型](/sql-reference/data-types/nested-data-structures/nested)は、静的オブジェクトをモデル化するために使用できます。これらは変更されることが滅多にないため、`Tuple`や`Array(Tuple)`の代替手段を提供します。一般的に、このタイプをJSONに使用することは避けるべきです。なぜなら、その挙動はしばしば混乱を招くからです。`Nested`の主な利点は、サブカラムをオーダリングキーで使用できることです。 - -以下に、静的オブジェクトをモデル化するためにNested型を使用する例を示します。以下は、JSONのシンプルなログエントリです: - -```json -{ - "timestamp": 897819077, - "clientip": "45.212.12.0", - "request": { - "method": "GET", - "path": "/french/images/hm_nav_bar.gif", - "version": "HTTP/1.0" - }, - "status": 200, - "size": 3305 -} -``` - -`request`キーを`Nested`として宣言できます。`Tuple`と同様に、サブカラムを指定する必要があります。 - -```sql --- デフォルト -SET flatten_nested=1 -CREATE table http -( - timestamp Int32, - clientip IPv4, - request Nested(method LowCardinality(String), path String, version LowCardinality(String)), - status UInt16, - size UInt32, -) ENGINE = MergeTree() ORDER BY (status, timestamp); -``` - -### flatten_nested {#flatten_nested} - -設定`flatten_nested`は、ネストされた動作を制御します。 - -#### flatten_nested=1 {#flatten_nested1} - -値が`1`(デフォルト)は、任意のレベルのネスティングをサポートしません。この値では、ネストされたデータ構造を長さが同じ複数の[Array](/sql-reference/data-types/array)カラムとして考えることが最も簡単です。フィールド`method`、`path`、および`version`はすべて実質的に別々の`Array(Type)`カラムであり、1つの重要な制約があります:**`method`、`path`、および`version`フィールドの長さは同じでなければなりません。** `SHOW CREATE TABLE`を使用すると、以下のように示されます: - -```sql -SHOW CREATE TABLE http - -CREATE TABLE http -( - `timestamp` Int32, - `clientip` IPv4, - `request.method` Array(LowCardinality(String)), - `request.path` Array(String), - `request.version` Array(LowCardinality(String)), - `status` UInt16, - `size` UInt32 -) -ENGINE = MergeTree -ORDER BY (status, timestamp) -``` - 
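-なお、上記の「各サブカラムの配列長が一致していなければならない」という制約は挿入時にも適用されます。以下は想定例で、`request.method` だけ要素数が多いため、配列サイズの不一致エラーになるはずです:
-
-```sql
--- 想定例: 長さの異なる配列を渡す挿入(失敗するはず)
-INSERT INTO http (timestamp, clientip, `request.method`, `request.path`, `request.version`, status, size)
-VALUES (897819077, '45.212.12.0', ['GET', 'HEAD'], ['/index.html'], ['HTTP/1.0'], 200, 3305)
-```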
-以下に、このテーブルに挿入します: - -```sql -SET input_format_import_nested_json = 1; -INSERT INTO http -FORMAT JSONEachRow -{"timestamp":897819077,"clientip":"45.212.12.0","request":[{"method":"GET","path":"/french/images/hm_nav_bar.gif","version":"HTTP/1.0"}],"status":200,"size":3305} -``` - -ここで注目すべき重要なポイントがいくつかあります: - -* JSONをネストされた構造として挿入するために、設定`input_format_import_nested_json`を使用する必要があります。これがないと、JSONをフラットにする必要があります。すなわち、 - - ```sql - INSERT INTO http FORMAT JSONEachRow - {"timestamp":897819077,"clientip":"45.212.12.0","request":{"method":["GET"],"path":["/french/images/hm_nav_bar.gif"],"version":["HTTP/1.0"]},"status":200,"size":3305} - ``` - -* ネストされたフィールド`method`、`path`、`version`は、JSON配列として渡す必要があります。すなわち、 - - ```json - { - "@timestamp": 897819077, - "clientip": "45.212.12.0", - "request": { - "method": [ - "GET" - ], - "path": [ - "/french/images/hm_nav_bar.gif" - ], - "version": [ - "HTTP/1.0" - ] - }, - "status": 200, - "size": 3305 - } - ``` - -カラムはドット表記を使用してクエリできます: - -```sql -SELECT clientip, status, size, `request.method` FROM http WHERE has(request.method, 'GET'); - -┌─clientip────┬─status─┬─size─┬─request.method─┐ -│ 45.212.12.0 │ 200 │ 3305 │ ['GET'] │ -└─────────────┴────────┴──────┴────────────────┘ -1 row in set. Elapsed: 0.002 sec. -``` - -サブカラム'の`Array`を使用することにより、[Array関数](/sql-reference/functions/array-functions)が活用できることに注意してください。特に、[ARRAY JOIN](/sql-reference/statements/select/array-join)句が役立つ場合があります - もしあなたのカラムに複数の値がある場合です。 - -#### flatten_nested=0 {#flatten_nested0} - -これは任意のレベルのネスティングを許可し、ネストされたカラムを単一の`Tuple`の配列として保持します - 事実的に、これらは`Array(Tuple)`と同じになります。 - -**これは好ましい方法であり、しばしばJSONを`Nested`に使用する最もシンプルな方法です。以下に示すように、すべてのオブジェクトをリストにするだけで済みます。** - -以下に、テーブルを再作成し、行を再挿入します: - -```sql -CREATE TABLE http -( - `timestamp` Int32, - `clientip` IPv4, - `request` Nested(method LowCardinality(String), path String, version LowCardinality(String)), - `status` UInt16, - `size` UInt32 -) -ENGINE = MergeTree -ORDER BY (status, timestamp) - -SHOW CREATE TABLE http - --- note Nested type is preserved. -CREATE TABLE default.http -( - `timestamp` Int32, - `clientip` IPv4, - `request` Nested(method LowCardinality(String), path String, version LowCardinality(String)), - `status` UInt16, - `size` UInt32 -) -ENGINE = MergeTree -ORDER BY (status, timestamp) - -INSERT INTO http -FORMAT JSONEachRow -{"timestamp":897819077,"clientip":"45.212.12.0","request":[{"method":"GET","path":"/french/images/hm_nav_bar.gif","version":"HTTP/1.0"}],"status":200,"size":3305} -``` - -ここで注意すべき重要なポイントがいくつかあります: - -* 挿入には`input_format_import_nested_json`は必要ありません。 -* `Nested`型は`SHOW CREATE TABLE`で保持されます。この列は実質的に`Array(Tuple(Nested(method LowCardinality(String), path String, version LowCardinality(String))))`です。 -* 結果として、`request`を配列として挿入する必要があります。すなわち、 - - ```json - { - "timestamp": 897819077, - "clientip": "45.212.12.0", - "request": [ - { - "method": "GET", - "path": "/french/images/hm_nav_bar.gif", - "version": "HTTP/1.0" - } - ], - "status": 200, - "size": 3305 - } - ``` - -カラムは再びドット表記を用いてクエリできます: - -```sql -SELECT clientip, status, size, `request.method` FROM http WHERE has(request.method, 'GET'); - -┌─clientip────┬─status─┬─size─┬─request.method─┐ -│ 45.212.12.0 │ 200 │ 3305 │ ['GET'] │ -└─────────────┴────────┴──────┴────────────────┘ -1 row in set. Elapsed: 0.002 sec. 
-``` - -### 例 {#example} - -上記のデータのより大きな例が、S3のパブリックバケットにあります: `s3://datasets-documentation/http/`。 - -```sql -SELECT * -FROM s3('https://datasets-documentation.s3.eu-west-3.amazonaws.com/http/documents-01.ndjson.gz', 'JSONEachRow') -LIMIT 1 -FORMAT PrettyJSONEachRow - -{ - "@timestamp": "893964617", - "clientip": "40.135.0.0", - "request": { - "method": "GET", - "path": "\/images\/hm_bg.jpg", - "version": "HTTP\/1.0" - }, - "status": "200", - "size": "24736" -} - -1 row in set. Elapsed: 0.312 sec. -``` - -制約とJSONの入力形式に基づいて、このサンプルデータセットを挿入するには、以下のクエリを実行します。ここでは、`flatten_nested=0`を設定します。 - -次の文は1000万行を挿入するため、実行には数分かかる場合があります。必要に応じて`LIMIT`を適用してください: - -```sql -INSERT INTO http -SELECT `@timestamp` AS `timestamp`, clientip, [request], status, -size FROM s3('https://datasets-documentation.s3.eu-west-3.amazonaws.com/http/documents-01.ndjson.gz', -'JSONEachRow'); -``` - -このデータをクエリするには、要求フィールドにアクセスするために配列としてアクセスする必要があります。以下に、固定された時間範囲内でのエラーおよびHTTPメソッドを要約します。 - -```sql -SELECT status, request.method[1] as method, count() as c -FROM http -WHERE status >= 400 - AND toDateTime(timestamp) BETWEEN '1998-01-01 00:00:00' AND '1998-06-01 00:00:00' -GROUP by method, status -ORDER BY c DESC LIMIT 5; - -┌─status─┬─method─┬─────c─┐ -│ 404 │ GET │ 11267 │ -│ 404 │ HEAD │ 276 │ -│ 500 │ GET │ 160 │ -│ 500 │ POST │ 115 │ -│ 400 │ GET │ 81 │ -└────────┴────────┴───────┘ - -5 rows in set. Elapsed: 0.007 sec. -``` - -### ペアワイズ配列を使用する {#using-pairwise-arrays} - -ペアワイズ配列は、JSONを文字列として表現する柔軟性と、より構造化されたアプローチのパフォーマンスのバランスを提供します。このスキーマは柔軟性があり、新しいフィールドをルートに追加することができます。ただし、これはかなり複雑なクエリ構文を必要とし、ネストされた構造とは互換性がありません。 - -たとえば、次のようなテーブルを考えてみましょう: - -```sql -CREATE TABLE http_with_arrays ( - keys Array(String), - values Array(String) -) -ENGINE = MergeTree ORDER BY tuple(); -``` - -このテーブルに挿入するには、JSONをキーと値のリストとして構造化する必要があります。以下のクエリは、`JSONExtractKeysAndValues`を使用してこれを達成する例を示しています: - -```sql -SELECT - arrayMap(x -> (x.1), JSONExtractKeysAndValues(json, 'String')) AS keys, - arrayMap(x -> (x.2), JSONExtractKeysAndValues(json, 'String')) AS values -FROM s3('https://datasets-documentation.s3.eu-west-3.amazonaws.com/http/documents-01.ndjson.gz', 'JSONAsString') -LIMIT 1 -FORMAT Vertical - -Row 1: -────── -keys: ['@timestamp','clientip','request','status','size'] -values: ['893964617','40.135.0.0','{"method":"GET","path":"/images/hm_bg.jpg","version":"HTTP/1.0"}','200','24736'] - -1 row in set. Elapsed: 0.416 sec. -``` - -要求カラムは、文字列として表されたネストされた構造のままであることに注意してください。ルートに新しいキーを追加することができ、JSON自体にも任意の違いを持つことができます。ローカルテーブルに挿入するには、次のクエリを実行します: - -```sql -INSERT INTO http_with_arrays -SELECT - arrayMap(x -> (x.1), JSONExtractKeysAndValues(json, 'String')) AS keys, - arrayMap(x -> (x.2), JSONExtractKeysAndValues(json, 'String')) AS values -FROM s3('https://datasets-documentation.s3.eu-west-3.amazonaws.com/http/documents-01.ndjson.gz', 'JSONAsString') - -0 rows in set. Elapsed: 12.121 sec. Processed 10.00 million rows, 107.30 MB (825.01 thousand rows/s., 8.85 MB/s.) 
-``` - -この構造をクエリするには、`indexOf`関数を使用して必要なキーのインデックスを特定する必要があります(これは値の順序と一致する必要があります)。これを使用して値配列カラムにアクセスできます。すなわち、`values[indexOf(keys, 'status')]`。要求カラムに対しては、JSON解析メソッドが引き続き必要です。この場合、`simpleJSONExtractString`ですが、 - -```sql -SELECT toUInt16(values[indexOf(keys, 'status')]) as status, - simpleJSONExtractString(values[indexOf(keys, 'request')], 'method') as method, - count() as c -FROM http_with_arrays -WHERE status >= 400 - AND toDateTime(values[indexOf(keys, '@timestamp')]) BETWEEN '1998-01-01 00:00:00' AND '1998-06-01 00:00:00' -GROUP by method, status ORDER BY c DESC LIMIT 5; - -┌─status─┬─method─┬─────c─┐ -│ 404 │ GET │ 11267 │ -│ 404 │ HEAD │ 276 │ -│ 500 │ GET │ 160 │ -│ 500 │ POST │ 115 │ -│ 400 │ GET │ 81 │ -└────────┴────────┴───────┘ - -5 rows in set. Elapsed: 0.383 sec. Processed 8.22 million rows, 1.97 GB (21.45 million rows/s., 5.15 GB/s.) -``` diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/json/other.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/json/other.md.hash deleted file mode 100644 index e2e51eddd87..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/json/other.md.hash +++ /dev/null @@ -1 +0,0 @@ -fa9d9fb7005d0254 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/json/schema.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/json/schema.md deleted file mode 100644 index 60509110b46..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/json/schema.md +++ /dev/null @@ -1,960 +0,0 @@ ---- -title: 'JSONスキーマの設計' -slug: '/integrations/data-formats/json/schema' -description: 'JSONスキーマを最適に設計する方法' -keywords: -- 'json' -- 'clickhouse' -- 'inserting' -- 'loading' -- 'formats' -- 'schema' -- 'structured' -- 'semi-structured' -score: 20 ---- - -import PrivatePreviewBadge from '@theme/badges/PrivatePreviewBadge'; -import Image from '@theme/IdealImage'; -import json_column_per_type from '@site/static/images/integrations/data-ingestion/data-formats/json_column_per_type.png'; -import json_offsets from '@site/static/images/integrations/data-ingestion/data-formats/json_offsets.png'; -import shared_json_column from '@site/static/images/integrations/data-ingestion/data-formats/json_shared_column.png'; - - - -# スキーマの設計 - -[スキーマ推論](/integrations/data-formats/json/inference)を使用して、JSONデータの初期スキーマを確立し、S3などでJSONデータファイルをクエリすることができますが、ユーザーはデータに対して最適化されたバージョン管理されたスキーマを確立することを目指すべきです。以下に、JSON構造のモデリングに推奨されるアプローチを示します。 -## 静的JSONと動的JSON {#static-vs-dynamic-json} - -JSONのスキーマを定義する際の主なタスクは、各キーの値に適切な型を決定することです。ユーザーには、JSON階層内の各キーに対して以下のルールを再帰的に適用して、各キーの適切な型を決定することを推奨します。 - -1. **プリミティブ型** - キーの値がプリミティブ型である場合、サブオブジェクトの一部であるかルート上にあるかに関係なく、一般的なスキーマの[設計ベストプラクティス](/data-modeling/schema-design)および[type optimization rules](/data-modeling/schema-design#optimizing-types)に従ってその型を選択してください。以下の`phone_numbers`のようなプリミティブの配列は、`Array()`としてモデル化できます。例えば、`Array(String)`。 -2. 
**静的 vs 動的** - キーの値が複雑なオブジェクト(すなわち、オブジェクトまたはオブジェクトの配列)である場合、そのオブジェクトが変化する可能性があるかどうかを決定してください。新しいキーが稀に追加されるオブジェクトでは、新しいキーの追加が予測可能であり、[`ALTER TABLE ADD COLUMN`](/sql-reference/statements/alter/column#add-column)を介したスキーマ変更で対処できる場合、これらは**静的**と見なされます。これは、いくつかのJSONドキュメントに提供されるのはキーのサブセットのみであるオブジェクトを含みます。新しいキーが頻繁に追加されるオブジェクトや予測不可能な場合は、**動的**と見なすべきです。**ここでの例外は、数百または数千のサブキーを持つ構造であり、便利さのために動的と見なすことができます**。 - -値が**静的**か**動的**かを確認するには、以下の関連セクション[**静的オブジェクトの取り扱い**](/integrations/data-formats/json/schema#handling-static-structures)および[**動的オブジェクトの取り扱い**](/integrations/data-formats/json/schema#handling-semi-structured-dynamic-structures)を参照してください。 - -
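補足として、手元のサンプルデータに対してスキーマ推論を実行し、各キーがどの型として推論されるかを一度確認してから上記のルールを適用するのも有効です。以下は元の手順にはない簡単なスケッチで、ファイル名 `people.jsonl` は仮のものです。実際のサンプルファイルに置き換えてください。

```sql
-- サンプルファイルに対する型推論の結果を確認する(ファイル名は仮定)
DESCRIBE file('people.jsonl', JSONEachRow)
SETTINGS input_format_max_rows_to_read_for_schema_inference = 1000;
```

行によって型が揺れるキーや、今後増えることが予想されるキーは、動的として扱う候補になります。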

- -**重要:** 上記のルールは再帰的に適用する必要があります。キーの値が動的であると判断された場合、さらなる評価は必要なく、[**動的オブジェクトの取り扱い**](/integrations/data-formats/json/schema#handling-semi-structured-dynamic-structures)のガイドラインに従うことができます。オブジェクトが静的な場合は、サブキーを評価し続け、キーの値がプリミティブであるか動的キーが見つかるまで続けます。 - -これらのルールを示すために、以下のJSON例を使用して人格を表現します: - -```json -{ - "id": 1, - "name": "Clicky McCliickHouse", - "username": "Clicky", - "email": "clicky@clickhouse.com", - "address": [ - { - "street": "Victor Plains", - "suite": "Suite 879", - "city": "Wisokyburgh", - "zipcode": "90566-7771", - "geo": { - "lat": -43.9509, - "lng": -34.4618 - } - } - ], - "phone_numbers": [ - "010-692-6593", - "020-192-3333" - ], - "website": "clickhouse.com", - "company": { - "name": "ClickHouse", - "catchPhrase": "The real-time data warehouse for analytics", - "labels": { - "type": "database systems", - "founded": "2021" - } - }, - "dob": "2007-03-31", - "tags": { - "hobby": "Databases", - "holidays": [ - { - "year": 2024, - "location": "Azores, Portugal" - } - ], - "car": { - "model": "Tesla", - "year": 2023 - } - } -} -``` - -これらのルールを適用すると: - -- ルートキー`name`、`username`、`email`、`website`はタイプ`String`として表現できます。カラム`phone_numbers`は型`Array(String)`のプリミティブの配列で、`dob`と`id`はそれぞれタイプ`Date`と`UInt32`です。 -- `address`オブジェクトに新しいキーが追加されることはなく(新しい住所オブジェクトのみ)、したがってそれは**静的**と見なされます。再帰処理を続けると、すべてのサブカラムはプリミティブ(タイプ`String`)と見なすことができますが、`geo`を除く。これもまた、2つの`Float32`カラム(`lat`と`lon`)を持つ静的構造です。 -- `tags`カラムは**動的**です。このオブジェクトに新しい任意のタグが追加されることを想定します。 -- `company`オブジェクトは**静的**で、常に指定された3つのキーしか含まれません。サブキー`name`と`catchPhrase`は`String`タイプです。キー`labels`は**動的**です。このオブジェクトに新しい任意のタグが追加されることを想定しています。値は常に文字列タイプのキーと値のペアになります。 - -:::note -数百または数千の静的キーを持つ構造は動的と見なすことができます。これは、これらに対して静的にカラムを宣言することは現実的ではありません。ただし、可能であれば、ストレージと推論のオーバーヘッドを節約するために[スキップパス](#using-type-hints-and-skipping-paths)を使用してください。 -::: - -## 静的な構造の取り扱い {#handling-static-structures} - -静的な構造は、名前付きタプル、すなわち`Tuple`を使用して処理することを推奨します。オブジェクトの配列は、タプルの配列、すなわち`Array(Tuple)`を使用して保持できます。タプル内でも、カラムとそのそれぞれの型は同じルールを使用して定義する必要があります。これにより、以下に示すようにネストされたオブジェクトを表すためのネストされたタプルが作成される可能性があります。 - -これを説明するために、動的オブジェクトを省略した先のJSON人物例を使用します: - -```json -{ - "id": 1, - "name": "Clicky McCliickHouse", - "username": "Clicky", - "email": "clicky@clickhouse.com", - "address": [ - { - "street": "Victor Plains", - "suite": "Suite 879", - "city": "Wisokyburgh", - "zipcode": "90566-7771", - "geo": { - "lat": -43.9509, - "lng": -34.4618 - } - } - ], - "phone_numbers": [ - "010-692-6593", - "020-192-3333" - ], - "website": "clickhouse.com", - "company": { - "name": "ClickHouse", - "catchPhrase": "The real-time data warehouse for analytics" - }, - "dob": "2007-03-31" -} -``` - -このテーブルのスキーマは以下のようになります: - -```sql -CREATE TABLE people -( - `id` Int64, - `name` String, - `username` String, - `email` String, - `address` Array(Tuple(city String, geo Tuple(lat Float32, lng Float32), street String, suite String, zipcode String)), - `phone_numbers` Array(String), - `website` String, - `company` Tuple(catchPhrase String, name String), - `dob` Date -) -ENGINE = MergeTree -ORDER BY username -``` - -`company`カラムが`Tuple(catchPhrase String, name String)`として定義されていることに注目してください。`address`キーは`Array(Tuple)`を使用し、`geo`カラムを表現するためにネストされた`Tuple`を使用します。 - -現在の構造のJSONをこのテーブルに挿入できます: - -```sql -INSERT INTO people FORMAT JSONEachRow -{"id":1,"name":"Clicky McCliickHouse","username":"Clicky","email":"clicky@clickhouse.com","address":[{"street":"Victor Plains","suite":"Suite 
879","city":"Wisokyburgh","zipcode":"90566-7771","geo":{"lat":-43.9509,"lng":-34.4618}}],"phone_numbers":["010-692-6593","020-192-3333"],"website":"clickhouse.com","company":{"name":"ClickHouse","catchPhrase":"The real-time data warehouse for analytics"},"dob":"2007-03-31"} -``` - -上記の例では、データが最小限ですが、以下に示すように、タプルカラムをその期間区切り名でクエリできます。 - -```sql -SELECT - address.street, - company.name -FROM people - -┌─address.street────┬─company.name─┐ -│ ['Victor Plains'] │ ClickHouse │ -└───────────────────┴──────────────┘ -``` - -`address.street`カラムが`Array`として返される点に注意してください。配列内の特定のオブジェクトに位置でアクセスするには、カラム名の後に配列オフセットを指定する必要があります。たとえば、最初の住所から通りにアクセスするには: - -```sql -SELECT address.street[1] AS street -FROM people - -┌─street────────┐ -│ Victor Plains │ -└───────────────┘ - -1 row in set. Elapsed: 0.001 sec. -``` - -サブカラムは、[`24.12`](https://clickhouse.com/blog/clickhouse-release-24-12#json-subcolumns-as-table-primary-key)からのキーの順序付けにも使用できます: - -```sql -CREATE TABLE people -( - `id` Int64, - `name` String, - `username` String, - `email` String, - `address` Array(Tuple(city String, geo Tuple(lat Float32, lng Float32), street String, suite String, zipcode String)), - `phone_numbers` Array(String), - `website` String, - `company` Tuple(catchPhrase String, name String), - `dob` Date -) -ENGINE = MergeTree -ORDER BY company.name -``` -### デフォルト値の処理 {#handling-default-values} - -JSONオブジェクトが構造化されている場合でも、提供されるキーのサブセットのみでスパースなことがよくあります。幸いにも、`Tuple`型はJSONペイロード内のすべてのカラムを必要としません。提供されていない場合は、デフォルト値が使用されます。 - -先の`people`テーブルと、キー`suite`、`geo`、`phone_numbers`、および`catchPhrase`が欠けている次のスパースJSONを考えます。 - -```json -{ - "id": 1, - "name": "Clicky McCliickHouse", - "username": "Clicky", - "email": "clicky@clickhouse.com", - "address": [ - { - "street": "Victor Plains", - "city": "Wisokyburgh", - "zipcode": "90566-7771" - } - ], - "website": "clickhouse.com", - "company": { - "name": "ClickHouse" - }, - "dob": "2007-03-31" -} -``` - -以下のように、この行を正常に挿入できることがわかります: - -```sql -INSERT INTO people FORMAT JSONEachRow -{"id":1,"name":"Clicky McCliickHouse","username":"Clicky","email":"clicky@clickhouse.com","address":[{"street":"Victor Plains","city":"Wisokyburgh","zipcode":"90566-7771"}],"website":"clickhouse.com","company":{"name":"ClickHouse"},"dob":"2007-03-31"} - -Ok. - -1 row in set. Elapsed: 0.002 sec. -``` - -この単一行をクエリすると、欠落したカラム(サブオブジェクトを含む)にデフォルト値が使用されることがわかります: - -```sql -SELECT * -FROM people -FORMAT PrettyJSONEachRow - -{ - "id": "1", - "name": "Clicky McCliickHouse", - "username": "Clicky", - "email": "clicky@clickhouse.com", - "address": [ - { - "city": "Wisokyburgh", - "geo": { - "lat": 0, - "lng": 0 - }, - "street": "Victor Plains", - "suite": "", - "zipcode": "90566-7771" - } - ], - "phone_numbers": [], - "website": "clickhouse.com", - "company": { - "catchPhrase": "", - "name": "ClickHouse" - }, - "dob": "2007-03-31" -} - -1 row in set. Elapsed: 0.001 sec. 
-``` - -:::note 空とnullの区別 -ユーザーが値が空であることと提供されていないことを区別する必要がある場合、[Nullable](/sql-reference/data-types/nullable)型を使用できます。これは、ストレージとクエリのパフォーマンスに悪影響を与えるため、絶対に必要ない限り[避けるべきです](/best-practices/select-data-types#avoid-nullable-columns)。 -::: -### 新しいカラムの扱い {#handling-new-columns} - -静的なJSONキーの場合、構造化されたアプローチが最も簡単ですが、スキーマに対する変更を計画できる場合(すなわち、新しいキーが事前に知られていて、それに応じてスキーマを変更できる場合)でもこのアプローチを使用できます。 - -ClickHouseは、デフォルトでペイロード内に提供され、スキーマに存在しないJSONキーを無視することに注意してください。次の修正されたJSONペイロードの`nickname`キーが追加された場合を考えます: - -```json -{ - "id": 1, - "name": "Clicky McCliickHouse", - "nickname": "Clicky", - "username": "Clicky", - "email": "clicky@clickhouse.com", - "address": [ - { - "street": "Victor Plains", - "suite": "Suite 879", - "city": "Wisokyburgh", - "zipcode": "90566-7771", - "geo": { - "lat": -43.9509, - "lng": -34.4618 - } - } - ], - "phone_numbers": [ - "010-692-6593", - "020-192-3333" - ], - "website": "clickhouse.com", - "company": { - "name": "ClickHouse", - "catchPhrase": "The real-time data warehouse for analytics" - }, - "dob": "2007-03-31" -} -``` - -このJSONは、`nickname`キーが無視されて正常に挿入されます: - -```sql -INSERT INTO people FORMAT JSONEachRow -{"id":1,"name":"Clicky McCliickHouse","nickname":"Clicky","username":"Clicky","email":"clicky@clickhouse.com","address":[{"street":"Victor Plains","suite":"Suite 879","city":"Wisokyburgh","zipcode":"90566-7771","geo":{"lat":-43.9509,"lng":-34.4618}}],"phone_numbers":["010-692-6593","020-192-3333"],"website":"clickhouse.com","company":{"name":"ClickHouse","catchPhrase":"The real-time data warehouse for analytics"},"dob":"2007-03-31"} - -Ok. - -1 row in set. Elapsed: 0.002 sec. -``` - -[`ALTER TABLE ADD COLUMN`](/sql-reference/statements/alter/column#add-column)コマンドを使用してスキーマにカラムを追加できます。`DEFAULT`句を使用してデフォルトを指定できます。これは、後続の挿入中に指定されていない場合に使用されます。この値が存在しない行(それは作成前に挿入されたため)もこのデフォルト値を返します。デフォルト値が指定されていない場合、型のデフォルト値が使用されます。 - -例えば: - -```sql --- 初期行を挿入します(nicknameは無視されます) -INSERT INTO people FORMAT JSONEachRow -{"id":1,"name":"Clicky McCliickHouse","nickname":"Clicky","username":"Clicky","email":"clicky@clickhouse.com","address":[{"street":"Victor Plains","suite":"Suite 879","city":"Wisokyburgh","zipcode":"90566-7771","geo":{"lat":-43.9509,"lng":-34.4618}}],"phone_numbers":["010-692-6593","020-192-3333"],"website":"clickhouse.com","company":{"name":"ClickHouse","catchPhrase":"The real-time data warehouse for analytics"},"dob":"2007-03-31"} - --- カラムを追加します -ALTER TABLE people - (ADD COLUMN `nickname` String DEFAULT 'no_nickname') - --- 新しい行を挿入します(同じデータで異なるid) -INSERT INTO people FORMAT JSONEachRow -{"id":2,"name":"Clicky McCliickHouse","nickname":"Clicky","username":"Clicky","email":"clicky@clickhouse.com","address":[{"street":"Victor Plains","suite":"Suite 879","city":"Wisokyburgh","zipcode":"90566-7771","geo":{"lat":-43.9509,"lng":-34.4618}}],"phone_numbers":["010-692-6593","020-192-3333"],"website":"clickhouse.com","company":{"name":"ClickHouse","catchPhrase":"The real-time data warehouse for analytics"},"dob":"2007-03-31"} - --- 2行を選択します -SELECT id, nickname FROM people - -┌─id─┬─nickname────┐ -│ 2 │ Clicky │ -│ 1 │ no_nickname │ -└────┴─────────────┘ - -2 rows in set. Elapsed: 0.001 sec. 
-``` -## 半構造化/動的構造の取り扱い {#handling-semi-structured-dynamic-structures} - - - -JSONデータが半構造化されており、キーが動的に追加できたり、複数の型を持つ場合は、[`JSON`](/sql-reference/data-types/newjson)型を推奨します。 - -特に、データに以下の条件がある場合はJSON型を使用します: - -- **予測不可能なキー**を持ち、時間と共に変わる可能性がある。 -- **異なる型の値**(例えば、パスが時々文字列を含み、時々数値を含む)を含む。 -- 厳密な型指定が実現できないスキーマ柔軟性が必要。 -- 幾つかの**静的なパス**があるが明示的に宣言することは現実的ではない場合。これは稀である傾向があります。 - -先の[人物JSON](/integrations/data-formats/json/schema#static-vs-dynamic-json)では、`company.labels`オブジェクトが動的であると判断されました。 - -`company.labels`が任意のキーを含むと仮定しましょう。さらに、この構造内の任意のキーの型は行ごとに一貫していない可能性があります。例えば: - -```json -{ - "id": 1, - "name": "Clicky McCliickHouse", - "username": "Clicky", - "email": "clicky@clickhouse.com", - "address": [ - { - "street": "Victor Plains", - "suite": "Suite 879", - "city": "Wisokyburgh", - "zipcode": "90566-7771", - "geo": { - "lat": -43.9509, - "lng": -34.4618 - } - } - ], - "phone_numbers": [ - "010-692-6593", - "020-192-3333" - ], - "website": "clickhouse.com", - "company": { - "name": "ClickHouse", - "catchPhrase": "The real-time data warehouse for analytics", - "labels": { - "type": "database systems", - "founded": "2021", - "employees": 250 - } - }, - "dob": "2007-03-31", - "tags": { - "hobby": "Databases", - "holidays": [ - { - "year": 2024, - "location": "Azores, Portugal" - } - ], - "car": { - "model": "Tesla", - "year": 2023 - } - } -} -``` - -```json -{ - "id": 2, - "name": "Analytica Rowe", - "username": "Analytica", - "address": [ - { - "street": "Maple Avenue", - "suite": "Apt. 402", - "city": "Dataford", - "zipcode": "11223-4567", - "geo": { - "lat": 40.7128, - "lng": -74.006 - } - } - ], - "phone_numbers": [ - "123-456-7890", - "555-867-5309" - ], - "website": "fastdata.io", - "company": { - "name": "FastData Inc.", - "catchPhrase": "Streamlined analytics at scale", - "labels": { - "type": [ - "real-time processing" - ], - "founded": 2019, - "dissolved": 2023, - "employees": 10 - } - }, - "dob": "1992-07-15", - "tags": { - "hobby": "Running simulations", - "holidays": [ - { - "year": 2023, - "location": "Kyoto, Japan" - } - ], - "car": { - "model": "Audi e-tron", - "year": 2022 - } - } -} -``` - -`company.labels`カラムの動的な性質を考慮するにあたり、以下のようなオプションでこのデータをモデル化できます: - -- **単一JSONカラム** - スキーマ全体を単一の`JSON`カラムとして表すことで、すべての構造がその下で動的になります。 -- **ターゲットJSONカラム** - `company.labels`カラムにのみ`JSON`型を使用し、他のすべてのカラムに対して上記の構造化されたスキーマを維持します。 - -最初のアプローチ[は先の方法論と一致しません](#static-vs-dynamic-json)が、単一のJSONカラムアプローチはプロトタイピングやデータエンジニアリングタスクに役立ちます。 - -ClickHouseのスケールでの本番展開では、構造を明示的にし、可能であれば動的なサブ構造に対してJSON型を使用することを推奨します。 - -厳密なスキーマには多くの利点があります: - -- **データ検証** - 厳密なスキーマを強制することで、特定の構造を除いてカラムの爆発のリスクを回避します。 -- **カラムの爆発のリスクを回避** - JSON型は潜在的に千のカラムにスケールしますが、サブカラムが専用カラムとして保存される場合、数えきれないカラムファイルが作成され、パフォーマンスに影響を与える可能性があります。これを軽減するために、JSONで使用される基本の[Dynamic type](/sql-reference/data-types/dynamic)には、個別のカラムファイルとして保存されるユニークなパスの数を制限する[`max_dynamic_paths`](/sql-reference/data-types/newjson#reading-json-paths-as-sub-columns)パラメータがあります。閾値に達すると、追加のパスはコンパクトなエンコーディング形式を使用して共有カラムファイルに保存され、パフォーマンスとストレージの効率を維持しながら、柔軟なデータ取り込みをサポートします。ただし、この共有カラムファイルへのアクセスは、パフォーマンスが劣ることがあります。ただし、JSONカラムは[タイプヒント](#using-type-hints-and-skipping-paths)と共に使用できます。「ヒント付け」されたカラムは、専用のカラムと同じパフォーマンスを提供します。 -- **パスと型の簡単な内省** - JSON型は、推論された型とパスを特定するための[内省関数](/sql-reference/data-types/newjson#introspection-functions)をサポートしていますが、静的構造は探るのに簡単です。例えば`DESCRIBE`を使って。 - -### 単一JSONカラム {#single-json-column} - -このアプローチはプロトタイピングやデータエンジニアリングタスクに役立ちます。本番では、必要に応じて動的なサブ構造にのみ`JSON`を使用するようにしてください。 - -:::note パフォーマンスの考慮 
-単一のJSONカラムは、必要でないJSONパスをスキップ(保存しない)することで最適化できます。また、[タイプヒント](#using-type-hints-and-skipping-paths)を使用することもできます。タイプヒントを使用することで、ユーザーはサブカラムの型を明示的に定義でき、推論と間接処理をクエリ時にスキップできます。これにより、明示的なスキーマを使用している場合と同じパフォーマンスを提供できます。[“タイプヒントを使用してパスをスキップする”](#using-type-hints-and-skipping-paths)の詳細を参照してください。 -::: - -単一JSONカラムのスキーマは次のようにシンプルです: - -```sql -SET enable_json_type = 1; - -CREATE TABLE people -( - `json` JSON(username String) -) -ENGINE = MergeTree -ORDER BY json.username; -``` - -:::note -`username`カラムのJSON定義には[タイプヒント](#using-type-hints-and-skipping-paths)を提供しています。これは、順序付け/主キーで使用するためです。これにより、ClickHouseはこのカラムがnullにならないことを知り、使用すべき`username`サブカラムを把握します(各タイプごとに複数存在する可能性があるため、さもなければあいまいです)。 -::: - -上記のテーブルに行を挿入するには、`JSONAsObject`形式を使用できます: - -```sql -INSERT INTO people FORMAT JSONAsObject -{"id":1,"name":"Clicky McCliickHouse","username":"Clicky","email":"clicky@clickhouse.com","address":[{"street":"Victor Plains","suite":"Suite 879","city":"Wisokyburgh","zipcode":"90566-7771","geo":{"lat":-43.9509,"lng":-34.4618}}],"phone_numbers":["010-692-6593","020-192-3333"],"website":"clickhouse.com","company":{"name":"ClickHouse","catchPhrase":"The real-time data warehouse for analytics","labels":{"type":"database systems","founded":"2021","employees":250}},"dob":"2007-03-31","tags":{"hobby":"Databases","holidays":[{"year":2024,"location":"Azores, Portugal"}],"car":{"model":"Tesla","year":2023}}} - -1 row in set. Elapsed: 0.028 sec. - -INSERT INTO people FORMAT JSONAsObject -{"id":2,"name":"Analytica Rowe","username":"Analytica","address":[{"street":"Maple Avenue","suite":"Apt. 402","city":"Dataford","zipcode":"11223-4567","geo":{"lat":40.7128,"lng":-74.006}}],"phone_numbers":["123-456-7890","555-867-5309"],"website":"fastdata.io","company":{"name":"FastData Inc.","catchPhrase":"Streamlined analytics at scale","labels":{"type":["real-time processing"],"founded":2019,"dissolved":2023,"employees":10}},"dob":"1992-07-15","tags":{"hobby":"Running simulations","holidays":[{"year":2023,"location":"Kyoto, Japan"}],"car":{"model":"Audi e-tron","year":2022}}} - -1 row in set. Elapsed: 0.004 sec. -``` - - -```sql -SELECT * -FROM people -FORMAT Vertical - -Row 1: -────── -json: {"address":[{"city":"Dataford","geo":{"lat":40.7128,"lng":-74.006},"street":"Maple Avenue","suite":"Apt. 402","zipcode":"11223-4567"}],"company":{"catchPhrase":"Streamlined analytics at scale","labels":{"dissolved":"2023","employees":"10","founded":"2019","type":["real-time processing"]},"name":"FastData Inc."},"dob":"1992-07-15","id":"2","name":"Analytica Rowe","phone_numbers":["123-456-7890","555-867-5309"],"tags":{"car":{"model":"Audi e-tron","year":"2022"},"hobby":"Running simulations","holidays":[{"location":"Kyoto, Japan","year":"2023"}]},"username":"Analytica","website":"fastdata.io"} - -Row 2: -────── -json: {"address":[{"city":"Wisokyburgh","geo":{"lat":-43.9509,"lng":-34.4618},"street":"Victor Plains","suite":"Suite 879","zipcode":"90566-7771"}],"company":{"catchPhrase":"The real-time data warehouse for analytics","labels":{"employees":"250","founded":"2021","type":"database systems"},"name":"ClickHouse"},"dob":"2007-03-31","email":"clicky@clickhouse.com","id":"1","name":"Clicky McCliickHouse","phone_numbers":["010-692-6593","020-192-3333"],"tags":{"car":{"model":"Tesla","year":"2023"},"hobby":"Databases","holidays":[{"location":"Azores, Portugal","year":"2024"}]},"username":"Clicky","website":"clickhouse.com"} - -2 rows in set. Elapsed: 0.005 sec. 
-``` - -推論されたサブカラムとその型をは、[内省関数](/sql-reference/data-types/newjson#introspection-functions)を使用して決定できます。例えば: - -```sql -SELECT JSONDynamicPathsWithTypes(json) as paths -FROM people -FORMAT PrettyJsonEachRow - -{ - "paths": { - "address": "Array(JSON(max_dynamic_types=16, max_dynamic_paths=256))", - "company.catchPhrase": "String", - "company.labels.employees": "Int64", - "company.labels.founded": "String", - "company.labels.type": "String", - "company.name": "String", - "dob": "Date", - "email": "String", - "id": "Int64", - "name": "String", - "phone_numbers": "Array(Nullable(String))", - "tags.car.model": "String", - "tags.car.year": "Int64", - "tags.hobby": "String", - "tags.holidays": "Array(JSON(max_dynamic_types=16, max_dynamic_paths=256))", - "website": "String" - } -} -{ - "paths": { - "address": "Array(JSON(max_dynamic_types=16, max_dynamic_paths=256))", - "company.catchPhrase": "String", - "company.labels.dissolved": "Int64", - "company.labels.employees": "Int64", - "company.labels.founded": "Int64", - "company.labels.type": "Array(Nullable(String))", - "company.name": "String", - "dob": "Date", - "id": "Int64", - "name": "String", - "phone_numbers": "Array(Nullable(String))", - "tags.car.model": "String", - "tags.car.year": "Int64", - "tags.hobby": "String", - "tags.holidays": "Array(JSON(max_dynamic_types=16, max_dynamic_paths=256))", - "website": "String" - } -} - -2 rows in set. Elapsed: 0.009 sec. -``` - -内省関数による完全なリストについては、["内省関数"](/sql-reference/data-types/newjson#introspection-functions)を参照してください。 - -[サブパスにアクセスできます](/sql-reference/data-types/newjson#reading-json-paths-as-sub-columns) `.`記法を使用して、例えば: - -```sql -SELECT json.name, json.email FROM people - -┌─json.name────────────┬─json.email────────────┐ -│ Analytica Rowe │ ᴺᵁᴺᴺ │ -│ Clicky McCliickHouse │ clicky@clickhouse.com │ -└──────────────────────┴───────────────────────┘ - -2 rows in set. Elapsed: 0.006 sec. -``` - -行に欠けているカラムは`NULL`として返される点に注意してください。 - -さらに、同じ型のパスに対しては別々のサブカラムが作成されます。例えば、`company.labels.type`に対して、`String`型と`Array(Nullable(String))`型の両方にサブカラムが存在します。両方が可能な限り返されますが、特定のサブカラムを`.:`記法を使用してターゲットすることができます。 - -```sql -SELECT json.company.labels.type -FROM people - -┌─json.company.labels.type─┐ -│ database systems │ -│ ['real-time processing'] │ -└──────────────────────────┘ - -2 rows in set. Elapsed: 0.007 sec. - -SELECT json.company.labels.type.:String -FROM people - -┌─json.company⋯e.:`String`─┐ -│ ᴺᵁᴺᴺ │ -│ database systems │ -└──────────────────────────┘ - -2 rows in set. Elapsed: 0.009 sec. -``` - -ネストされたサブオブジェクトを返すには、`^`が必要です。これは、読み取るカラムの数が多すぎないようにするための設計上の選択です。明示的に要求されない限り、オブジェクトにアクセスすると`NULL`が返されるでしょう。 - -```sql --- サブオブジェクトはデフォルトで返されません -SELECT json.company.labels -FROM people - -┌─json.company.labels─┐ -│ ᴺᵁᴺᴺ │ -│ ᴺᵁᴺᴺ │ -└─────────────────────┘ - -2 rows in set. Elapsed: 0.002 sec. - --- `^`記法を使ってサブオブジェクトを返します -SELECT json.^company.labels -FROM people - -┌─json.^`company`.labels─────────────────────────────────────────────────────────────────┐ -│ {"employees":"250","founded":"2021","type":"database systems"} │ -│ {"dissolved":"2023","employees":"10","founded":"2019","type":["real-time processing"]} │ -└────────────────────────────────────────────────────────────────────────────────────────┘ - -2 rows in set. Elapsed: 0.004 sec. 
-``` -### ターゲットとした JSON カラム {#targeted-json-column} - -プロトタイピングやデータエンジニアリングの課題では便利ですが、可能な限りプロダクションでは明示的なスキーマを使用することをお勧めします。 - -以前の例は、`company.labels` カラムのための単一の `JSON` カラムでモデル化できます。 - -```sql -CREATE TABLE people -( - `id` Int64, - `name` String, - `username` String, - `email` String, - `address` Array(Tuple(city String, geo Tuple(lat Float32, lng Float32), street String, suite String, zipcode String)), - `phone_numbers` Array(String), - `website` String, - `company` Tuple(catchPhrase String, name String, labels JSON), - `dob` Date, - `tags` String -) -ENGINE = MergeTree -ORDER BY username -``` - -このテーブルには、`JSONEachRow` フォーマットを使用して挿入できます: - -```sql -INSERT INTO people FORMAT JSONEachRow -{"id":1,"name":"Clicky McCliickHouse","username":"Clicky","email":"clicky@clickhouse.com","address":[{"street":"Victor Plains","suite":"Suite 879","city":"Wisokyburgh","zipcode":"90566-7771","geo":{"lat":-43.9509,"lng":-34.4618}}],"phone_numbers":["010-692-6593","020-192-3333"],"website":"clickhouse.com","company":{"name":"ClickHouse","catchPhrase":"The real-time data warehouse for analytics","labels":{"type":"database systems","founded":"2021","employees":250}},"dob":"2007-03-31","tags":{"hobby":"Databases","holidays":[{"year":2024,"location":"Azores, Portugal"}],"car":{"model":"Tesla","year":2023}}} - -1 row in set. Elapsed: 0.450 sec. - -INSERT INTO people FORMAT JSONEachRow -{"id":2,"name":"Analytica Rowe","username":"Analytica","address":[{"street":"Maple Avenue","suite":"Apt. 402","city":"Dataford","zipcode":"11223-4567","geo":{"lat":40.7128,"lng":-74.006}}],"phone_numbers":["123-456-7890","555-867-5309"],"website":"fastdata.io","company":{"name":"FastData Inc.","catchPhrase":"Streamlined analytics at scale","labels":{"type":["real-time processing"],"founded":2019,"dissolved":2023,"employees":10}},"dob":"1992-07-15","tags":{"hobby":"Running simulations","holidays":[{"year":2023,"location":"Kyoto, Japan"}],"car":{"model":"Audi e-tron","year":2022}}} - -1 row in set. Elapsed: 0.440 sec. -``` - -```sql -SELECT * -FROM people -FORMAT Vertical - -Row 1: -────── -id: 2 -name: Analytica Rowe -username: Analytica -email: -address: [('Dataford',(40.7128,-74.006),'Maple Avenue','Apt. 402','11223-4567')] -phone_numbers: ['123-456-7890','555-867-5309'] -website: fastdata.io -company: ('Streamlined analytics at scale','FastData Inc.','{"dissolved":"2023","employees":"10","founded":"2019","type":["real-time processing"]}') -dob: 1992-07-15 -tags: {"hobby":"Running simulations","holidays":[{"year":2023,"location":"Kyoto, Japan"}],"car":{"model":"Audi e-tron","year":2022}} - -Row 2: -────── -id: 1 -name: Clicky McCliickHouse -username: Clicky -email: clicky@clickhouse.com -address: [('Wisokyburgh',(-43.9509,-34.4618),'Victor Plains','Suite 879','90566-7771')] -phone_numbers: ['010-692-6593','020-192-3333'] -website: clickhouse.com -company: ('The real-time data warehouse for analytics','ClickHouse','{"employees":"250","founded":"2021","type":"database systems"}') -dob: 2007-03-31 -tags: {"hobby":"Databases","holidays":[{"year":2024,"location":"Azores, Portugal"}],"car":{"model":"Tesla","year":2023}} - -2 rows in set. Elapsed: 0.005 sec. 
-``` - -[Introspection functions](/sql-reference/data-types/newjson#introspection-functions)を使用して、`company.labels` カラムの推測されたパスとタイプを確認できます。 - -```sql -SELECT JSONDynamicPathsWithTypes(company.labels) AS paths -FROM people -FORMAT PrettyJsonEachRow - -{ - "paths": { - "dissolved": "Int64", - "employees": "Int64", - "founded": "Int64", - "type": "Array(Nullable(String))" - } -} -{ - "paths": { - "employees": "Int64", - "founded": "String", - "type": "String" - } -} - -2 rows in set. Elapsed: 0.003 sec. -``` -### 型ヒントとパスのスキップを使用する {#using-type-hints-and-skipping-paths} - -型ヒントを使用することで、パスおよびそのサブカラムのタイプを指定し、不必要な型推論を防ぐことができます。以下の例を考えると、JSON カラム `company.labels` 内の JSON キー `dissolved`、`employees`、`founded` のタイプを指定します。 - -```sql -CREATE TABLE people -( - `id` Int64, - `name` String, - `username` String, - `email` String, - `address` Array(Tuple( - city String, - geo Tuple( - lat Float32, - lng Float32), - street String, - suite String, - zipcode String)), - `phone_numbers` Array(String), - `website` String, - `company` Tuple( - catchPhrase String, - name String, - labels JSON(dissolved UInt16, employees UInt16, founded UInt16)), - `dob` Date, - `tags` String -) -ENGINE = MergeTree -ORDER BY username -``` - -```sql -INSERT INTO people FORMAT JSONEachRow -{"id":1,"name":"Clicky McCliickHouse","username":"Clicky","email":"clicky@clickhouse.com","address":[{"street":"Victor Plains","suite":"Suite 879","city":"Wisokyburgh","zipcode":"90566-7771","geo":{"lat":-43.9509,"lng":-34.4618}}],"phone_numbers":["010-692-6593","020-192-3333"],"website":"clickhouse.com","company":{"name":"ClickHouse","catchPhrase":"The real-time data warehouse for analytics","labels":{"type":"database systems","founded":"2021","employees":250}},"dob":"2007-03-31","tags":{"hobby":"Databases","holidays":[{"year":2024,"location":"Azores, Portugal"}],"car":{"model":"Tesla","year":2023}}} - -1 row in set. Elapsed: 0.450 sec. - -INSERT INTO people FORMAT JSONEachRow -{"id":2,"name":"Analytica Rowe","username":"Analytica","address":[{"street":"Maple Avenue","suite":"Apt. 402","city":"Dataford","zipcode":"11223-4567","geo":{"lat":40.7128,"lng":-74.006}}],"phone_numbers":["123-456-7890","555-867-5309"],"website":"fastdata.io","company":{"name":"FastData Inc.","catchPhrase":"Streamlined analytics at scale","labels":{"type":["real-time processing"],"founded":2019,"dissolved":2023,"employees":10}},"dob":"1992-07-15","tags":{"hobby":"Running simulations","holidays":[{"year":2023,"location":"Kyoto, Japan"}],"car":{"model":"Audi e-tron","year":2022}}} - -1 row in set. Elapsed: 0.440 sec. -``` - -これらのカラムには、今や明示的な型があります: - -```sql -SELECT JSONAllPathsWithTypes(company.labels) AS paths -FROM people -FORMAT PrettyJsonEachRow - -{ - "paths": { - "dissolved": "UInt16", - "employees": "UInt16", - "founded": "UInt16", - "type": "String" - } -} -{ - "paths": { - "dissolved": "UInt16", - "employees": "UInt16", - "founded": "UInt16", - "type": "Array(Nullable(String))" - } -} - -2 rows in set. Elapsed: 0.003 sec. 
-``` - -さらに、私たちは、ストレージを最小化し、不要なパスの推論を避けるために、[`SKIP` および `SKIP REGEXP`](/sql-reference/data-types/newjson) パラメータを使用して、保存したくない JSON 内のパスをスキップすることができます。たとえば、上記のデータに対して単一の JSON カラムを使用する場合を考えてみましょう。`address` と `company` パスをスキップできます: - -```sql -CREATE TABLE people -( - `json` JSON(username String, SKIP address, SKIP company) -) -ENGINE = MergeTree -ORDER BY json.username - -INSERT INTO people FORMAT JSONAsObject -{"id":1,"name":"Clicky McCliickHouse","username":"Clicky","email":"clicky@clickhouse.com","address":[{"street":"Victor Plains","suite":"Suite 879","city":"Wisokyburgh","zipcode":"90566-7771","geo":{"lat":-43.9509,"lng":-34.4618}}],"phone_numbers":["010-692-6593","020-192-3333"],"website":"clickhouse.com","company":{"name":"ClickHouse","catchPhrase":"The real-time data warehouse for analytics","labels":{"type":"database systems","founded":"2021","employees":250}},"dob":"2007-03-31","tags":{"hobby":"Databases","holidays":[{"year":2024,"location":"Azores, Portugal"}],"car":{"model":"Tesla","year":2023}}} - -1 row in set. Elapsed: 0.450 sec. - -INSERT INTO people FORMAT JSONAsObject -{"id":2,"name":"Analytica Rowe","username":"Analytica","address":[{"street":"Maple Avenue","suite":"Apt. 402","city":"Dataford","zipcode":"11223-4567","geo":{"lat":40.7128,"lng":-74.006}}],"phone_numbers":["123-456-7890","555-867-5309"],"website":"fastdata.io","company":{"name":"FastData Inc.","catchPhrase":"Streamlined analytics at scale","labels":{"type":["real-time processing"],"founded":2019,"dissolved":2023,"employees":10}},"dob":"1992-07-15","tags":{"hobby":"Running simulations","holidays":[{"year":2023,"location":"Kyoto, Japan"}],"car":{"model":"Audi e-tron","year":2022}}} - -1 row in set. Elapsed: 0.440 sec. -``` - -私たちのカラムにデータが除外されていることに注目してください: - -```sql - -SELECT * -FROM people -FORMAT PrettyJSONEachRow - -{ - "json": { - "dob" : "1992-07-15", - "id" : "2", - "name" : "Analytica Rowe", - "phone_numbers" : [ - "123-456-7890", - "555-867-5309" - ], - "tags" : { - "car" : { - "model" : "Audi e-tron", - "year" : "2022" - }, - "hobby" : "Running simulations", - "holidays" : [ - { - "location" : "Kyoto, Japan", - "year" : "2023" - } - ] - }, - "username" : "Analytica", - "website" : "fastdata.io" - } -} -{ - "json": { - "dob" : "2007-03-31", - "email" : "clicky@clickhouse.com", - "id" : "1", - "name" : "Clicky McCliickHouse", - "phone_numbers" : [ - "010-692-6593", - "020-192-3333" - ], - "tags" : { - "car" : { - "model" : "Tesla", - "year" : "2023" - }, - "hobby" : "Databases", - "holidays" : [ - { - "location" : "Azores, Portugal", - "year" : "2024" - } - ] - }, - "username" : "Clicky", - "website" : "clickhouse.com" - } -} - -2 rows in set. Elapsed: 0.004 sec. 
-``` -#### 型ヒントでパフォーマンスを最適化する {#optimizing-performance-with-type-hints} - -型ヒントは、不必要な型推論を回避する方法以上のものを提供します - ストレージと処理の間接を完全に排除し、[最適なプリミティブ型](/data-modeling/schema-design#optimizing-types)を指定できるようにします。型ヒントを持つ JSON パスは、常に従来のカラムのように保存され、[**識別子カラム**](https://clickhouse.com/blog/a-new-powerful-json-data-type-for-clickhouse#storage-extension-for-dynamically-changing-data)やクエリ時の動的解決の必要性を回避します。 - -これにより、定義された型ヒントを使用すれば、ネストされた JSON キーは、最初から最上位カラムとしてモデル化されている場合と同じパフォーマンスと効率を実現します。 - -その結果、ほとんど一貫しているが、JSON の柔軟性から利益を得るデータセットに対して、型ヒントはスキーマやインジェストパイプラインを再構築する必要なくパフォーマンスを維持する便利な方法を提供します。 -### ダイナミックパスの設定 {#configuring-dynamic-paths} - -ClickHouse は、各 JSON パスを真の列指向レイアウトでサブカラムとして保存し、従来のカラムと同様のパフォーマンス上の利点(圧縮、SIMD 加速処理、最小限のディスク I/O など)を可能にします。JSON データ内の各ユニークなパスと型の組み合わせは、ディスク上でそれ自身のカラムファイルになります。 - -Column per JSON path - -たとえば、異なる型で 2 つの JSON パスが挿入されると、ClickHouse はそれぞれの[具体的な型の値を異なるサブカラムに保存します](https://clickhouse.com/blog/a-new-powerful-json-data-type-for-clickhouse#storage-extension-for-dynamically-changing-data)。これらのサブカラムには独立してアクセスでき、不必要な I/O を最小限に抑えます。複数の型を持つカラムをクエリする際、値は依然として単一の列指向応答として返されます。 - -さらに、オフセットを活用することで、ClickHouse はこれらのサブカラムが密度を保つようにし、存在しない JSON パスのためにデフォルト値を保存しません。このアプローチは圧縮を最大化し、さらに I/O を削減します。 - -JSON offsets - -しかし、高いカーディナリティまたは高い変動のある JSON 構造(テレメトリパイプライン、ログ、または機械学習の特徴ストアなど)におけるシナリオでは、この動作はカラムファイルの爆発を引き起こす可能性があります。各新しいユニークな JSON パスは新しいカラムファイルをもたらし、各型バリアントはそのパスの下で追加のカラムファイルをもたらします。これはリードパフォーマンスには最適ですが、運用上の課題(ファイルディスクリプタの枯渇、メモリ使用量の増加、小さなファイルの数が多いためマージが遅くなる)を導入します。 - -これを軽減するために、ClickHouse はオーバーフローサブカラムの概念を導入します。異なる JSON パスの数が閾値を超えた場合、追加のパスはコンパクトにエンコードされた形式で単一の共有ファイルに保存されます。このファイルは依然としてクエリ可能ですが、専用のサブカラムと同じ性能特性の利益を享受することはありません。 - -Shared JSON column - -この閾値は、JSON 型宣言における[`max_dynamic_paths`](/sql-reference/data-types/newjson#reaching-the-limit-of-dynamic-paths-inside-json) パラメータで制御されます。 - -```sql -CREATE TABLE logs -( - payload JSON(max_dynamic_paths = 500) -) -ENGINE = MergeTree -ORDER BY tuple(); -``` - -**このパラメータを高すぎる設定は避けてください** - 大きな値はリソース消費を増加させ、効率を低下させます。一般的な指針として、10,000 を下回るように保ってください。高い動的構造を持つワークロードには、型ヒントと `SKIP` パラメータを使用して、保存されるものを制限してください。 - -この新しいカラム型の実装に興味があるユーザーには、私たちの詳細なブログ記事 ["ClickHouse のための新しい強力な JSON データ型"](https://clickhouse.com/blog/a-new-powerful-json-data-type-for-clickhouse) の読解をお勧めします。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/json/schema.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/json/schema.md.hash deleted file mode 100644 index 13364d46d53..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/json/schema.md.hash +++ /dev/null @@ -1 +0,0 @@ -6bf43ff793ca6a0a diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/parquet.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/parquet.md deleted file mode 100644 index 3893256d23f..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/parquet.md +++ /dev/null @@ -1,197 +0,0 @@ ---- -sidebar_label: 'Parquet' -sidebar_position: 3 -slug: '/integrations/data-formats/parquet' -title: 'ClickHouse における Parquet の操作' -description: 'ClickHouse で Parquet を操作する方法について説明したページ' ---- - - - - -# ClickHouseでのParquetの操作 - -Parquetは、列指向の方法でデータを保存するための効率的なファイル形式です。 -ClickHouseは、Parquetファイルの読み書きをサポートしています。 - -:::tip -クエリでファイルパスを参照する際、ClickHouseが読み込もうとする場所は、使用しているClickHouseのバリアントによって異なります。 - 
-[`clickhouse-local`](/operations/utilities/clickhouse-local.md)を使用している場合は、ClickHouse Localを起動した場所に対して相対的な位置から読み込むことになります。 -ClickHouse Serverまたは`clickhouse client`を介してClickHouse Cloudを使用している場合は、サーバー上の`/var/lib/clickhouse/user_files/`ディレクトリに対して相対的な位置から読み込みます。 -::: - -## Parquetからのインポート {#importing-from-parquet} - -データをロードする前に、[file()](/sql-reference/functions/files.md/#file)関数を使用して、[例のparquetファイル](assets/data.parquet)の構造を調べることができます: - -```sql -DESCRIBE TABLE file('data.parquet', Parquet); -``` - -2番目の引数として[Parquet](/interfaces/formats.md/#data-format-parquet)を使用しているため、ClickHouseはファイル形式を認識します。これにより、カラムの型が表示されます: - -```response -┌─name─┬─type─────────────┬─default_type─┬─default_expression─┬─comment─┬─codec_expression─┬─ttl_expression─┐ -│ path │ Nullable(String) │ │ │ │ │ │ -│ date │ Nullable(String) │ │ │ │ │ │ -│ hits │ Nullable(Int64) │ │ │ │ │ │ -└──────┴──────────────────┴──────────────┴────────────────────┴─────────┴──────────────────┴────────────────┘ -``` - -実際にデータをインポートする前に、SQLの力を利用してファイルを調べることもできます: - -```sql -SELECT * -FROM file('data.parquet', Parquet) -LIMIT 3; -``` -```response -┌─path──────────────────────┬─date───────┬─hits─┐ -│ Akiba_Hebrew_Academy │ 2017-08-01 │ 241 │ -│ Aegithina_tiphia │ 2018-02-01 │ 34 │ -│ 1971-72_Utah_Stars_season │ 2016-10-01 │ 1 │ -└───────────────────────────┴────────────┴──────┘ -``` - -:::tip -`file()`および`INFILE`/`OUTFILE`に対して明示的な形式設定をスキップすることができます。 -その場合、ClickHouseはファイル拡張子に基づいて形式を自動的に検出します。 -::: - -## 既存のテーブルへのインポート {#importing-to-an-existing-table} - -Parquetデータをインポートするために、テーブルを作成しましょう: - -```sql -CREATE TABLE sometable -( - `path` String, - `date` Date, - `hits` UInt32 -) -ENGINE = MergeTree -ORDER BY (date, path); -``` - -次に、`FROM INFILE`句を使用してデータをインポートできます: - -```sql -INSERT INTO sometable -FROM INFILE 'data.parquet' FORMAT Parquet; - -SELECT * -FROM sometable -LIMIT 5; -``` -```response -┌─path──────────────────────────┬───────date─┬─hits─┐ -│ 1988_in_philosophy │ 2015-05-01 │ 70 │ -│ 2004_Green_Bay_Packers_season │ 2015-05-01 │ 970 │ -│ 24_hours_of_lemans │ 2015-05-01 │ 37 │ -│ 25604_Karlin │ 2015-05-01 │ 20 │ -│ ASCII_ART │ 2015-05-01 │ 9 │ -└───────────────────────────────┴────────────┴──────┘ -``` - -ClickHouseがParquetの文字列(`date`カラム内)を`Date`型に自動的に変換したことに注意してください。これは、ClickHouseがターゲットテーブルの型に基づいて自動的に型キャストを行うからです。 - -## ローカルファイルをリモートサーバーに挿入する {#inserting-a-local-file-to-remote-server} - -ローカルのParquetファイルをリモートClickHouseサーバーに挿入したい場合、以下のようにファイルの内容を`clickhouse-client`にパイプして行うことができます: - -```sql -clickhouse client -q "INSERT INTO sometable FORMAT Parquet" < data.parquet -``` - -## Parquetファイルから新しいテーブルを作成する {#creating-new-tables-from-parquet-files} - -ClickHouseはparquetファイルのスキーマを読み取るため、テーブルを即座に作成することができます: - -```sql -CREATE TABLE imported_from_parquet -ENGINE = MergeTree -ORDER BY tuple() AS -SELECT * -FROM file('data.parquet', Parquet) -``` - -これにより、指定されたparquetファイルからテーブルが自動的に作成され、データが挿入されます: - -```sql -DESCRIBE TABLE imported_from_parquet; -``` -```response -┌─name─┬─type─────────────┬─default_type─┬─default_expression─┬─comment─┬─codec_expression─┬─ttl_expression─┐ -│ path │ Nullable(String) │ │ │ │ │ │ -│ date │ Nullable(String) │ │ │ │ │ │ -│ hits │ Nullable(Int64) │ │ │ │ │ │ -└──────┴──────────────────┴──────────────┴────────────────────┴─────────┴──────────────────┴────────────────┘ -``` - -デフォルトでは、ClickHouseはカラム名、型、値に対して厳格です。しかし、時にはインポート中に存在しないカラムやサポートされていない値をスキップすることができます。これについては[Parquet設定](/interfaces/formats/Parquet#format-settings)で管理できます。 - -## Parquet形式にエクスポートする {#exporting-to-parquet-format} - -:::tip -ClickHouse Cloudを使用して`INTO 
OUTFILE`を使用する場合、ファイルが書き込まれるマシン上で`clickhouse client`でコマンドを実行する必要があります。 -::: - -任意のテーブルまたはクエリ結果をParquetファイルにエクスポートするために、`INTO OUTFILE`句を使用できます: - -```sql -SELECT * -FROM sometable -INTO OUTFILE 'export.parquet' -FORMAT Parquet -``` - -これにより、作業ディレクトリに`export.parquet`ファイルが作成されます。 - -## ClickHouseとParquetデータ型 {#clickhouse-and-parquet-data-types} -ClickHouseとParquetのデータ型はほぼ同一ですが、[わずかに異なります](/interfaces/formats/Parquet#data-types-matching-parquet)。例えば、ClickHouseは`DateTime`型をParquetの`int64`としてエクスポートします。その後、それをClickHouseにインポートすると、数値が表示されます([time.parquetファイル](assets/time.parquet)): - -```sql -SELECT * FROM file('time.parquet', Parquet); -``` -```response -┌─n─┬───────time─┐ -│ 0 │ 1673622611 │ -│ 1 │ 1673622610 │ -│ 2 │ 1673622609 │ -│ 3 │ 1673622608 │ -│ 4 │ 1673622607 │ -└───┴────────────┘ -``` - -この場合、[型変換](/sql-reference/functions/type-conversion-functions.md)を使用できます: - -```sql -SELECT - n, - toDateTime(time) <--- int to time -FROM file('time.parquet', Parquet); -``` -```response -┌─n─┬────toDateTime(time)─┐ -│ 0 │ 2023-01-13 15:10:11 │ -│ 1 │ 2023-01-13 15:10:10 │ -│ 2 │ 2023-01-13 15:10:09 │ -│ 3 │ 2023-01-13 15:10:08 │ -│ 4 │ 2023-01-13 15:10:07 │ -└───┴─────────────────────┘ -``` - - -## その他の参考情報 {#further-reading} - -ClickHouseは、さまざまなシナリオやプラットフォームをカバーするために、多くの形式(テキストとバイナリの両方)をサポートしています。次の記事で、さらに多くの形式やそれらとの操作方法を探ってください: - -- [CSVおよびTSV形式](csv-tsv.md) -- [Avro、ArrowおよびORC](arrow-avro-orc.md) -- [JSON形式](/integrations/data-ingestion/data-formats/json/intro.md) -- [正規表現とテンプレート](templates-regex.md) -- [ネイティブおよびバイナリ形式](binary.md) -- [SQL形式](sql.md) - -また、[clickhouse-local](https://clickhouse.com/blog/extracting-converting-querying-local-files-with-sql-clickhouse-local)もチェックしてください。これは、Clickhouseサーバーを必要とせずにローカル/リモートファイルで作業できる、ポータブルなフル機能ツールです。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/parquet.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/parquet.md.hash deleted file mode 100644 index d885e816e6e..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/parquet.md.hash +++ /dev/null @@ -1 +0,0 @@ -bfc300c6d7308d21 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/sql.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/sql.md deleted file mode 100644 index a3c2a292ce9..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/sql.md +++ /dev/null @@ -1,117 +0,0 @@ ---- -sidebar_label: 'SQL ダンプ' -slug: '/integrations/data-formats/sql' -title: 'ClickHouse で SQL データの挿入とダンプ' -description: '他のデータベースと ClickHouse 間でデータを SQL ダンプを使用して転送する方法について説明するページ。' ---- - - - - -# ClickHouseへのSQLデータの挿入とダンプ - -ClickHouseは、様々な方法でOLTPデータベースインフラに簡単に統合できます。1つの方法は、他のデータベースとClickHouseの間でSQLダンプを使用してデータを転送することです。 - -## SQLダンプの作成 {#creating-sql-dumps} - -データは、[SQLInsert](/interfaces/formats.md/#sqlinsert)を使用してSQL形式でダンプできます。ClickHouseは、`INSERT INTO VALUES(...` 形式でデータを書き込み、[`output_format_sql_insert_table_name`](/operations/settings/settings-formats.md/#output_format_sql_insert_table_name)設定オプションをテーブル名として使用します: - -```sql -SET output_format_sql_insert_table_name = 'some_table'; -SELECT * FROM some_data -INTO OUTFILE 'dump.sql' -FORMAT SQLInsert -``` - -カラム名は、[`output_format_sql_insert_include_column_names`](/operations/settings/settings-formats.md/#output_format_sql_insert_include_column_names)オプションを無効にすることで省略できます: - -```sql -SET 
output_format_sql_insert_include_column_names = 0 -``` - -これで、[dump.sql](assets/dump.sql)ファイルを別のOLTPデータベースに供給できます: - -```bash -mysql some_db < dump.sql -``` - -`some_db` MySQLデータベースに`some_table`テーブルが存在することを前提としています。 - -一部のDBMSは、一度のバッチで処理できる値の数に制限がある場合があります。デフォルトでは、ClickHouseは65k値のバッチを作成しますが、これは[`output_format_sql_insert_max_batch_size`](/operations/settings/settings-formats.md/#output_format_sql_insert_max_batch_size)オプションで変更可能です: - -```sql -SET output_format_sql_insert_max_batch_size = 1000; -``` - -### 値のセットをエクスポートする {#exporting-a-set-of-values} - -ClickHouseには、[Values](/interfaces/formats.md/#data-format-values)形式があり、これはSQLInsertに似ていますが、`INSERT INTO table VALUES`部分を省略し、値のセットのみを返します: - -```sql -SELECT * FROM some_data LIMIT 3 FORMAT Values -``` -```response -('Bangor_City_Forest','2015-07-01',34),('Alireza_Afzal','2017-02-01',24),('Akhaura-Laksam-Chittagong_Line','2015-09-01',30) -``` - -## SQLダンプからデータを挿入する {#inserting-data-from-sql-dumps} - -SQLダンプを読み込むために、[MySQLDump](/interfaces/formats.md/#mysqldump)が使用されます: - -```sql -SELECT * -FROM file('dump.sql', MySQLDump) -LIMIT 5 -``` -```response -┌─path───────────────────────────┬──────month─┬─hits─┐ -│ Bangor_City_Forest │ 2015-07-01 │ 34 │ -│ Alireza_Afzal │ 2017-02-01 │ 24 │ -│ Akhaura-Laksam-Chittagong_Line │ 2015-09-01 │ 30 │ -│ 1973_National_500 │ 2017-10-01 │ 80 │ -│ Attachment │ 2017-09-01 │ 1356 │ -└────────────────────────────────┴────────────┴──────┘ -``` - -デフォルトでは、ClickHouseは不明なカラムをスキップします(これは、[input_format_skip_unknown_fields](/operations/settings/settings-formats.md/#input_format_skip_unknown_fields)オプションで制御されます)及びダンプ内の最初に見つかったテーブルについてデータを処理します(複数のテーブルが単一ファイルにダンプされた場合)。DDLステートメントはスキップされます。MySQLダンプからテーブルへのデータのロードは次のように行います([mysql.sql](assets/mysql.sql)ファイル): - -```sql -INSERT INTO some_data -FROM INFILE 'mysql.sql' FORMAT MySQLDump -``` - -MySQLダンプファイルから自動的にテーブルを作成することもできます: - -```sql -CREATE TABLE table_from_mysql -ENGINE = MergeTree -ORDER BY tuple() AS -SELECT * -FROM file('mysql.sql', MySQLDump) -``` - -ここでは、ClickHouseが自動的に推測した構造に基づいて`table_from_mysql`という名前のテーブルを作成しました。ClickHouseはデータに基づいて型を検出するか、DDLが利用可能な場合はそれを使用します: - -```sql -DESCRIBE TABLE table_from_mysql; -``` -```response -┌─name──┬─type─────────────┬─default_type─┬─default_expression─┬─comment─┬─codec_expression─┬─ttl_expression─┐ -│ path │ Nullable(String) │ │ │ │ │ │ -│ month │ Nullable(Date32) │ │ │ │ │ │ -│ hits │ Nullable(UInt32) │ │ │ │ │ │ -└───────┴──────────────────┴──────────────┴────────────────────┴─────────┴──────────────────┴────────────────┘ -``` - -## 他の形式 {#other-formats} - -ClickHouseは、多様なシナリオやプラットフォームをカバーするために、多くの形式のサポートを導入しています。以下の文書で、さまざまな形式や、それらとの作業方法について詳しく探求してください: - -- [CSVおよびTSV形式](csv-tsv.md) -- [Parquet](parquet.md) -- [JSON形式](/integrations/data-ingestion/data-formats/json/intro.md) -- [正規表現とテンプレート](templates-regex.md) -- [ネイティブおよびバイナリ形式](binary.md) -- **SQL形式** - -また、[clickhouse-local](https://clickhouse.com/blog/extracting-converting-querying-local-files-with-sql-clickhouse-local)を確認してください - ClickHouseサーバーなしでローカル/リモートファイルで作業するためのポータブルなフル機能ツールです。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/sql.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/sql.md.hash deleted file mode 100644 index e2b28e88258..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/sql.md.hash +++ /dev/null @@ -1 +0,0 @@ -5bb96b0e6121247f diff --git 
a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/templates-regex.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/templates-regex.md deleted file mode 100644 index 532a370abbd..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/data-formats/templates-regex.md +++ /dev/null @@ -1,252 +0,0 @@ ---- -sidebar_label: 'Regexp とテンプレート' -sidebar_position: 3 -slug: '/integrations/data-formats/templates-regexp' -title: 'テンプレートと正規表現を使用して ClickHouse でカスタムテキストデータをインポートおよびエクスポートする方法' -description: 'テンプレートと正規表現を使用して ClickHouse でカスタムテキストデータをインポートおよびエクスポートする方法を説明したページ' ---- - - - - - -# テンプレートと正規表現を使用したClickHouseにおけるカスタムテキストデータのインポートおよびエクスポート - -私たちはしばしばカスタムテキスト形式のデータを扱う必要があります。それは非標準形式、無効なJSON、または破損したCSVである可能性があります。CSVやJSONのような標準パーサーは、すべてのケースで機能しない場合があります。しかし、ClickHouseには強力なテンプレートと正規表現フォーマットが用意されています。 - -## テンプレートに基づいたインポート {#importing-based-on-a-template} -次の [ログファイル](assets/error.log) からデータをインポートしたいと仮定します: - -```bash -head error.log -``` -```response -2023/01/15 14:51:17 [error] client: 7.2.8.1, server: example.com "GET /apple-touch-icon-120x120.png HTTP/1.1" -2023/01/16 06:02:09 [error] client: 8.4.2.7, server: example.com "GET /apple-touch-icon-120x120.png HTTP/1.1" -2023/01/15 13:46:13 [error] client: 6.9.3.7, server: example.com "GET /apple-touch-icon.png HTTP/1.1" -2023/01/16 05:34:55 [error] client: 9.9.7.6, server: example.com "GET /h5/static/cert/icon_yanzhengma.png HTTP/1.1" -``` - -このデータをインポートするために、[テンプレート](/interfaces/formats.md/#format-template)フォーマットを使用できます。入力データの各行に対して値のプレースホルダーを持つテンプレート文字列を定義する必要があります: - -```response -
%20FORMAT%20JSONEachRow`。 **注意**: クエリはエンコードする必要があります。 - * `Endpoint Authentication type` - BASIC - * `Auth username` - ClickHouseのユーザー名 - * `Auth password` - ClickHouseのパスワード - -:::note - このHTTP URLはエラーが発生しやすいです。問題を避けるためにエスケープが正確であることを確認してください。 -::: - - -
- -* 設定 - * `Input Kafka record value format` - ソースデータに応じて異なりますが、ほとんどの場合はJSONまたはAvroです。以下の設定では`JSON`を想定しています。 - * `advanced configurations`セクションで: - * `HTTP Request Method` - POSTに設定 - * `Request Body Format` - json - * `Batch batch size` - ClickHouseの推奨に従って、**少なくとも1000**に設定します。 - * `Batch json as array` - true - * `Retry on HTTP codes` - 400-500ですが、必要に応じて調整してください。たとえば、ClickHouseの前にHTTPプロキシがある場合は変わる可能性があります。 - * `Maximum Retries` - デフォルト(10)は適切ですが、より堅牢な再試行のために調整してください。 - - - -#### 5. 接続をテストする {#5-testing-the-connectivity} -HTTP Sinkによって構成されたトピックにメッセージを作成します - - -
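たとえば、[kcat](https://github.com/edenhill/kcat)を使ってテスト用のメッセージを投入できます。以下は元の手順にはない補足のスケッチで、トピック名 `http_messages`、ブローカーアドレス、メッセージのフィールド構成はいずれも仮定です。実際のトピック名と対象テーブルのカラムに合わせて読み替えてください。認証が必要なクラスタでは、後述のGitHubデータセットの例と同様に `-X` オプションを追加します。

```bash
# 補足例: トピック名・ブローカーアドレス・フィールドはすべて仮定です
echo '{"id": 1, "message": "test message"}' | kcat -b <broker_host>:9092 -t http_messages -P
```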
- -作成したメッセージがClickHouseインスタンスに書き込まれていることを確認します。 - -### トラブルシューティング {#troubleshooting} -#### HTTP Sinkがメッセージをバッチ処理しない {#http-sink-doesnt-batch-messages} - -[Sink documentation](https://docs.confluent.io/kafka-connectors/http/current/overview.html#http-sink-connector-for-cp)によると: -> HTTP Sinkコネクタは、Kafkaヘッダー値が異なるメッセージのリクエストをバッチ処理しません。 - -1. Kafkaレコードが同じキーを持っているか確認します。 -2. HTTP API URLにパラメータを追加すると、各レコードがユニークなURLを生成する可能性があります。このため、追加のURLパラメータを使用する場合はバッチ処理が無効になります。 - -#### 400 Bad Request {#400-bad-request} -##### CANNOT_PARSE_QUOTED_STRING {#cannot_parse_quoted_string} -HTTP Sinkが`String`カラムにJSONオブジェクトを挿入する際に次のメッセージで失敗した場合: - -```response -Code: 26. DB::ParsingException: Cannot parse JSON string: expected opening quote: (while reading the value of key key_name): While executing JSONEachRowRowInputFormat: (at row 1). (CANNOT_PARSE_QUOTED_STRING) -``` - -URLに`input_format_json_read_objects_as_strings=1`設定をエンコードされた文字列`SETTINGS%20input_format_json_read_objects_as_strings%3D1`として追加します。 - -### GitHubデータセットをロードする(オプション) {#load-the-github-dataset-optional} - -この例ではGitHubデータセットのArrayフィールドが保持されることに注意してください。例では空のgithubトピックがあると仮定し、[kcat](https://github.com/edenhill/kcat)を使用してKafkaへのメッセージ挿入を行います。 - -##### 1. 設定を準備する {#1-prepare-configuration} - -自分のインストールタイプに関連するConnectの設定については[こちらの手順](https://docs.confluent.io/cloud/current/cp-component/connect-cloud-config.html#set-up-a-local-connect-worker-with-cp-install)に従ってください。スタンドアロンと分散クラスターの違いに注意します。Confluent Cloudを使用する場合、分散設定が関連します。 - -最も重要なパラメータは`http.api.url`です。ClickHouseの[HTTPインターフェース](../../../../interfaces/http.md)は、INSERT文をURLのパラメータとしてエンコードする必要があります。これはフォーマット(この場合は`JSONEachRow`)とターゲットデータベースを含む必要があります。形式はKafkaデータと一致し、HTTPペイロード内で文字列に変換されます。これらのパラメータはURLエスケープする必要があります。GitHubデータセットのこの形式の例(ClickHouseをローカルで実行していると仮定)は以下です: - -```response -://:?query=INSERT%20INTO%20.
%20FORMAT%20JSONEachRow - -http://localhost:8123?query=INSERT%20INTO%20default.github%20FORMAT%20JSONEachRow -``` - -HTTP SinkをClickHouseと使用する際に関連する追加パラメータは次のとおりです。完全なパラメータリストは[こちら](https://docs.confluent.io/kafka-connect-http/current/connector_config.html)で見つけることができます: - -* `request.method` - **POST**に設定 -* `retry.on.status.codes` - 任意のエラーコードで再試行するために400-500に設定。データに期待されるエラーに基づいて調整してください。 -* `request.body.format` - ほとんどの場合、これはJSONになります。 -* `auth.type` - ClickHouseでセキュリティを使用する場合はBASICに設定。その他のClickHouse互換の認証メカニズムは現在サポートされていません。 -* `ssl.enabled` - SSLを使用している場合はtrueに設定。 -* `connection.user` - ClickHouseのユーザー名。 -* `connection.password` - ClickHouseのパスワード。 -* `batch.max.size` - 単一のバッチで送信する行の数。適切に大きな数に設定されていることを確認してください。ClickHouseの[推奨事項](/sql-reference/statements/insert-into#performance-considerations)では、1000の値を最小限として検討する必要があります。 -* `tasks.max` - HTTP Sinkコネクタは1つ以上のタスクを実行することをサポートしています。これを使用してパフォーマンスを向上させることができます。バッチサイズとともに、これが主なパフォーマンス向上手段を表します。 -* `key.converter` - キーの型に応じて設定。 -* `value.converter` - トピックのデータ型に基づいて設定。このデータはスキーマを必要としません。ここでの形式は、`http.api.url`パラメータに指定されたFORMATと一致している必要があります。最もシンプルな方法はJSONとorg.apache.kafka.connect.json.JsonConverterコンバータを使用することです。値を文字列として扱うことも可能で、org.apache.kafka.connect.storage.StringConverterを介して行うことができますが、これには関数を使用して挿入文で値を抽出する必要があります。[Avroフォーマット](../../../../interfaces/formats.md#data-format-avro)もClickHouseでサポートされており、io.confluent.connect.avro.AvroConverterコンバータを使用する場合に利用可能です。 - -完全な設定リストは、プロキシの構成、再試行、高度なSSLの設定方法を含む完全なリストは[こちら](https://docs.confluent.io/kafka-connect-http/current/connector_config.html)で見ることができます。 - -GitHubサンプルデータ用の設定ファイルの例は[こちら](https://github.com/ClickHouse/clickhouse-docs/tree/main/docs/integrations/data-ingestion/kafka/code/connectors/http_sink)にあります。Connectがスタンドアロンモードで実行され、KafkaがConfluent Cloudでホストされていると仮定しています。 - -##### 2. 
ClickHouseテーブルを作成する {#2-create-the-clickhouse-table} - -テーブルが作成されていることを確認します。標準的なMergeTreeを使用した最小限のgithubデータセットの例を以下に示します。 - -```sql -CREATE TABLE github -( - file_time DateTime, - event_type Enum('CommitCommentEvent' = 1, 'CreateEvent' = 2, 'DeleteEvent' = 3, 'ForkEvent' = 4,'GollumEvent' = 5, 'IssueCommentEvent' = 6, 'IssuesEvent' = 7, 'MemberEvent' = 8, 'PublicEvent' = 9, 'PullRequestEvent' = 10, 'PullRequestReviewCommentEvent' = 11, 'PushEvent' = 12, 'ReleaseEvent' = 13, 'SponsorshipEvent' = 14, 'WatchEvent' = 15, 'GistEvent' = 16, 'FollowEvent' = 17, 'DownloadEvent' = 18, 'PullRequestReviewEvent' = 19, 'ForkApplyEvent' = 20, 'Event' = 21, 'TeamAddEvent' = 22), - actor_login LowCardinality(String), - repo_name LowCardinality(String), - created_at DateTime, - updated_at DateTime, - action Enum('none' = 0, 'created' = 1, 'added' = 2, 'edited' = 3, 'deleted' = 4, 'opened' = 5, 'closed' = 6, 'reopened' = 7, 'assigned' = 8, 'unassigned' = 9, 'labeled' = 10, 'unlabeled' = 11, 'review_requested' = 12, 'review_request_removed' = 13, 'synchronize' = 14, 'started' = 15, 'published' = 16, 'update' = 17, 'create' = 18, 'fork' = 19, 'merged' = 20), - comment_id UInt64, - path String, - ref LowCardinality(String), - ref_type Enum('none' = 0, 'branch' = 1, 'tag' = 2, 'repository' = 3, 'unknown' = 4), - creator_user_login LowCardinality(String), - number UInt32, - title String, - labels Array(LowCardinality(String)), - state Enum('none' = 0, 'open' = 1, 'closed' = 2), - assignee LowCardinality(String), - assignees Array(LowCardinality(String)), - closed_at DateTime, - merged_at DateTime, - merge_commit_sha String, - requested_reviewers Array(LowCardinality(String)), - merged_by LowCardinality(String), - review_comments UInt32, - member_login LowCardinality(String) -) ENGINE = MergeTree ORDER BY (event_type, repo_name, created_at) -``` - -##### 3. 
Kafkaにデータを追加する {#3-add-data-to-kafka} - -メッセージをKafkaに挿入します。以下では、[kcat](https://github.com/edenhill/kcat)を使用して10kメッセージを挿入します。 - -```bash -head -n 10000 github_all_columns.ndjson | kcat -b : -X security.protocol=sasl_ssl -X sasl.mechanisms=PLAIN -X sasl.username= -X sasl.password= -t github -``` - -ターゲットテーブル「Github」を単純に読み込むことで、データの挿入を確認できます。 - -```sql -SELECT count() FROM default.github; - -| count\(\) | -| :--- | -| 10000 | -``` diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/kafka/confluent/kafka-connect-http.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/kafka/confluent/kafka-connect-http.md.hash deleted file mode 100644 index 7a750b92d63..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/kafka/confluent/kafka-connect-http.md.hash +++ /dev/null @@ -1 +0,0 @@ -7047150fbf627e2a diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/kafka/index.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/kafka/index.md deleted file mode 100644 index a073202b665..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/kafka/index.md +++ /dev/null @@ -1,57 +0,0 @@ ---- -sidebar_label: 'Integrating Kafka with ClickHouse' -sidebar_position: 1 -slug: '/integrations/kafka' -description: 'Introduction to Kafka with ClickHouse' -title: 'Integrating Kafka with ClickHouse' ---- - - - - -# KafkaとClickHouseの統合 - -[Apache Kafka](https://kafka.apache.org/)は、高性能なデータパイプライン、ストリーミング解析、データ統合、ミッションクリティカルなアプリケーションのために、何千もの企業によって使用されているオープンソースの分散イベントストリーミングプラットフォームです。KafkaとClickHouseを使用する多くのケースでは、ユーザーはKafkaベースのデータをClickHouseに挿入したいと考えるでしょう。以下では、両方のユースケースに対するいくつかのオプションを概説し、それぞれのアプローチの利点と欠点を特定します。 - -## オプションの選択 {#choosing-an-option} - -KafkaとClickHouseを統合する際には、使用される高レベルのアプローチについて早い段階でのアーキテクチャーの決定が必要です。以下に、最も一般的な戦略を概説します。 - -### ClickPipes for Kafka (ClickHouse Cloud) {#clickpipes-for-kafka-clickhouse-cloud} -* [**ClickPipes**](../clickpipes/kafka.md)は、ClickHouse Cloudにデータを取り込む最も簡単で直感的な方法を提供します。現在、Apache Kafka、Confluent Cloud、Amazon MSKをサポートしており、今後さらに多くのデータソースが追加される予定です。 - -### サードパーティのクラウドベースのKafka接続 {#3rd-party-cloud-based-kafka-connectivity} -* [**Confluent Cloud**](./confluent/index.md) - Confluentプラットフォームは、ClickHouse Connector Sinkを[Confluent Cloudにアップロードして実行する](./confluent/custom-connector.md)オプションや、Apache KafkaをHTTPまたはHTTPSを介してAPIと統合する[HTTP Sink Connector for Confluent Platform](./confluent/kafka-connect-http.md)を提供しています。 - -* [**Amazon MSK**](./msk/index.md) - Apache KafkaクラスタからClickHouseなどの外部システムにデータを転送するためのAmazon MSK Connectフレームワークをサポートしています。Amazon MSKにClickHouse Kafka Connectをインストールできます。 - -* [**Redpanda Cloud**](https://cloud.redpanda.com/) - RedpandaはKafka API互換のストリーミングデータプラットフォームで、ClickHouseの上流データソースとして使用できます。ホスティングされたクラウドプラットフォームであるRedpanda Cloudは、Kafkaプロトコルを介してClickHouseと統合されており、ストリーミング解析ワークロードのためのリアルタイムデータ取り込みを可能にします。 - -### セルフマネージドKafka接続 {#self-managed-kafka-connectivity} -* [**Kafka Connect**](./kafka-clickhouse-connect-sink.md) - Kafka Connectは、Apache Kafkaの無料のオープンソースコンポーネントで、Kafkaと他のデータシステム間のシンプルなデータ統合のための集中型データハブとして機能します。コネクタは、Kafkaから他のシステムへのデータストリーミングをスケーラブルかつ信頼性高く行う簡単な手段を提供します。ソースコネクタは他のシステムからKafkaトピックにデータを挿入し、シンクコネクタはKafkaトピックからClickHouseなどの他のデータストアにデータを配信します。 -* [**Vector**](./kafka-vector.md) - Vectorはベンダーに依存しないデータパイプラインです。Kafkaから読み取る能力があり、ClickHouseにイベントを送信することで、堅牢な統合オプションを提供します。 -* [**JDBC Connect Sink**](./kafka-connect-jdbc.md) - Kafka Connect JDBC 
Sinkコネクタを使用すると、Kafkaトピックから任意のリレーショナルデータベースにデータをエクスポートできます。 -* **カスタムコード** - KafkaとClickHouseのそれぞれのクライアントライブラリを使用したカスタムコードは、イベントのカスタム処理が必要な場合に適切です。これはこのドキュメントの範囲を超えます。 -* [**Kafkaテーブルエンジン**](./kafka-table-engine.md)は、ネイティブのClickHouse統合を提供します(ClickHouse Cloudでは利用できません)。このテーブルエンジンは、ソースシステムからデータを**プル**します。これにはClickHouseがKafkaへの直接アクセスを持っている必要があります。 -* [**命名コレクションを持つKafkaテーブルエンジン**](./kafka-table-engine-named-collections.md) - 命名コレクションを使用すると、KafkaとのネイティブなClickHouse統合が提供されます。このアプローチにより、複数のKafkaクラスターへの安全な接続を可能にし、構成管理を集中化し、スケーラビリティとセキュリティを向上させます。 - -### アプローチの選択 {#choosing-an-approach} -いくつかの決定ポイントに絞られます: - -* **接続性** - Kafkaテーブルエンジンは、ClickHouseが宛先である場合、Kafkaからデータをプルできる必要があります。これには双方向の接続が必要です。ネットワークの分離がある場合、たとえばClickHouseがクラウドにあり、Kafkaがセルフマネージドである場合、コンプライアンスやセキュリティの理由からこれを削除することに抵抗を感じるかもしれません。(このアプローチは現在ClickHouse Cloudではサポートされていません。)Kafkaテーブルエンジンは、ClickHouse内のリソースを利用し、消費者のためにスレッドを利用します。このリソースプレッシャーをClickHouseにかけることは、リソース制約のために難しい場合があり、あなたのアーキテクトが関心の分離を好むかもしれません。この場合、Kafkaデータをプルする責任を持つプロセスをClickHouseとは独立してスケールさせることができる、異なるハードウェアにデプロイ可能なKafka Connectのようなツールが好ましいかもしれません。 - -* **クラウドでのホスティング** - クラウドベンダーは、プラットフォーム上で利用可能なKafkaコンポーネントに制限を設ける場合があります。各クラウドベンダーに推奨されるオプションを探求するためのガイドに従ってください。 - -* **外部エンリッチメント** - メッセージはClickHouseに挿入される前に操作できますが、ユーザーは複雑なエンリッチメントをClickHouseの外部に移動したいと考えるかもしれません。 - -* **データフローの方向** - Vectorは、KafkaからClickHouseへのデータの転送のみをサポートしています。 - -## 前提条件 {#assumptions} - -上記のリンクされたユーザーガイドは、以下の前提に基づいています。 - -* あなたは、プロデューサー、コンシューマー、トピックなどのKafkaの基本を知っています。 -* これらの例のためにトピックが用意されていることを前提としています。すべてのデータがJSON形式でKafkaに保存されていると仮定していますが、Avroを使用する場合でも原則は同じです。 -* 私たちは、Kafkaデータを公開し消費するために、優れた[kcat](https://github.com/edenhill/kcat)(以前のkafkacat)を例として利用します。 -* サンプルデータをロードするためのいくつかのPythonスクリプトを参照していますが、例をあなたのデータセットに合わせて適応させてください。 -* あなたは、ClickHouseのマテリアライズドビューに広く精通しています。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/kafka/index.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/kafka/index.md.hash deleted file mode 100644 index 0379fa40a50..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/kafka/index.md.hash +++ /dev/null @@ -1 +0,0 @@ -cfafb6af58370ec5 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/kafka/kafka-clickhouse-connect-sink.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/kafka/kafka-clickhouse-connect-sink.md deleted file mode 100644 index c89179915b5..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/kafka/kafka-clickhouse-connect-sink.md +++ /dev/null @@ -1,405 +0,0 @@ ---- -sidebar_label: 'ClickHouse Kafka Connect Sink' -sidebar_position: 2 -slug: '/integrations/kafka/clickhouse-kafka-connect-sink' -description: 'The official Kafka connector from ClickHouse.' 
-title: 'ClickHouse Kafka Connect Sink'
----
-
-import ConnectionDetails from '@site/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_gather_your_details_http.mdx';
-
-
-# ClickHouse Kafka Connect Sink
-
-:::note
-助けが必要な場合は、[リポジトリに問題を登録してください](https://github.com/ClickHouse/clickhouse-kafka-connect/issues) または [ClickHouseのパブリックスラック](https://clickhouse.com/slack)で質問をしてください。
-:::
-**ClickHouse Kafka Connect Sink**は、KafkaトピックからClickHouseテーブルへデータを配信するKafkaコネクタです。
-
-### License {#license}
-
-Kafkaコネクタシンクは、[Apache 2.0 License](https://www.apache.org/licenses/LICENSE-2.0)の下で配布されています。
-
-### Requirements for the environment {#requirements-for-the-environment}
-
-[Kafka Connect](https://docs.confluent.io/platform/current/connect/index.html)フレームワークv2.7以降が環境にインストールされている必要があります。
-
-### Version compatibility matrix {#version-compatibility-matrix}
-
-| ClickHouse Kafka Connect version | ClickHouse version | Kafka Connect | Confluent platform |
-|----------------------------------|--------------------|---------------|--------------------|
-| 1.0.0 | > 23.3 | > 2.7 | > 6.1 |
-
-### Main Features {#main-features}
-
-- 標準でexactly-once(正確に1回)のセマンティクスを備えています。これは、コネクタが状態ストアとして使用する新しいClickHouseコア機能である[KeeperMap](https://github.com/ClickHouse/ClickHouse/pull/39976)によって実現され、最小限のアーキテクチャを可能にします。
-- サードパーティの状態ストアをサポート:現在はメモリ内がデフォルトですが、KeeperMapを使用することも可能です(Redisは近日中に追加予定)。
-- コア統合:ClickHouseによって構築、維持、サポートされています。
-- [ClickHouse Cloud](https://clickhouse.com/cloud)に対して継続的にテストされています。
-- 宣言されたスキーマによるデータ挿入とスキーマレスデータの挿入をサポート。
-- ClickHouseのすべてのデータ型をサポートしています。
-
-### Installation instructions {#installation-instructions}
-
-#### Gather your connection details {#gather-your-connection-details}
-
-
-
-#### General Installation Instructions {#general-installation-instructions}
-
-コネクタは、プラグインを実行するために必要なすべてのクラスファイルを含む単一のJARファイルとして配布されます。
-
-プラグインをインストールするには、以下の手順を実行します。
-
-- [Releases](https://github.com/ClickHouse/clickhouse-kafka-connect/releases)ページからConnector JARファイルを含むZIPアーカイブをダウンロードします。
-- ZIPファイルの内容を抽出し、任意の場所にコピーします。
-- Confluent Platformがプラグインを検出できるように、Connectプロパティファイルの[plugin.path](https://kafka.apache.org/documentation/#connectconfigs_plugin.path)設定にプラグインディレクトリへのパスを追加します。
-- configにトピック名、ClickHouseインスタンスのホスト名、およびパスワードを指定します。
-
-```yml
-connector.class=com.clickhouse.kafka.connect.ClickHouseSinkConnector
-tasks.max=1
-topics=
-ssl=true
-jdbcConnectionProperties=?sslmode=STRICT
-security.protocol=SSL
-hostname=
-database=
-password=
-ssl.truststore.location=/tmp/kafka.client.truststore.jks
-port=8443
-value.converter.schemas.enable=false
-value.converter=org.apache.kafka.connect.json.JsonConverter
-exactlyOnce=true
-username=default
-schemas.enable=false
-```
-
-- Confluent Platformを再起動します。
-- Confluent Platformを使用している場合、Confluent Control Center UIにログインして、ClickHouse Sinkが利用可能なコネクタのリストにあることを確認します。
-
-### Configuration options {#configuration-options}
-
-ClickHouse SinkをClickHouseサーバーに接続するには、次の情報を提供する必要があります。
-
-- 接続詳細:hostname(**必須**)とport(オプション)
-- ユーザー認証情報:password(**必須**)およびusername(オプション)
-- コネクタクラス:`com.clickhouse.kafka.connect.ClickHouseSinkConnector`(**必須**)
-- topicsまたはtopics.regex:ポーリングするKafkaトピック - トピック名はテーブル名と一致する必要があります(**必須**)
-- キーおよび値変換器:トピック上のデータの種類に基づいて設定します。ワーカー設定にまだ定義されていない場合は必須です。
-
-全ての設定オプションの完全な表:
-
-| Property Name | Description | Default Value | 
-|-------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|----------------------------------------------------------| -| `hostname` (Required) | サーバーのホスト名またはIPアドレス | N/A | -| `port` | ClickHouseポート - デフォルトは8443(クラウドのHTTPS用)ですが、HTTP(セルフホストのデフォルト)の場合は8123にするべきです | `8443` | -| `ssl` | ClickHouseへのssl接続を有効にします | `true` | -| `jdbcConnectionProperties` | Clickhouseに接続する際の接続プロパティ。`?`で始まり、`param=value`の間を`&`で結合します | `""` | -| `username` | ClickHouseデータベースのユーザー名 | `default` | -| `password` (Required) | ClickHouseデータベースのパスワード | N/A | -| `database` | ClickHouseデータベース名 | `default` | -| `connector.class` (Required) | コネクタークラス(明示的に設定し、デフォルト値を保持) | `"com.clickhouse.kafka.connect.ClickHouseSinkConnector"` | -| `tasks.max` | コネクタタスクの数 | `"1"` | -| `errors.retry.timeout` | ClickHouse JDBCリトライタイムアウト | `"60"` | -| `exactlyOnce` | 一度だけの接続を有効にします | `"false"` | -| `topics` (Required) | ポーリングするKafkaトピック - トピック名はテーブル名と一致する必要があります | `""` | -| `key.converter` (Required* - See Description) | キーのタイプに応じて設定します。キーを渡す場合はここで必須です(ワーカー設定にまだ定義されていない場合)。 | `"org.apache.kafka.connect.storage.StringConverter"` | -| `value.converter` (Required* - See Description) | トピックのデータのタイプに基づいて設定します。サポート:- JSON、String、AvroまたはProtobuf形式。ワーカー設定にまだ定義されていない場合はここで必須です。 | `"org.apache.kafka.connect.json.JsonConverter"` | -| `value.converter.schemas.enable` | コネクタの値変換器のスキーマサポート | `"false"` | -| `errors.tolerance` | コネクタのエラー許容。サポート:none, all | `"none"` | -| `errors.deadletterqueue.topic.name` | 設定されている場合(errors.tolerance=allとともに)、失敗したバッチのためにDLQが使用されます([トラブルシューティング](#troubleshooting)を参照) | `""` | -| `errors.deadletterqueue.context.headers.enable` | DLQの追加ヘッダーを追加します | `""` | -| `clickhouseSettings` | ClickHouseの設定のカンマ区切りリスト(例:"insert_quorum=2, etc...") | `""` | -| `topic2TableMap` | トピック名をテーブル名にマッピングするカンマ区切りリスト(例:"topic1=table1, topic2=table2, etc...") | `""` | -| `tableRefreshInterval` | テーブル定義キャッシュを更新する時間(秒単位) | `0` | -| `keeperOnCluster` | セルフホストインスタンスのON CLUSTERパラメータの設定を許可します(例:`ON CLUSTER clusterNameInConfigFileDefinition`)正確に一度だけの接続状態テーブルのために([Distributed DDL Queries](/sql-reference/distributed-ddl)を参照) | `""` | -| `bypassRowBinary` | スキーマベースのデータ(Avro、Protobufなど)に対するRowBinaryとRowBinaryWithDefaultsの使用を無効にします - データに欠落したカラムがある場合やNullable/デフォルトが受け入れられない場合にのみ使用する必要があります | `"false"` | -| `dateTimeFormats` | DateTime64スキーマフィールドを解析するための日付時刻形式、`;`で区切ります(例:`someDateField=yyyy-MM-dd HH:mm:ss.SSSSSSSSS;someOtherDateField=yyyy-MM-dd HH:mm:ss`)。 | `""` | -| `tolerateStateMismatch` | コネクタがAFTER_PROCESSINGで保存された現在のオフセットよりも"早い"レコードをドロップすることを許可します(例:オフセット5が送信され、オフセット250が最後に記録されたオフセットの場合) | `"false"` | -| `ignorePartitionsWhenBatching` | 挿入のためにメッセージを収集するときにパーティションを無視します(ただし、`exactlyOnce`が`false`の場合のみ)。パフォーマンスノート:コネクタタスクが多いほど、タスクごとに割り当てられるKafkaパーティションは少なくなります - これはリターンが減ることを意味します。 | `"false"` | - -### Target Tables {#target-tables} - -ClickHouse Connect Sinkは、Kafkaトピックからメッセージを読み取り、適切なテーブルに書き込みます。ClickHouse Connect Sinkは、既存のテーブルにデータを書き込みます。データをそのテーブルに挿入し始める前に、適切なスキーマを持つターゲットテーブルがClickHouseに作成されていることを確認してください。 - -各トピックは、ClickHouse内に専用のターゲットテーブルを必要とします。ターゲットテーブル名は、ソーストピック名と一致する必要があります。 - -### Pre-processing {#pre-processing} - -ClickHouse Kafka Connect Sinkに送信される前にアウトバウンドメッセージを変換する必要がある場合は、[Kafka Connect Transformations](https://docs.confluent.io/platform/current/connect/transforms/overview.html)を使用してください。 - -### Supported Data types 
{#supported-data-types} - -**スキーマが宣言されている場合:** - -| Kafka Connect Type | ClickHouse Type | Supported | Primitive | -| --------------------------------------- |-----------------------| --------- | --------- | -| STRING | String | ✅ | Yes | -| INT8 | Int8 | ✅ | Yes | -| INT16 | Int16 | ✅ | Yes | -| INT32 | Int32 | ✅ | Yes | -| INT64 | Int64 | ✅ | Yes | -| FLOAT32 | Float32 | ✅ | Yes | -| FLOAT64 | Float64 | ✅ | Yes | -| BOOLEAN | Boolean | ✅ | Yes | -| ARRAY | Array(T) | ✅ | No | -| MAP | Map(Primitive, T) | ✅ | No | -| STRUCT | Variant(T1, T2, ...) | ✅ | No | -| STRUCT | Tuple(a T1, b T2, ...) | ✅ | No | -| STRUCT | Nested(a T1, b T2, ...) | ✅ | No | -| BYTES | String | ✅ | No | -| org.apache.kafka.connect.data.Time | Int64 / DateTime64 | ✅ | No | -| org.apache.kafka.connect.data.Timestamp | Int32 / Date32 | ✅ | No | -| org.apache.kafka.connect.data.Decimal | Decimal | ✅ | No | - -**スキーマが宣言されていない場合:** - -レコードはJSONに変換され、[JSONEachRow](../../../sql-reference/formats.mdx#jsoneachrow)形式でClickHouseに送信されます。 - -### Configuration Recipes {#configuration-recipes} - -迅速に始めるための一般的な設定レシピをいくつか紹介します。 - -#### Basic Configuration {#basic-configuration} - -始めるための最も基本的な設定 - Kafka Connectが分散モードで実行されており、`localhost:8443`でSSLが有効になっているClickHouseサーバーが実行されていることを前提とし、データはスキーマレスのJSONです。 - -```json -{ - "name": "clickhouse-connect", - "config": { - "connector.class": "com.clickhouse.kafka.connect.ClickHouseSinkConnector", - "tasks.max": "1", - "consumer.override.max.poll.records": "5000", - "consumer.override.max.partition.fetch.bytes": "5242880", - "database": "default", - "errors.retry.timeout": "60", - "exactlyOnce": "false", - "hostname": "localhost", - "port": "8443", - "ssl": "true", - "jdbcConnectionProperties": "?ssl=true&sslmode=strict", - "username": "default", - "password": "", - "topics": "", - "value.converter": "org.apache.kafka.connect.json.JsonConverter", - "value.converter.schemas.enable": "false", - "clickhouseSettings": "" - } -} -``` - -#### Basic Configuration with Multiple Topics {#basic-configuration-with-multiple-topics} - -コネクタは複数のトピックからデータを消費できます。 - -```json -{ - "name": "clickhouse-connect", - "config": { - "connector.class": "com.clickhouse.kafka.connect.ClickHouseSinkConnector", - ... - "topics": "SAMPLE_TOPIC, ANOTHER_TOPIC, YET_ANOTHER_TOPIC", - ... - } -} -``` - -#### Basic Configuration with DLQ {#basic-configuration-with-dlq} - -```json -{ - "name": "clickhouse-connect", - "config": { - "connector.class": "com.clickhouse.kafka.connect.ClickHouseSinkConnector", - ... - "errors.tolerance": "all", - "errors.deadletterqueue.topic.name": "", - "errors.deadletterqueue.context.headers.enable": "true" - } -} -``` - -#### Using with different data formats {#using-with-different-data-formats} - -##### Avro Schema Support {#avro-schema-support} - -```json -{ - "name": "clickhouse-connect", - "config": { - "connector.class": "com.clickhouse.kafka.connect.ClickHouseSinkConnector", - ... - "value.converter": "io.confluent.connect.avro.AvroConverter", - "value.converter.schema.registry.url": ":", - "value.converter.schemas.enable": "true" - } -} -``` - -##### Protobuf Schema Support {#protobuf-schema-support} - -```json -{ - "name": "clickhouse-connect", - "config": { - "connector.class": "com.clickhouse.kafka.connect.ClickHouseSinkConnector", - ... 
- "value.converter": "io.confluent.connect.protobuf.ProtobufConverter", - "value.converter.schema.registry.url": ":", - "value.converter.schemas.enable": "true" - } -} -``` - -注意:クラスが不足している問題が発生した場合、すべての環境がprotobuf変換器を含むわけではなく、依存関係がバンドルされた別のリリースのjarが必要になることがあります。 - -##### JSON Schema Support {#json-schema-support} - -```json -{ - "name": "clickhouse-connect", - "config": { - "connector.class": "com.clickhouse.kafka.connect.ClickHouseSinkConnector", - ... - "value.converter": "org.apache.kafka.connect.json.JsonConverter" - } -} -``` - -##### String Support {#string-support} - -コネクタは、異なるClickHouse形式のString Converterをサポートします:[JSON](/interfaces/formats#jsoneachrow)、[CSV](/interfaces/formats#csv)、および[TSV](/interfaces/formats#tabseparated)。 - -```json -{ - "name": "clickhouse-connect", - "config": { - "connector.class": "com.clickhouse.kafka.connect.ClickHouseSinkConnector", - ... - "value.converter": "org.apache.kafka.connect.storage.StringConverter", - "customInsertFormat": "true", - "insertFormat": "CSV" - } -} -``` - -### Logging {#logging} - -ログ記録はKafka Connect Platformによって自動的に提供されます。ログの宛先と形式は、Kafka connectの[設定ファイル](https://docs.confluent.io/platform/current/connect/logging.html#log4j-properties-file)を介して設定できます。 - -Confluent Platformを使用している場合は、CLIコマンドを実行することでログを確認できます。 - -```bash -confluent local services connect log -``` - -追加の詳細は公式の[チュートリアル](https://docs.confluent.io/platform/current/connect/logging.html)をチェックしてください。 - -### Monitoring {#monitoring} - -ClickHouse Kafka Connectは、[Java Management Extensions (JMX)](https://www.oracle.com/technical-resources/articles/javase/jmx.html)を介してランタイムメトリックを報告します。JMXはデフォルトでKafka Connectorで有効になっています。 - -ClickHouse Connect `MBeanName`: - -```java -com.clickhouse:type=ClickHouseKafkaConnector,name=SinkTask{id} -``` - -ClickHouse Kafka Connectは次のメトリックを報告します: - -| Name | Type | Description | -|----------------------|------|-----------------------------------------------------------------------------------------| -| `receivedRecords` | long | 受け取ったレコードの総数。 | -| `recordProcessingTime` | long | レコードを統一構造にグループ化して変換するのにかかる合計時間(ナノ秒単位)。 | -| `taskProcessingTime` | long | ClickHouseにデータを処理して挿入するのにかかる合計時間(ナノ秒単位)。 | - -### Limitations {#limitations} - -- 削除はサポートされていません。 -- バッチサイズはKafka Consumerプロパティから引き継がれます。 -- KeeperMapを使って一度だけ接続している場合、オフセットが変更または巻き戻されると、その特定のトピックのKeeperMapから内容を削除する必要があります。(詳細は以下のトラブルシューティングガイドを参照) - -### Tuning Performance {#tuning-performance} - -「シンクコネクタのバッチサイズを調整したい」と思ったことがあれば、ここがあなたのセクションです。 - -##### Connect Fetch vs Connector Poll {#connect-fetch-vs-connector-poll} - -Kafka Connect(私たちのシンクコネクタが構築されているフレームワーク)は、バックグラウンドでKafkaトピックからメッセージを取得します(コネクタとは独立しています)。 - -このプロセスは、`fetch.min.bytes`と`fetch.max.bytes`を使用して制御できます。`fetch.min.bytes`は、フレームワークがコネクタに値を渡す前に必要な最小量を設定し(`fetch.max.wait.ms`で設定された時間制限まで)、`fetch.max.bytes`は上限サイズを設定します。コネクタにより大きなバッチを渡したい場合は、最小フェッチまたは最大待機を増やすというオプションがあります。 - -この取得したデータは、その後メッセージをポーリングするコネクタクライアントによって消費されます。この際、各ポーリングに対する量は`max.poll.records`によって制御されます。フェッチはポーリングとは独立していることに注意してください! 
- -これらの設定を調整する際、ユーザーはフェッチサイズが`max.poll.records`の複数のバッチを生成することを目指すべきです(設定`fetch.min.bytes`と`fetch.max.bytes`は圧縮データを表していることに注意してください) - そうすることで、各コネクタタスクができるだけ大きなバッチを挿入します。 - -ClickHouseは、頻繁だが小さなバッチよりも、わずかな遅延でも大きなバッチに最適化されています - バッチが大きいほど、パフォーマンスが良くなります。 - -```properties -consumer.max.poll.records=5000 -consumer.max.partition.fetch.bytes=5242880 -``` - -詳細については、[Confluentのドキュメント](https://docs.confluent.io/platform/current/connect/references/allconfigs.html#override-the-worker-configuration)や[Kafkaのドキュメント](https://kafka.apache.org/documentation/#consumerconfigs)をご覧ください。 - -#### Multiple high throughput topics {#multiple-high-throughput-topics} - -コネクタが複数のトピックを購読するように設定されていて、`topic2TableMap`を使用してトピックをテーブルにマッピングし、挿入時にボトルネックが発生して消費者の遅延が生じている場合、代わりにトピックごとに一つのコネクタを作成することを検討してください。この理由は、現在バッチがすべてのテーブルに対して[直列的に](https://github.com/ClickHouse/clickhouse-kafka-connect/blob/578ac07e8be1a920aaa3b26e49183595c3edd04b/src/main/java/com/clickhouse/kafka/connect/sink/ProxySinkTask.java#L95-L100)挿入されるからです。 - -トピックごとに一つのコネクタを作成することは、可能な限り速い挿入率を確保するための暫定策です。 - -### Troubleshooting {#troubleshooting} - -#### "State mismatch for topic `[someTopic]` partition `[0]`" {#state-mismatch-for-topic-sometopic-partition-0} - -これは、KeeperMapに保存されたオフセットがKafkaに保存されたオフセットと異なる場合に発生します。通常、トピックが削除されたか、オフセットが手動で調整されたときに発生します。 -これを修正するには、その特定のトピック+パーティションのために保存されている古い値を削除する必要があります。 - -**注意:この調整は一度だけの接続に影響を与える可能性があります。** - -#### "What errors will the connector retry?" {#what-errors-will-the-connector-retry} - -現在のところ、焦点は一時的でリトライ可能なエラーの特定にあります。これには次のものが含まれます: - -- `ClickHouseException` - ClickHouseによってスローされる可能性がある一般的な例外です。 - サーバーが過負荷であるときによくスローされ、以下のエラーコードが特に一時的拡張されます: - - 3 - UNEXPECTED_END_OF_FILE - - 159 - TIMEOUT_EXCEEDED - - 164 - READONLY - - 202 - TOO_MANY_SIMULTANEOUS_QUERIES - - 203 - NO_FREE_CONNECTION - - 209 - SOCKET_TIMEOUT - - 210 - NETWORK_ERROR - - 242 - TABLE_IS_READ_ONLY - - 252 - TOO_MANY_PARTS - - 285 - TOO_FEW_LIVE_REPLICAS - - 319 - UNKNOWN_STATUS_OF_INSERT - - 425 - SYSTEM_ERROR - - 999 - KEEPER_EXCEPTION - - 1002 - UNKNOWN_EXCEPTION -- `SocketTimeoutException` - ソケットがタイムアウトしたときにスローされます。 -- `UnknownHostException` - ホストが解決できないときにスローされます。 -- `IOException` - ネットワークに問題がある場合にスローされます。 - -#### "All my data is blank/zeroes" {#all-my-data-is-blankzeroes} -おそらく、データ内のフィールドがテーブル内のフィールドと一致していません - これは特にCDC(およびDebezium形式)で一般的です。 -一般的な解決策の一つは、コネクタ設定にフラット変換を追加することです: - -```properties -transforms=flatten -transforms.flatten.type=org.apache.kafka.connect.transforms.Flatten$Value -transforms.flatten.delimiter=_ -``` - -これにより、データがネストされたJSONからフラットなJSONに変換されます(`_`を区切り文字として使用)。テーブル内のフィールドは「field1_field2_field3」形式に従うことになります(例:「before_id」、「after_id」など)。 - -#### "I want to use my Kafka keys in ClickHouse" {#i-want-to-use-my-kafka-keys-in-clickhouse} -Kafkaのキーはデフォルトでは値フィールドに保存されませんが、`KeyToValue`変換を使用してキーを値フィールドに移動できます(新しい`_key`フィールド名の下に): - -```properties -transforms=keyToValue -transforms.keyToValue.type=com.clickhouse.kafka.connect.transforms.KeyToValue -transforms.keyToValue.field=_key -``` diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/kafka/kafka-clickhouse-connect-sink.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/kafka/kafka-clickhouse-connect-sink.md.hash deleted file mode 100644 index 78edf8e9689..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/kafka/kafka-clickhouse-connect-sink.md.hash +++ /dev/null @@ -1 +0,0 @@ -e01b6db07e67d33a diff --git 
a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/kafka/kafka-connect-jdbc.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/kafka/kafka-connect-jdbc.md deleted file mode 100644 index 48f41b7022b..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/kafka/kafka-connect-jdbc.md +++ /dev/null @@ -1,151 +0,0 @@ ---- -sidebar_label: 'Kafka Connect JDBCコネクタ' -sidebar_position: 4 -slug: '/integrations/kafka/kafka-connect-jdbc' -description: 'Kafka ConnectおよびClickHouseと組み合わせてJDBCコネクタシンクを使用する' -title: 'JDBC Connector' ---- - -import ConnectionDetails from '@site/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_gather_your_details_http.mdx'; - - -# JDBC Connector - -:::note -このコネクタは、データが単純で、intなどの基本データ型から成る場合のみ使用するべきです。マップなどのClickHouse独自の型はサポートされていません。 -::: - -私たちの例では、Kafka ConnectのConfluentディストリビューションを利用しています。 - -以下に、単一のKafkaトピックからメッセージを取得し、ClickHouseテーブルに行を挿入する簡単なインストール手順を説明します。Kafka環境を持っていない場合は、寛大な無料プランを提供するConfluent Cloudをお勧めします。 - -JDBCコネクタにはスキーマが必要であることに注意してください(JDBCコネクタでは通常のJSONやCSVを使用できません)。スキーマは各メッセージにエンコードできますが、関連するオーバーヘッドを避けるために[Confluentスキーマレジストリを使用することを強く推奨します](https://www.confluent.io/blog/kafka-connect-deep-dive-converters-serialization-explained/#json-schemas)。提供される挿入スクリプトは、メッセージから自動的にスキーマを推測し、これをレジストリに挿入します。このスクリプトは他のデータセットでも再利用できます。Kafkaのキーは文字列であると仮定されます。Kafkaスキーマに関する詳細は[こちら](https://docs.confluent.io/platform/current/schema-registry/index.html)で確認できます。 - -### License {#license} -JDBCコネクタは[Confluent Community License](https://www.confluent.io/confluent-community-license)の下で配布されています。 - -### Steps {#steps} -#### Gather your connection details {#gather-your-connection-details} - - -#### 1. Install Kafka Connect and Connector {#1-install-kafka-connect-and-connector} - -Confluentパッケージをダウンロードしてローカルにインストールしたことを前提とします。コネクタをインストールするための手順は[こちら](https://docs.confluent.io/kafka-connect-jdbc/current/#install-the-jdbc-connector)に記載されています。 - -confluent-hubインストール方法を使用する場合、ローカルの設定ファイルが更新されます。 - -KafkaからClickHouseにデータを送信するために、コネクタのSinkコンポーネントを使用します。 - -#### 2. Download and install the JDBC Driver {#2-download-and-install-the-jdbc-driver} - -ClickHouse JDBCドライバ`clickhouse-jdbc--shaded.jar`を[こちら](https://github.com/ClickHouse/clickhouse-java/releases)からダウンロードしてインストールします。Kafka Connectにインストールする詳細は[こちら](https://docs.confluent.io/kafka-connect-jdbc/current/#installing-jdbc-drivers)を参照してください。他のドライバも動作する可能性がありますが、テストは行われていません。 - -:::note - -一般的な問題: ドキュメントではjarを`share/java/kafka-connect-jdbc/`にコピーすることを推奨しています。Connectがドライバを見つけられない場合は、ドライバを`share/confluent-hub-components/confluentinc-kafka-connect-jdbc/lib/`にコピーしてください。または、ドライバを含むように`plugin.path`を変更します - 以下を参照してください。 - -::: - -#### 3. 
Prepare Configuration {#3-prepare-configuration} - -あなたのインストールタイプに関連するConnectのセットアップは[こちらの指示](https://docs.confluent.io/cloud/current/cp-component/connect-cloud-config.html#set-up-a-local-connect-worker-with-cp-install)に従って行ってください。スタンドアロンと分散クラスターの違いに留意してください。Confluent Cloudを使用する場合、分散セットアップが関連します。 - -ClickHouseでJDBCコネクタを使用するために関連するパラメータは以下の通りです。全パラメータリストは[こちら](https://docs.confluent.io/kafka-connect-jdbc/current/sink-connector/index.html)で確認できます。 - -* `_connection.url_` - これは `jdbc:clickhouse://<clickhouse host>:<clickhouse http port>/<target database>`の形式である必要があります。 -* `connection.user` - 対象データベースへの書き込みアクセスを持つユーザー。 -* `table.name.format` - データを挿入するClickHouseテーブル。このテーブルは存在する必要があります。 -* `batch.size` - 一度に送信する行の数。この値は適切に大きい数に設定してください。ClickHouseの[推奨事項](/sql-reference/statements/insert-into#performance-considerations)では、1000の値を最低限として考慮する必要があります。 -* `tasks.max` - JDBC Sinkコネクタは一つまたは複数のタスクを実行することをサポートします。これはパフォーマンスを向上させるために使用できます。バッチサイズと共に、パフォーマンスを改善するための主要な手段を表します。 -* `value.converter.schemas.enable` - スキーマレジストリを使用している場合はfalseに設定し、メッセージにスキーマを埋め込む場合はtrueに設定します。 -* `value.converter` - データ型に応じて設定します。例えば、JSONの場合は`io.confluent.connect.json.JsonSchemaConverter`。 -* `key.converter` - `org.apache.kafka.connect.storage.StringConverter`に設定します。文字列キーを使用します。 -* `pk.mode` - ClickHouseには関連しません。noneに設定します。 -* `auto.create` - サポートされておらず、falseにする必要があります。 -* `auto.evolve` - この設定はfalseを推奨しますが、将来的にはサポートされる可能性があります。 -* `insert.mode` - "insert"に設定します。他のモードは現在サポートされていません。 -* `key.converter` - キーの種類に応じて設定します。 -* `value.converter` - トピック上のデータに基づいて設定します。このデータにはサポートされているスキーマが必要です - JSON、Avro、またはProtobufフォーマット。 - -テストのためにサンプルデータセットを使用する場合は、以下を設定してください: - -* `value.converter.schemas.enable` - スキーマレジストリを使用しているためfalseに設定します。各メッセージにスキーマを埋め込む場合はtrueに設定します。 -* `key.converter` - "org.apache.kafka.connect.storage.StringConverter"に設定します。文字列キーを使用します。 -* `value.converter` - "io.confluent.connect.json.JsonSchemaConverter"に設定します。 -* `value.converter.schema.registry.url` - スキーマサーバーのURLとともに、スキーマサーバーの認証情報を`value.converter.schema.registry.basic.auth.user.info`パラメータを介して設定します。 - -Githubのサンプルデータに関する設定ファイルの例は[こちら](https://github.com/ClickHouse/kafka-samples/tree/main/github_events/jdbc_sink)にあります。これをスタンドアロンモードで実行し、KafkaがConfluent Cloudにホストされていると仮定します。 - -#### 4. 
Create the ClickHouse table {#4-create-the-clickhouse-table} - -テーブルが作成されていることを確認し、以前の例から既に存在する場合は削除します。縮小されたGithubデータセットに対応した例を以下に示します。現在サポートされていないArrayやMap型がないことに注意してください: - -```sql -CREATE TABLE github -( - file_time DateTime, - event_type Enum('CommitCommentEvent' = 1, 'CreateEvent' = 2, 'DeleteEvent' = 3, 'ForkEvent' = 4, 'GollumEvent' = 5, 'IssueCommentEvent' = 6, 'IssuesEvent' = 7, 'MemberEvent' = 8, 'PublicEvent' = 9, 'PullRequestEvent' = 10, 'PullRequestReviewCommentEvent' = 11, 'PushEvent' = 12, 'ReleaseEvent' = 13, 'SponsorshipEvent' = 14, 'WatchEvent' = 15, 'GistEvent' = 16, 'FollowEvent' = 17, 'DownloadEvent' = 18, 'PullRequestReviewEvent' = 19, 'ForkApplyEvent' = 20, 'Event' = 21, 'TeamAddEvent' = 22), - actor_login LowCardinality(String), - repo_name LowCardinality(String), - created_at DateTime, - updated_at DateTime, - action Enum('none' = 0, 'created' = 1, 'added' = 2, 'edited' = 3, 'deleted' = 4, 'opened' = 5, 'closed' = 6, 'reopened' = 7, 'assigned' = 8, 'unassigned' = 9, 'labeled' = 10, 'unlabeled' = 11, 'review_requested' = 12, 'review_request_removed' = 13, 'synchronize' = 14, 'started' = 15, 'published' = 16, 'update' = 17, 'create' = 18, 'fork' = 19, 'merged' = 20), - comment_id UInt64, - path String, - ref LowCardinality(String), - ref_type Enum('none' = 0, 'branch' = 1, 'tag' = 2, 'repository' = 3, 'unknown' = 4), - creator_user_login LowCardinality(String), - number UInt32, - title String, - state Enum('none' = 0, 'open' = 1, 'closed' = 2), - assignee LowCardinality(String), - closed_at DateTime, - merged_at DateTime, - merge_commit_sha String, - merged_by LowCardinality(String), - review_comments UInt32, - member_login LowCardinality(String) -) ENGINE = MergeTree ORDER BY (event_type, repo_name, created_at) -``` - -#### 5. Start Kafka Connect {#5-start-kafka-connect} - -[スタンドアロン](https://docs.confluent.io/cloud/current/cp-component/connect-cloud-config.html#standalone-cluster)または[分散](https://docs.confluent.io/cloud/current/cp-component/connect-cloud-config.html#distributed-cluster)モードでKafka Connectを起動します。 - -```bash -./bin/connect-standalone connect.properties.ini github-jdbc-sink.properties.ini -``` - -#### 6. 
Add data to Kafka {#6-add-data-to-kafka} - -提供された[スクリプトと設定](https://github.com/ClickHouse/kafka-samples/tree/main/producer)を使用してKafkaにメッセージを挿入します。github.configを変更してKafkaの認証情報を含める必要があります。このスクリプトは現在Confluent Cloudでの使用に設定されています。 - -```bash -python producer.py -c github.config -``` - -このスクリプトは任意のndjsonファイルをKafkaトピックに挿入するために使用できます。これにより、自動的にスキーマを推測しようとします。提供されたサンプル設定は10,000メッセージのみを挿入するように設定されています - 必要に応じて[ここで変更](https://github.com/ClickHouse/clickhouse-docs/tree/main/docs/integrations/data-ingestion/kafka/code/producer/github.config#L25)してください。この設定は、Kafkaへの挿入中にデータセットから互換性のないArrayフィールドを削除します。 - -これは、JDBCコネクタがメッセージをINSERT文に変換するために必要です。自分のデータを使用している場合は、スキーマを各メッセージに挿入する(_value.converter.schemas.enable_をtrueに設定する)か、クライアントがレジストリにスキーマを参照してメッセージを公開するようにしてください。 - -Kafka Connectはメッセージの消費を開始し、ClickHouseに行を挿入するはずです。「[JDBC Compliance Mode] トランザクションはサポートされていません。」という警告が表示されることがありますが、これは予期されるものであり、無視することができます。 - -ターゲットテーブル「Github」を簡単に読み取ることで、データの挿入を確認できます。 - -```sql -SELECT count() FROM default.github; -``` - -```response -| count\(\) | -| :--- | -| 10000 | -``` - -### Recommended Further Reading {#recommended-further-reading} - -* [Kafka Sink Configuration Parameters](https://docs.confluent.io/kafka-connect-jdbc/current/sink-connector/sink_config_options.html#sink-config-options) -* [Kafka Connect Deep Dive – JDBC Source Connector](https://www.confluent.io/blog/kafka-connect-deep-dive-jdbc-source-connector) -* [Kafka Connect JDBC Sink deep-dive: Working with Primary Keys](https://rmoff.net/2021/03/12/kafka-connect-jdbc-sink-deep-dive-working-with-primary-keys/) -* [Kafka Connect in Action: JDBC Sink](https://www.youtube.com/watch?v=b-3qN_tlYR4&t=981s) - 読むよりも見ることを好む方のために。 -* [Kafka Connect Deep Dive – Converters and Serialization Explained](https://www.confluent.io/blog/kafka-connect-deep-dive-converters-serialization-explained/#json-schemas) diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/kafka/kafka-connect-jdbc.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/kafka/kafka-connect-jdbc.md.hash deleted file mode 100644 index 518e566695e..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/kafka/kafka-connect-jdbc.md.hash +++ /dev/null @@ -1 +0,0 @@ -53bcdb21ce278466 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/kafka/kafka-table-engine-named-collections.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/kafka/kafka-table-engine-named-collections.md deleted file mode 100644 index 4a268ca291e..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/kafka/kafka-table-engine-named-collections.md +++ /dev/null @@ -1,222 +0,0 @@ ---- -title: 'Integrating ClickHouse with Kafka using Named Collections' -description: 'How to use named collections to connect clickhouse to kafka' -keywords: -- 'named collection' -- 'how to' -- 'kafka' -slug: '/integrations/data-ingestion/kafka/kafka-table-engine-named-collections' ---- - - - - - -# Integrating ClickHouse with Kafka using Named Collections - -## Introduction {#introduction} - -このガイドでは、名前付きコレクションを使用してClickHouseをKafkaに接続する方法を探ります。名前付きコレクションの設定ファイルを使用することにはいくつかの利点があります: -- 設定の中央管理と簡易化。 -- SQLテーブル定義を変更することなく設定の変更が可能。 -- 単一の設定ファイルを検査することで、設定の確認や問題解決が容易に。 - -このガイドは、Apache Kafka 3.4.1およびClickHouse 24.5.1でテストされました。 - -## Assumptions {#assumptions} - -このドキュメントは、次のことを前提としています: -1. 動作中のKafkaクラスタ。 -2. セットアップされ、稼働中のClickHouseクラスタ。 -3. 
SQLの基礎知識と、ClickHouseおよびKafkaの設定に精通していること。 - -## Prerequisites {#prerequisites} - -名前付きコレクションを作成するユーザーが必要なアクセス権を持っていることを確認してください: - -```xml -1 -1 -1 -1 -``` - -アクセス制御を有効にする詳細については、[ユーザー管理ガイド](./../../../guides/sre/user-management/index.md)を参照してください。 - -## Configuration {#configuration} - -ClickHouseの `config.xml` ファイルに次のセクションを追加します: - -```xml - - - - - c1-kafka-1:9094,c1-kafka-2:9094,c1-kafka-3:9094 - cluster_1_clickhouse_topic - cluster_1_clickhouse_consumer - JSONEachRow - 0 - 1 - 1 - - - - SASL_SSL - false - PLAIN - kafka-client - kafkapassword1 - all - latest - - - - - - c2-kafka-1:29094,c2-kafka-2:29094,c2-kafka-3:29094 - cluster_2_clickhouse_topic - cluster_2_clickhouse_consumer - JSONEachRow - 0 - 1 - 1 - - - - SASL_SSL - false - PLAIN - kafka-client - kafkapassword2 - all - latest - - - -``` - -### Configuration Notes {#configuration-notes} - -1. Kafkaのアドレスおよび関連する設定を、あなたのKafkaクラスタのセットアップに合わせて調整してください。 -2. ``の前のセクションにはClickHouse Kafkaエンジンのパラメーターが含まれています。パラメーターの完全なリストについては、[Kafkaエンジンのパラメーター](/engines/table-engines/integrations/kafka)を参照してください。 -3. ``内のセクションには拡張Kafka設定オプションが含まれています。詳細なオプションについては、[librdkafkaの設定](https://github.com/confluentinc/librdkafka/blob/master/CONFIGURATION.md)を参照してください。 -4. この例では `SASL_SSL` セキュリティプロトコルと `PLAIN` メカニズムを使用しています。これらの設定は、あなたのKafkaクラスタの構成に基づいて調整してください。 - -## Creating Tables and Databases {#creating-tables-and-databases} - -ClickHouseクラスタ上で必要なデータベースとテーブルを作成します。ClickHouseを単一ノードとして実行している場合は、SQLコマンドのクラスター部分を省略し、`ReplicatedMergeTree`の代わりに他のエンジンを使用します。 - -### Create the Database {#create-the-database} - -```sql -CREATE DATABASE kafka_testing ON CLUSTER LAB_CLICKHOUSE_CLUSTER; -``` - -### Create Kafka Tables {#create-kafka-tables} - -最初のKafkaクラスタ用の最初のKafkaテーブルを作成します: - -```sql -CREATE TABLE kafka_testing.first_kafka_table ON CLUSTER LAB_CLICKHOUSE_CLUSTER -( - `id` UInt32, - `first_name` String, - `last_name` String -) -ENGINE = Kafka(cluster_1); -``` - -2つ目のKafkaクラスタ用の2つ目のKafkaテーブルを作成します: - -```sql -CREATE TABLE kafka_testing.second_kafka_table ON CLUSTER STAGE_CLICKHOUSE_CLUSTER -( - `id` UInt32, - `first_name` String, - `last_name` String -) -ENGINE = Kafka(cluster_2); -``` - -### Create Replicated Tables {#create-replicated-tables} - -最初のKafkaテーブル用のテーブルを作成します: - -```sql -CREATE TABLE kafka_testing.first_replicated_table ON CLUSTER STAGE_CLICKHOUSE_CLUSTER -( - `id` UInt32, - `first_name` String, - `last_name` String -) ENGINE = ReplicatedMergeTree() -ORDER BY id; -``` - -2つ目のKafkaテーブル用のテーブルを作成します: - -```sql -CREATE TABLE kafka_testing.second_replicated_table ON CLUSTER STAGE_CLICKHOUSE_CLUSTER -( - `id` UInt32, - `first_name` String, - `last_name` String -) ENGINE = ReplicatedMergeTree() -ORDER BY id; -``` - -### Create Materialized Views {#create-materialized-views} - -最初のKafkaテーブルから最初の複製テーブルにデータを挿入するためのマテリアライズドビューを作成します: - -```sql -CREATE MATERIALIZED VIEW kafka_testing.cluster_1_mv ON CLUSTER STAGE_CLICKHOUSE_CLUSTER TO first_replicated_table AS -SELECT - id, - first_name, - last_name -FROM first_kafka_table; -``` - -2つ目のKafkaテーブルから2つ目の複製テーブルにデータを挿入するためのマテリアライズドビューを作成します: - -```sql -CREATE MATERIALIZED VIEW kafka_testing.cluster_2_mv ON CLUSTER STAGE_CLICKHOUSE_CLUSTER TO second_replicated_table AS -SELECT - id, - first_name, - last_name -FROM second_kafka_table; -``` - -## Verifying the Setup {#verifying-the-setup} - -Kafkaクラスタ上に関連する消費者グループが見えるはずです: -- `cluster_1_clickhouse_consumer` on `cluster_1` -- `cluster_2_clickhouse_consumer` on `cluster_2` - -どのClickHouseノードでも次のクエリを実行して、両方のテーブルのデータを確認します: - -```sql -SELECT * FROM first_replicated_table LIMIT 
10; -``` - -```sql -SELECT * FROM second_replicated_table LIMIT 10; -``` - -### Note {#note} - -このガイドでは、両方のKafkaトピックに取り込まれたデータは同じです。実際には、異なる場合があります。必要に応じて、任意の数のKafkaクラスタを追加できます。 - -例の出力: - -```sql -┌─id─┬─first_name─┬─last_name─┐ -│ 0 │ FirstName0 │ LastName0 │ -│ 1 │ FirstName1 │ LastName1 │ -│ 2 │ FirstName2 │ LastName2 │ -└────┴────────────┴───────────┘ -``` - -これで、名前付きコレクションを使用してClickHouseとKafkaを統合する設定が完了しました。ClickHouseの `config.xml` ファイルにKafka設定を集中させることで、設定の管理と調整が容易になり、効率的な統合を保証します。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/kafka/kafka-table-engine-named-collections.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/kafka/kafka-table-engine-named-collections.md.hash deleted file mode 100644 index 2264f8ca131..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/kafka/kafka-table-engine-named-collections.md.hash +++ /dev/null @@ -1 +0,0 @@ -dc90f14fa4a20534 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/kafka/kafka-table-engine.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/kafka/kafka-table-engine.md deleted file mode 100644 index 10373d48aa1..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/kafka/kafka-table-engine.md +++ /dev/null @@ -1,495 +0,0 @@ ---- -sidebar_label: 'Kafka Table Engine' -sidebar_position: 5 -slug: '/integrations/kafka/kafka-table-engine' -description: 'Using the Kafka Table Engine' -title: 'Using the Kafka table engine' ---- - -import CloudNotSupportedBadge from '@theme/badges/CloudNotSupportedBadge'; -import Image from '@theme/IdealImage'; -import kafka_01 from '@site/static/images/integrations/data-ingestion/kafka/kafka_01.png'; -import kafka_02 from '@site/static/images/integrations/data-ingestion/kafka/kafka_02.png'; -import kafka_03 from '@site/static/images/integrations/data-ingestion/kafka/kafka_03.png'; -import kafka_04 from '@site/static/images/integrations/data-ingestion/kafka/kafka_04.png'; - - -# Kafkaテーブルエンジンの使用 - - - -:::note -Kafkaテーブルエンジンは [ClickHouse Cloud](https://clickhouse.com/cloud) ではサポートされていません。 [ClickPipes](../clickpipes/kafka.md) または [Kafka Connect](./kafka-clickhouse-connect-sink.md) を検討してください。 -::: - -### KafkaからClickHouseへ {#kafka-to-clickhouse} - -Kafkaテーブルエンジンを使用するには、[ClickHouseマテリアライズドビュー](../../../guides/developer/cascading-materialized-views.md)に大まかに精通している必要があります。 - -#### 概要 {#overview} - -まずは最も一般的なユースケースに焦点を当てます:Kafkaテーブルエンジンを使用して、KafkaからClickHouseにデータを挿入します。 - -Kafkaテーブルエンジンは、ClickHouseがKafkaトピックから直接読み取ることを可能にします。トピック上のメッセージを表示するには便利ですが、エンジンは設計上、一度限りの取得しか許可しません。つまり、テーブルにクエリが発行されると、キューからデータを消費し、消費者オフセットを増加させてから、呼び出し元に結果を返します。実際には、これらのオフセットをリセットしない限り、データを再読することはできません。 - -テーブルエンジンから読み取ったデータを永続化するには、データをキャプチャして他のテーブルに挿入する手段が必要です。トリガーを使用したマテリアライズドビューは、この機能をネイティブに提供します。マテリアライズドビューはテーブルエンジンの読み取りを開始し、一連のドキュメントを受信します。TO句はデータの行き先を決定します - 通常は[Merge Treeファミリー](../../../engines/table-engines/mergetree-family/index.md)のテーブルです。このプロセスは以下のように視覚化されます: - - - -#### ステップ {#steps} - -##### 1. 準備 {#1-prepare} - -ターゲットトピックにデータが格納されている場合、以下の内容をデータセット用に適応できます。あるいは、サンプルのGithubデータセットが[こちら](https://datasets-documentation.s3.eu-west-3.amazonaws.com/kafka/github_all_columns.ndjson)に用意されています。このデータセットは以下の例で使用し、簡潔さのためにフルデータセットに対して[ClickHouseリポジトリ](https://github.com/ClickHouse/ClickHouse)に関するGithubイベントに制限しています。これは、データセットに付随して発表されたほとんどのクエリで機能するのに十分です。 - -##### 2. 
ClickHouseの設定 {#2-configure-clickhouse} - -これは、セキュアなKafkaに接続する場合に必要なステップです。これらの設定は、SQL DDLコマンドを介して渡すことはできず、ClickHouseのconfig.xmlで設定する必要があります。SASLによって保護されたインスタンスに接続することを前提としています。これは、Confluent Cloudと対話する際に最もシンプルな方法です。 - -```xml - - - username - password - sasl_ssl - PLAIN - - -``` - -上記のスニペットをconf.d/ディレクトリ内の新しいファイルに配置するか、既存の設定ファイルに統合してください。設定を構成できる方法については、[こちら](../../../engines/table-engines/integrations/kafka.md#configuration)を参照してください。 - -このチュートリアルで使用する`KafkaEngine`というデータベースを作成します: - -```sql -CREATE DATABASE KafkaEngine; -``` - -データベースを作成したら、データベースに切り替えてください: - -```sql -USE KafkaEngine; -``` - -##### 3. 目的のテーブルを作成 {#3-create-the-destination-table} - -目的のテーブルを準備します。以下の例では、簡潔さのために削減されたGitHubスキーマを使用しています。MergeTreeテーブルエンジンを使用しますが、この例は[MergeTreeファミリー](../../../engines/table-engines/mergetree-family/index.md)の任意のメンバーに簡単に適応可能です。 - -```sql -CREATE TABLE github -( - file_time DateTime, - event_type Enum('CommitCommentEvent' = 1, 'CreateEvent' = 2, 'DeleteEvent' = 3, 'ForkEvent' = 4, 'GollumEvent' = 5, 'IssueCommentEvent' = 6, 'IssuesEvent' = 7, 'MemberEvent' = 8, 'PublicEvent' = 9, 'PullRequestEvent' = 10, 'PullRequestReviewCommentEvent' = 11, 'PushEvent' = 12, 'ReleaseEvent' = 13, 'SponsorshipEvent' = 14, 'WatchEvent' = 15, 'GistEvent' = 16, 'FollowEvent' = 17, 'DownloadEvent' = 18, 'PullRequestReviewEvent' = 19, 'ForkApplyEvent' = 20, 'Event' = 21, 'TeamAddEvent' = 22), - actor_login LowCardinality(String), - repo_name LowCardinality(String), - created_at DateTime, - updated_at DateTime, - action Enum('none' = 0, 'created' = 1, 'added' = 2, 'edited' = 3, 'deleted' = 4, 'opened' = 5, 'closed' = 6, 'reopened' = 7, 'assigned' = 8, 'unassigned' = 9, 'labeled' = 10, 'unlabeled' = 11, 'review_requested' = 12, 'review_request_removed' = 13, 'synchronize' = 14, 'started' = 15, 'published' = 16, 'update' = 17, 'create' = 18, 'fork' = 19, 'merged' = 20), - comment_id UInt64, - path String, - ref LowCardinality(String), - ref_type Enum('none' = 0, 'branch' = 1, 'tag' = 2, 'repository' = 3, 'unknown' = 4), - creator_user_login LowCardinality(String), - number UInt32, - title String, - labels Array(LowCardinality(String)), - state Enum('none' = 0, 'open' = 1, 'closed' = 2), - assignee LowCardinality(String), - assignees Array(LowCardinality(String)), - closed_at DateTime, - merged_at DateTime, - merge_commit_sha String, - requested_reviewers Array(LowCardinality(String)), - merged_by LowCardinality(String), - review_comments UInt32, - member_login LowCardinality(String) -) ENGINE = MergeTree ORDER BY (event_type, repo_name, created_at) -``` - -##### 4. 
トピックを作成してデータを入れる {#4-create-and-populate-the-topic} - -次に、トピックを作成します。これにはいくつかのツールを使用できます。ローカルでKafkaを実行している場合、[RPK](https://docs.redpanda.com/current/get-started/rpk-install/)を使用すると便利です。以下のコマンドを実行して、5パーティションを持つ`github`というトピックを作成します: - -```bash -rpk topic create -p 5 github --brokers : -``` - -Confluent CloudでKafkaを実行している場合は、[Confluent CLI](https://docs.confluent.io/platform/current/tutorials/examples/clients/docs/kcat.html#produce-records)を使用することを好むかもしれません: - -```bash -confluent kafka topic create --if-not-exists github -``` - -次に、このトピックにデータを入れる必要があります。これを行うために[kcat](https://github.com/edenhill/kcat)を使用します。認証が無効なローカルKafkaで動作している場合、以下のコマンドを実行できます: - -```bash -cat github_all_columns.ndjson | -kcat -P \ - -b : \ - -t github -``` - -または、あなたのKafkaクラスターがSASLを使用して認証している場合は、以下のようにします: - -```bash -cat github_all_columns.ndjson | -kcat -P \ - -b : \ - -t github - -X security.protocol=sasl_ssl \ - -X sasl.mechanisms=PLAIN \ - -X sasl.username= \ - -X sasl.password= \ -``` - -データセットには200,000行が含まれているため、数秒で取り込まれるはずです。より大きなデータセットを操作したい場合は、[ClickHouse/kafka-samples](https://github.com/ClickHouse/kafka-samples) GitHubリポジトリの [大規模データセットのセクション](https://github.com/ClickHouse/kafka-samples/tree/main/producer#large-datasets)を確認してください。 - -##### 5. Kafkaテーブルエンジンを作成 {#5-create-the-kafka-table-engine} - -以下の例は、マージツリーテーブルと同じスキーマを持つテーブルエンジンを作成します。これは厳密には必要ありませんが、目的のテーブルにはエイリアスや一時的なカラムがあっても構いません。ただし、設定は重要です。KafkaトピックからJSONを消費するためにデータ型として `JSONEachRow` を使用することに注意してください。`github` と `clickhouse` の値はそれぞれトピック名と消費者グループ名を表します。トピックは実際には複数の値のリストを持つことができます。 - -```sql -CREATE TABLE github_queue -( - file_time DateTime, - event_type Enum('CommitCommentEvent' = 1, 'CreateEvent' = 2, 'DeleteEvent' = 3, 'ForkEvent' = 4, 'GollumEvent' = 5, 'IssueCommentEvent' = 6, 'IssuesEvent' = 7, 'MemberEvent' = 8, 'PublicEvent' = 9, 'PullRequestEvent' = 10, 'PullRequestReviewCommentEvent' = 11, 'PushEvent' = 12, 'ReleaseEvent' = 13, 'SponsorshipEvent' = 14, 'WatchEvent' = 15, 'GistEvent' = 16, 'FollowEvent' = 17, 'DownloadEvent' = 18, 'PullRequestReviewEvent' = 19, 'ForkApplyEvent' = 20, 'Event' = 21, 'TeamAddEvent' = 22), - actor_login LowCardinality(String), - repo_name LowCardinality(String), - created_at DateTime, - updated_at DateTime, - action Enum('none' = 0, 'created' = 1, 'added' = 2, 'edited' = 3, 'deleted' = 4, 'opened' = 5, 'closed' = 6, 'reopened' = 7, 'assigned' = 8, 'unassigned' = 9, 'labeled' = 10, 'unlabeled' = 11, 'review_requested' = 12, 'review_request_removed' = 13, 'synchronize' = 14, 'started' = 15, 'published' = 16, 'update' = 17, 'create' = 18, 'fork' = 19, 'merged' = 20), - comment_id UInt64, - path String, - ref LowCardinality(String), - ref_type Enum('none' = 0, 'branch' = 1, 'tag' = 2, 'repository' = 3, 'unknown' = 4), - creator_user_login LowCardinality(String), - number UInt32, - title String, - labels Array(LowCardinality(String)), - state Enum('none' = 0, 'open' = 1, 'closed' = 2), - assignee LowCardinality(String), - assignees Array(LowCardinality(String)), - closed_at DateTime, - merged_at DateTime, - merge_commit_sha String, - requested_reviewers Array(LowCardinality(String)), - merged_by LowCardinality(String), - review_comments UInt32, - member_login LowCardinality(String) -) - ENGINE = Kafka('kafka_host:9092', 'github', 'clickhouse', - 'JSONEachRow') settings kafka_thread_per_consumer = 0, kafka_num_consumers = 1; -``` - 
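-
-作成したKafkaエンジンテーブルの中身を手早く確認したい場合は、次のような直接SELECTを実行する方法が考えられます(あくまで確認用の一例です。本文で触れている設定`stream_like_engine_allow_direct_select`を有効にする必要があり、読み取った行はコンシューマーオフセットが進むため再度読み込まれません):
-
-```sql
-SELECT event_type, actor_login, repo_name, created_at
-FROM github_queue   -- Kafkaエンジンテーブルから直接読み取る(オフセットが進む点に注意)
-LIMIT 5             -- 確認用に少数の行のみ取得
-SETTINGS stream_like_engine_allow_direct_select = 1;
-```
-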
-エンジン設定やパフォーマンス調整については以下で説明します。この時点で、テーブル`github_queue`でシンプルなSELECTを実行するといくつかの行が読まれるはずです。これは、消費者のオフセットを前に進めることになり、これらの行が再読み込みされないようにします。制限および必要なパラメータ`stream_like_engine_allow_direct_select`にも注意してください。 - -##### 6. マテリアライズドビューを作成 {#6-create-the-materialized-view} - -マテリアライズドビューは、先に作成した二つのテーブルを接続し、Kafkaテーブルエンジンからデータを読み取り、ターゲットのマージツリーテーブルに挿入します。いくつかのデータ変換を実行できます。ここでは単純な読み取りと挿入を行います。`*`の使用は、カラム名が同一であることを前提としています(大文字と小文字は区別)。 - -```sql -CREATE MATERIALIZED VIEW github_mv TO github AS -SELECT * -FROM github_queue; -``` - -作成時のマテリアライズドビューはKafkaエンジンに接続し、読み取りを開始します:ターゲットテーブルに行を挿入します。このプロセスは無限に続き、Kafkaへの後続のメッセージ挿入が消費されます。さらにメッセージをKafkaに挿入するためにスクリプトを再実行しても構いません。 - -##### 7. 行が挿入されたことを確認 {#7-confirm-rows-have-been-inserted} - -ターゲットテーブルにデータが存在することを確認します: - -```sql -SELECT count() FROM github; -``` - -200,000行が表示されるはずです: -```response -┌─count()─┐ -│ 200000 │ -└─────────┘ -``` - -#### 一般的な操作 {#common-operations} - -##### メッセージ消費の停止と再起動 {#stopping--restarting-message-consumption} - -メッセージ消費を停止するには、Kafkaエンジンテーブルを切り離すことができます: - -```sql -DETACH TABLE github_queue; -``` - -これにより、消費者グループのオフセットには影響しません。消費を再開し、前のオフセットから続けるには、テーブルを再接続します。 - -```sql -ATTACH TABLE github_queue; -``` - -##### Kafkaメタデータの追加 {#adding-kafka-metadata} - -データがClickHouseに取り込まれた後、元のKafkaメッセージのメタデータを追跡することは有益です。例えば、特定のトピックやパーティションのどの程度を消費したのかを知りたい場合があります。この目的のために、Kafkaテーブルエンジンは複数の[仮想カラム](../../../engines/table-engines/index.md#table_engines-virtual_columns)を公開しています。これらは私たちのターゲットテーブルにカラムとして保持するためにスキーマを修正し、マテリアライズドビューのSELECT文を変更することによって持続させることができます。 - -まず、上で説明した停止操作を実行して、ターゲットテーブルにカラムを追加する前にテーブルを切り離します。 - -```sql -DETACH TABLE github_queue; -``` - -以下に、行の発生源トピックとパーティションを識別する情報カラムを追加します。 - -```sql -ALTER TABLE github - ADD COLUMN topic String, - ADD COLUMN partition UInt64; -``` - -次に、仮想カラムが必要に応じてマッピングされていることを確認する必要があります。 -仮想カラムは`_`で接頭辞が付けられます。 -仮想カラムの完全なリストは[こちら](../../../engines/table-engines/integrations/kafka.md#virtual-columns)で確認できます。 - -仮想カラムでテーブルを更新するためには、マテリアライズドビューをドロップし、Kafkaエンジンテーブルを再接続し、マテリアライズドビューを再作成する必要があります。 - -```sql -DROP VIEW github_mv; -``` - -```sql -ATTACH TABLE github_queue; -``` - -```sql -CREATE MATERIALIZED VIEW github_mv TO github AS -SELECT *, _topic as topic, _partition as partition -FROM github_queue; -``` - -新しく取り込まれた行にはメタデータが含まれるはずです。 - -```sql -SELECT actor_login, event_type, created_at, topic, partition -FROM github -LIMIT 10; -``` - -結果は次のようになります: - -| actor_login | event_type | created_at | topic | partition | -| :--- | :--- | :--- | :--- | :--- | -| IgorMinar | CommitCommentEvent | 2011-02-12 02:22:00 | github | 0 | -| queeup | CommitCommentEvent | 2011-02-12 02:23:23 | github | 0 | -| IgorMinar | CommitCommentEvent | 2011-02-12 02:23:24 | github | 0 | -| IgorMinar | CommitCommentEvent | 2011-02-12 02:24:50 | github | 0 | -| IgorMinar | CommitCommentEvent | 2011-02-12 02:25:20 | github | 0 | -| dapi | CommitCommentEvent | 2011-02-12 06:18:36 | github | 0 | -| sourcerebels | CommitCommentEvent | 2011-02-12 06:34:10 | github | 0 | -| jamierumbelow | CommitCommentEvent | 2011-02-12 12:21:40 | github | 0 | -| jpn | CommitCommentEvent | 2011-02-12 12:24:31 | github | 0 | -| Oxonium | CommitCommentEvent | 2011-02-12 12:31:28 | github | 0 | - -##### Kafkaエンジン設定の変更 {#modify-kafka-engine-settings} - -Kafkaエンジンテーブルを削除し、新しい設定で再作成することをお勧めします。このプロセス中にマテリアライズドビューを変更する必要はありません - Kafkaエンジンテーブルが再作成されるとメッセージ消費は再開されます。 - -##### 問題のデバッグ {#debugging-issues} - 
-認証問題などのエラーは、KafkaエンジンDDLへの応答には報告されません。問題の診断には、メインのClickHouseログファイルclickhouse-server.err.logを使用することをお勧めします。底層のKafkaクライアントライブラリ[librdkafka](https://github.com/edenhill/librdkafka)のさらなるトレースロギングは、設定を通じて有効にできます。 - -```xml - - all - -``` - -##### 不正なメッセージの処理 {#handling-malformed-messages} - -Kafkaはしばしばデータの「ダンピンググラウンド」として使用されます。これにより、トピックが混在したメッセージフォーマットや不整合なフィールド名を含むことになります。これを避けるために、Kafka StreamsやksqlDBのようなKafkaの機能を活用して、メッセージが挿入される前に適切に整形され、一貫性を持たせることをお勧めします。これらのオプションが利用できない場合、ClickHouseには役立ついくつかの機能があります。 - -* メッセージフィールドを文字列として扱う。必要に応じて、マテリアライズドビュー文でクレンジングやキャストを行うための関数を使用できます。これは本番環境のソリューションとは見なされませんが、一時的な取り込みには役立つかもしれません。 -* トピックからJSONを消費する場合、JSONEachRowフォーマットを使用し、[`input_format_skip_unknown_fields`](/operations/settings/formats#input_format_skip_unknown_fields)の設定を使用します。データを書き込む際、デフォルトでは、ClickHouseは入力データにターゲットテーブルに存在しないカラムが含まれている場合、例外をスローします。しかし、このオプションが有効にされている場合、これらの過剰なカラムは無視されます。再度言いますが、これは本番レベルの解決策でなく、他の人を混乱させる可能性があります。 -* `kafka_skip_broken_messages`の設定を考慮します。これにより、ユーザーは不正なメッセージのブロックごとの許容度を指定する必要があります - kafka_max_block_sizeの文脈で考慮されます。この許容度が超過されると(絶対メッセージで測定)、通常の例外動作が戻り、他のメッセージはスキップされます。 - -##### 配信セマンティクスと重複の問題 {#delivery-semantics-and-challenges-with-duplicates} - -Kafkaテーブルエンジンには少なくとも一度のセマンティクスがあります。いくつかの既知の稀な状況下で重複が可能です。例えば、メッセージがKafkaから読み取られ、ClickHouseに成功裏に挿入される場合があります。新しいオフセットがコミットされる前に、Kafkaへの接続が失われた場合、この状況でブロックの再試行が必要です。このブロックは、ターゲットテーブルとして分散テーブルまたはReplicatedMergeTreeを使用すると[デデュプリケート](https://engines/table-engines/mergetree-family/replication)される可能性があります。これにより重複行の可能性は減少しますが、同一ブロックに依存します。Kafkaのリバランスのようなイベントは、稀な状況で重複を引き起こすことがあり、この仮定を無効にする可能性があります。 - -##### クオラムベースの挿入 {#quorum-based-inserts} - -ClickHouseでより高い配信保証が必要な場合、[クオラムベースの挿入](/operations/settings/settings#insert_quorum)が必要になることがあります。これは、マテリアライズドビューやターゲットテーブルで設定できません。しかし、ユーザープロファイルに設定することができます: - -```xml - - - 2 - - -``` - -### ClickHouseからKafkaへ {#clickhouse-to-kafka} - -あまり一般的ではないユースケースですが、ClickHouseのデータをKafkaに永続化することもできます。例えば、Kafkaテーブルエンジンに行を手動で挿入します。このデータは、同じKafkaエンジンによって読み取られ、そのマテリアライズドビューがデータをMerge Treeテーブルに配置します。最後に、既存のソーステーブルからテーブルを読み取るためにKafkaへの挿入におけるマテリアライズドビューの適用を示します。 - -#### ステップ {#steps-1} - -私たちの最初の目標は次のように示されます: - - - -KafkaからClickHouseへのステップでテーブルとビューが作成されていると仮定し、トピックは完全に消費されているとしましょう。 - -##### 1. 行を直接挿入 {#1-inserting-rows-directly} - -まず、ターゲットテーブルのカウントを確認します。 - -```sql -SELECT count() FROM github; -``` - -200,000行があるはずです: -```response -┌─count()─┐ -│ 200000 │ -└─────────┘ -``` - -次に、GitHubターゲットテーブルからKafkaテーブルエンジンであるgithub_queueに行を挿入します。JSONEachRowフォーマットを使用し、SELECTを100に制限していることに注意してください。 - -```sql -INSERT INTO github_queue SELECT * FROM github LIMIT 100 FORMAT JSONEachRow -``` - -GitHubの行の再カウントを行い、それが100増加したことを確認してください。上記のダイアグラムで示すように、行はKafkaテーブルエンジンを介してKafkaに挿入された後、同じエンジンによって再度読み込まれ、マテリアライズドビューによってGitHubターゲットテーブルに挿入されます。 - -```sql -SELECT count() FROM github; -``` - -100行の追加が表示されるはずです: -```response -┌─count()─┐ -│ 200100 │ -└─────────┘ -``` - -##### 2. 
マテリアライズドビューの使用 {#2-using-materialized-views}
-
-テーブルに行が挿入されるとマテリアライズドビューがトリガーされ、その行がKafkaエンジンテーブルに挿入されて新しいトピックへ送信されます。このプロセスを実際に確認するには、以下の手順を実行します:
-
-新しいKafkaトピック`github_out`または同等のものを作成します。Kafkaテーブルエンジン`github_out_queue`がこのトピックを指すように設定します。
-
-```sql
-CREATE TABLE github_out_queue
-(
-    file_time DateTime,
-    event_type Enum('CommitCommentEvent' = 1, 'CreateEvent' = 2, 'DeleteEvent' = 3, 'ForkEvent' = 4, 'GollumEvent' = 5, 'IssueCommentEvent' = 6, 'IssuesEvent' = 7, 'MemberEvent' = 8, 'PublicEvent' = 9, 'PullRequestEvent' = 10, 'PullRequestReviewCommentEvent' = 11, 'PushEvent' = 12, 'ReleaseEvent' = 13, 'SponsorshipEvent' = 14, 'WatchEvent' = 15, 'GistEvent' = 16, 'FollowEvent' = 17, 'DownloadEvent' = 18, 'PullRequestReviewEvent' = 19, 'ForkApplyEvent' = 20, 'Event' = 21, 'TeamAddEvent' = 22),
-    actor_login LowCardinality(String),
-    repo_name LowCardinality(String),
-    created_at DateTime,
-    updated_at DateTime,
-    action Enum('none' = 0, 'created' = 1, 'added' = 2, 'edited' = 3, 'deleted' = 4, 'opened' = 5, 'closed' = 6, 'reopened' = 7, 'assigned' = 8, 'unassigned' = 9, 'labeled' = 10, 'unlabeled' = 11, 'review_requested' = 12, 'review_request_removed' = 13, 'synchronize' = 14, 'started' = 15, 'published' = 16, 'update' = 17, 'create' = 18, 'fork' = 19, 'merged' = 20),
-    comment_id UInt64,
-    path String,
-    ref LowCardinality(String),
-    ref_type Enum('none' = 0, 'branch' = 1, 'tag' = 2, 'repository' = 3, 'unknown' = 4),
-    creator_user_login LowCardinality(String),
-    number UInt32,
-    title String,
-    labels Array(LowCardinality(String)),
-    state Enum('none' = 0, 'open' = 1, 'closed' = 2),
-    assignee LowCardinality(String),
-    assignees Array(LowCardinality(String)),
-    closed_at DateTime,
-    merged_at DateTime,
-    merge_commit_sha String,
-    requested_reviewers Array(LowCardinality(String)),
-    merged_by LowCardinality(String),
-    review_comments UInt32,
-    member_login LowCardinality(String)
-)
-    ENGINE = Kafka('host:port', 'github_out', 'clickhouse_out',
-               'JSONEachRow') settings kafka_thread_per_consumer = 0, kafka_num_consumers = 1;
-```
-
-次に、githubテーブルを参照する新しいマテリアライズドビュー`github_out_mv`を作成し、トリガーされた行を上記のエンジンテーブルに挿入します。githubテーブルへの追加は、その結果として新しいKafkaトピックにプッシュされます。
-
-```sql
-CREATE MATERIALIZED VIEW github_out_mv TO github_out_queue AS
-SELECT file_time, event_type, actor_login, repo_name,
-       created_at, updated_at, action, comment_id, path,
-       ref, ref_type, creator_user_login, number, title,
-       labels, state, assignee, assignees, closed_at, merged_at,
-       merge_commit_sha, requested_reviewers, merged_by,
-       review_comments, member_login
-FROM github
-FORMAT JsonEachRow;
-```
-
-「[KafkaからClickHouse](#kafka-to-clickhouse)」の手順で作成した元の`github`トピックに挿入すると、ドキュメントが自動的に`github_out`トピックに現れます。これをネイティブのKafkaツールで確認してみましょう。以下のように10行を`github`トピックに挿入します:
-
-```bash
-head -n 10 github_all_columns.ndjson |
-kcat -P \
-  -b : \
-  -t github \
-  -X security.protocol=sasl_ssl \
-  -X sasl.mechanisms=PLAIN \
-  -X sasl.username= \
-  -X sasl.password=
-```
-
-`github_out`トピックを読み取ることで、メッセージが配信されたことを確認できるはずです。
-
-```bash
-kcat -C \
-  -b : \
-  -t github_out \
-  -X security.protocol=sasl_ssl \
-  -X sasl.mechanisms=PLAIN \
-  -X sasl.username= \
-  -X sasl.password= \
-  -e -q |
-wc -l
-```
-
-これは単純化した例ですが、Kafkaエンジンと組み合わせて使用した際のマテリアライズドビューの力を示しています。
-
-### クラスターとパフォーマンス {#clusters-and-performance}
-
-#### ClickHouseクラスターとの作業 {#working-with-clickhouse-clusters}
-
-Kafka消費者グループを通じて、複数のClickHouseインスタンスが同じトピックから読み取ることが可能です。各消費者はトピックのパーティションに1:1のマッピングで割り当てられます。Kafkaテーブルエンジンを使用してClickHouseの消費をスケールさせる場合、クラスター内の消費者の総数はトピック上のパーティション数を超えることができません。したがって、事前にトピックのパーティショニングが適切に設定されていることを確認してください。 - -複数のClickHouseインスタンスは、同じ消費者グループIDを使用してトピックから読み取るように設定できます - これはKafkaテーブルエンジンの作成中に指定されます。したがって、各インスタンスは1つ以上のパーティションから読み取り、ローカルターゲットテーブルにセグメントを挿入します。ターゲットテーブルは、データの重複を処理するためにReplicatedMergeTreeを使用するように設定される可能性があります。このアプローチでは、十分なKafkaパーティションが提供されれば、Kafkaの読み取りをClickHouseクラスターとスケールすることができます。 - - - -#### パフォーマンス調整 {#tuning-performance} - -Kafkaエンジンテーブルのスループットパフォーマンスを向上させるために、次の点を考慮してください: - -* パフォーマンスは、メッセージのサイズ、フォーマット、ターゲットテーブルの種類によって異なります。単一のテーブルエンジンで100k行/秒は達成可能と見なされるべきです。デフォルトで、メッセージはブロックで読み取られ、kafka_max_block_sizeパラメータによって制御されます。デフォルトでは、これは[max_insert_block_size](/operations/settings/settings#max_insert_block_size)に設定されており、デフォルト値は1,048,576です。メッセージが非常に大きくない限り、これはほぼ常に増加するべきです。500kから1Mの間の値は珍しくありません。スループットパフォーマンスへの影響をテストして評価してください。 -* テーブルエンジンの消費者数は、kafka_num_consumersを使用して増加させることができます。ただし、デフォルトでは挿入は単一スレッドで線形化され、kafka_thread_per_consumerのデフォルト値を変更しない限り。これを1に設定するとフラッシュが並行して実行されることが保証されます。N個の消費者を持つKafkaエンジンテーブルの作成(およびkafka_thread_per_consumer=1)は、各々がマテリアライズドビューおよびkafka_thread_per_consumer=0を持つN個のKafkaエンジンを作成することと論理的には同じです。 -* 消費者を増やすことは無料の操作ではありません。各消費者は自身のバッファとスレッドを維持し、サーバーへのオーバーヘッドが増加します。消費者のオーバーヘッドを意識し、まずクラスター全体にわたって線形にスケールアップすることが望ましいです。 -* Kafkaメッセージのスループットが変動する場合は、ストリームフラッシュインターバルをms単位で増加させて、より大きなブロックがフラッシュされるようにすることを検討してください。 -* [background_message_broker_schedule_pool_size](/operations/server-configuration-parameters/settings#background_message_broker_schedule_pool_size)は、バックグラウンドタスクを実行するスレッドの数を設定します。これらのスレッドはKafkaストリーミングに使用されます。この設定はClickHouseサーバーの起動時に適用され、ユーザーセッション内で変更できず、デフォルトは16です。ログにタイムアウトが表示された場合は、これを増加させると適切かもしれません。 -* Kafkaとの通信には、スレッドを作成するlibrdkafkaライブラリが使用されます。大量のKafkaテーブルや消費者がある場合、大量のコンテキストスイッチが発生する可能性があります。この負荷をクラスター全体に分散させ、可能な限りターゲットテーブルだけを複製するか、複数トピックから読み取るためにテーブルエンジンを使用することを検討してください - 値のリストがサポートされています。特定のトピックからのデータをフィルタリングする各マテリアライズドビューが、単一のテーブルから読み取ることができます。 - -設定変更は、必ずテストしてください。Kafka消費者の遅延を監視し、適切なスケールを確保することをお勧めします。 - -#### 追加設定 {#additional-settings} - -上記の設定に加えて、次の内容も関心があるかもしれません: - -* [Kafka_max_wait_ms](/operations/settings/settings#kafka_max_wait_ms) - 再試行前にKafkaからメッセージを読み取るための待機時間(ミリ秒単位)。ユーザープロファイルレベルで設定され、デフォルトは5000です。 - -底層のlibrdkafkaからの[すべての設定](https://github.com/edenhill/librdkafka/blob/master/CONFIGURATION.md)も、ClickHouse設定ファイル内の_kafka_要素内に配置できます - 設定名はXML要素で、ドットをアンダースコアに置き換える必要があります: - -```xml - - - false - - -``` - -これは専門的な設定であり、詳細な説明についてはKafkaのドキュメントを参照することをお勧めします。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/kafka/kafka-table-engine.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/kafka/kafka-table-engine.md.hash deleted file mode 100644 index b93f4cebd51..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/kafka/kafka-table-engine.md.hash +++ /dev/null @@ -1 +0,0 @@ -35dae84998681f84 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/kafka/kafka-vector.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/kafka/kafka-vector.md deleted file mode 100644 index 6dea9738e20..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/kafka/kafka-vector.md +++ /dev/null @@ -1,131 +0,0 @@ ---- -sidebar_label: 'Vector with Kafka' -sidebar_position: 3 -slug: '/integrations/kafka/kafka-vector' -description: 'Using Vector with Kafka 
and ClickHouse' -title: 'Using Vector with Kafka and ClickHouse' ---- - -import ConnectionDetails from '@site/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_gather_your_details_http.mdx'; - -## Kafka と ClickHouse を使用した Vector の利用 {#using-vector-with-kafka-and-clickhouse} - -Vector は、Kafka からデータを読み取り、ClickHouse にイベントを送信する能力を持つ、ベンダーに依存しないデータパイプラインです。 - -ClickHouse と連携するための [はじめに](../etl-tools/vector-to-clickhouse.md) ガイドは、ログのユースケースとファイルからのイベントの読み取りに焦点を当てています。私たちは、Kafka トピックに保持されているイベントを含む [Github サンプルデータセット](https://datasets-documentation.s3.eu-west-3.amazonaws.com/kafka/github_all_columns.ndjson) を利用します。 - -Vector は、プッシュまたはプルモデルを通じてデータを取得するために [sources](https://vector.dev/docs/about/concepts/#sources) を利用します。一方、[sinks](https://vector.dev/docs/about/concepts/#sinks) はイベントの宛先を提供します。したがって、私は Kafka ソースと ClickHouse シンクを利用します。Kafka はシンクとしてサポートされていますが、ClickHouse ソースは利用できないため、ClickHouse から Kafka にデータを転送したいユーザーには Vector は適していません。 - -Vector はデータの [変換](https://vector.dev/docs/reference/configuration/transforms/) もサポートしています。これは本ガイドの範囲を超えています。この機能が必要な場合は、ユーザーは自身のデータセットについて Vector ドキュメントを参照してください。 - -現在の ClickHouse シンクの実装は HTTP インターフェースを使用しています。現時点では ClickHouse シンクは JSON スキーマの使用をサポートしていません。データはプレーン JSON 形式または文字列として Kafka に公開する必要があります。 - -### ライセンス {#license} -Vector は [MPL-2.0 ライセンス](https://github.com/vectordotdev/vector/blob/master/LICENSE)のもとで配布されています。 - -### 接続詳細をまとめる {#gather-your-connection-details} - - -### 手順 {#steps} - -1. Kafka の `github` トピックを作成し、[Github データセット](https://datasets-documentation.s3.eu-west-3.amazonaws.com/kafka/github_all_columns.ndjson) を挿入します。 - -```bash -cat /opt/data/github/github_all_columns.ndjson | kcat -b : -X security.protocol=sasl_ssl -X sasl.mechanisms=PLAIN -X sasl.username= -X sasl.password= -t github -``` - -このデータセットは、`ClickHouse/ClickHouse` リポジトリに焦点を当てた 200,000 行から構成されています。 - -2. 
目標のテーブルが作成されていることを確認します。以下ではデフォルトのデータベースを使用します。 - -```sql - -CREATE TABLE github -( - file_time DateTime, - event_type Enum('CommitCommentEvent' = 1, 'CreateEvent' = 2, 'DeleteEvent' = 3, 'ForkEvent' = 4, - 'GollumEvent' = 5, 'IssueCommentEvent' = 6, 'IssuesEvent' = 7, 'MemberEvent' = 8, 'PublicEvent' = 9, 'PullRequestEvent' = 10, 'PullRequestReviewCommentEvent' = 11, 'PushEvent' = 12, 'ReleaseEvent' = 13, 'SponsorshipEvent' = 14, 'WatchEvent' = 15, 'GistEvent' = 16, 'FollowEvent' = 17, 'DownloadEvent' = 18, 'PullRequestReviewEvent' = 19, 'ForkApplyEvent' = 20, 'Event' = 21, 'TeamAddEvent' = 22), - actor_login LowCardinality(String), - repo_name LowCardinality(String), - created_at DateTime, - updated_at DateTime, - action Enum('none' = 0, 'created' = 1, 'added' = 2, 'edited' = 3, 'deleted' = 4, 'opened' = 5, 'closed' = 6, 'reopened' = 7, 'assigned' = 8, 'unassigned' = 9, 'labeled' = 10, 'unlabeled' = 11, 'review_requested' = 12, 'review_request_removed' = 13, 'synchronize' = 14, 'started' = 15, 'published' = 16, 'update' = 17, 'create' = 18, 'fork' = 19, 'merged' = 20), - comment_id UInt64, - path String, - ref LowCardinality(String), - ref_type Enum('none' = 0, 'branch' = 1, 'tag' = 2, 'repository' = 3, 'unknown' = 4), - creator_user_login LowCardinality(String), - number UInt32, - title String, - labels Array(LowCardinality(String)), - state Enum('none' = 0, 'open' = 1, 'closed' = 2), - assignee LowCardinality(String), - assignees Array(LowCardinality(String)), - closed_at DateTime, - merged_at DateTime, - merge_commit_sha String, - requested_reviewers Array(LowCardinality(String)), - merged_by LowCardinality(String), - review_comments UInt32, - member_login LowCardinality(String) -) ENGINE = MergeTree ORDER BY (event_type, repo_name, created_at); - -``` - -3. 
[Vector をダウンロードしてインストール](https://vector.dev/docs/setup/quickstart/)します。`kafka.toml` 構成ファイルを作成し、Kafka と ClickHouse インスタンスの値を変更します。 - -```toml -[sources.github] -type = "kafka" -auto_offset_reset = "smallest" -bootstrap_servers = ":" -group_id = "vector" -topics = [ "github" ] -tls.enabled = true -sasl.enabled = true -sasl.mechanism = "PLAIN" -sasl.username = "" -sasl.password = "" -decoding.codec = "json" - -[sinks.clickhouse] -type = "clickhouse" -inputs = ["github"] -endpoint = "http://localhost:8123" -database = "default" -table = "github" -skip_unknown_fields = true -auth.strategy = "basic" -auth.user = "username" -auth.password = "password" -buffer.max_events = 10000 -batch.timeout_secs = 1 -``` - -この構成および Vector の動作に関する重要な注意点がいくつかあります: - -- この例は Confluent Cloud に対してテストされています。したがって、`sasl.*` および `ssl.enabled` のセキュリティオプションは、セルフマネージドのケースには適さない可能性があります。 -- 構成パラメータ `bootstrap_servers` にはプロトコルプレフィックスは必要ありません。例: `pkc-2396y.us-east-1.aws.confluent.cloud:9092` -- ソースパラメータ `decoding.codec = "json"` は、メッセージが ClickHouse シンクに単一の JSON オブジェクトとして渡されることを保証します。メッセージを文字列として扱い、デフォルトの `bytes` 値を使用した場合、メッセージの内容はフィールド `message` に追加されます。このほとんどの場合、[Vector のはじめに](../etl-tools/vector-to-clickhouse.md#4-parse-the-logs)ガイドに記載されたように ClickHouse で処理が必要です。 -- Vector はメッセージにいくつかのフィールドを [追加します](https://vector.dev/docs/reference/configuration/sources/kafka/#output-data)。私たちの例では、設定パラメータ `skip_unknown_fields = true` を通じて ClickHouse シンクでこれらのフィールドを無視します。これは、ターゲットテーブルスキーマの一部でないフィールドを無視します。これらのメタフィールド(例: `offset`)を追加するようにスキーマを調整してください。 -- シンクがイベントのソースを `inputs` パラメータを通じて参照していることに注意してください。 -- ClickHouse シンクの動作については [こちら](https://vector.dev/docs/reference/configuration/sinks/clickhouse/#buffers-and-batches)を参照してください。最適なスループットを得るために、ユーザーは `buffer.max_events`、`batch.timeout_secs`、および `batch.max_bytes` パラメータを調整したいと考えるかもしれません。ClickHouse の [推奨事項](/sql-reference/statements/insert-into#performance-considerations) によれば、単一のバッチに含まれるイベント数の最小値として 1000 を考慮すべきです。均一な高スループットユースケースでは、ユーザーはパラメータ `buffer.max_events` を増やすことを検討します。可変スループットの場合は、パラメータ `batch.timeout_secs` の変更が必要です。 -- パラメータ `auto_offset_reset = "smallest"` は Kafka ソースにトピックの最初から始めさせます。これにより、ステップ (1) で公開されたメッセージを消費できるようになります。異なる動作が必要なユーザーは、[こちら](https://vector.dev/docs/reference/configuration/sources/kafka/#auto_offset_reset)を参照してください。 - -4. Vector を起動します。 - -```bash -vector --config ./kafka.toml -``` - -デフォルトでは、[ヘルスチェック](https://vector.dev/docs/reference/configuration/sinks/clickhouse/#healthcheck)が ClickHouse への挿入開始前に必要です。これにより、接続が確立され、スキーマが読み取れることが確認されます。問題が発生した場合は、`VECTOR_LOG=debug` を前に置くことで、さらに詳細なログを取得できます。 - -5. 
データの挿入を確認します。 - -```sql -SELECT count() as count FROM github; -``` - -| count | -| :--- | -| 200000 | diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/kafka/kafka-vector.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/kafka/kafka-vector.md.hash deleted file mode 100644 index 5cb9eb699d7..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/kafka/kafka-vector.md.hash +++ /dev/null @@ -1 +0,0 @@ -f825de77f5745ca0 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/kafka/msk/index.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/kafka/msk/index.md deleted file mode 100644 index 59a3fc0758f..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/kafka/msk/index.md +++ /dev/null @@ -1,96 +0,0 @@ ---- -sidebar_label: 'Amazon MSK with Kafka Connector Sink' -sidebar_position: 1 -slug: '/integrations/kafka/cloud/amazon-msk/' -description: 'The official Kafka connector from ClickHouse with Amazon MSK' -keywords: -- 'integration' -- 'kafka' -- 'amazon msk' -- 'sink' -- 'connector' -title: 'Amazon MSK with ClickHouse との統合' ---- - -import ConnectionDetails from '@site/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_gather_your_details_http.mdx'; - - -# Amazon MSKとClickHouseの統合 - -
- -
- -## 前提条件 {#prerequisites} -以下を前提とします: -* あなたは [ClickHouse Connector Sink](../kafka-clickhouse-connect-sink.md)、Amazon MSK、MSKコネクタに精通しているものとします。Amazon MSKの [はじめにガイド](https://docs.aws.amazon.com/msk/latest/developerguide/getting-started.html) と [MSK Connectガイド](https://docs.aws.amazon.com/msk/latest/developerguide/msk-connect.html) を推奨します。 -* MSKブローカーが公開アクセス可能であること。開発者ガイドの [公開アクセス](https://docs.aws.amazon.com/msk/latest/developerguide/public-access.html) セクションを参照してください。 - -## ClickHouseの公式KafkaコネクタとAmazon MSK {#the-official-kafka-connector-from-clickhouse-with-amazon-msk} - -### 接続の詳細を収集する {#gather-your-connection-details} - - - -### ステップ {#steps} -1. [ClickHouse Connector Sink](../kafka-clickhouse-connect-sink.md)に精通していることを確認してください。 -1. [MSKインスタンスを作成](https://docs.aws.amazon.com/msk/latest/developerguide/create-cluster.html)します。 -1. [IAMロールを作成して割り当て](https://docs.aws.amazon.com/msk/latest/developerguide/create-client-iam-role.html)ます。 -1. ClickHouse Connect Sinkの [リリースページ](https://github.com/ClickHouse/clickhouse-kafka-connect/releases)から`jar`ファイルをダウンロードします。 -1. Amazon MSKコンソールの [カスタムプラグインページ](https://docs.aws.amazon.com/msk/latest/developerguide/msk-connect-plugins.html)にダウンロードした`jar`ファイルをインストールします。 -1. コネクタが公開のClickHouseインスタンスと通信する場合、[インターネットアクセスを有効にします](https://docs.aws.amazon.com/msk/latest/developerguide/msk-connect-internet-access.html)。 -1. 設定にトピック名、ClickHouseインスタンスのホスト名、およびパスワードを提供します。 -```yml -connector.class=com.clickhouse.kafka.connect.ClickHouseSinkConnector -tasks.max=1 -topics= -ssl=true -security.protocol=SSL -hostname= -database= -password= -ssl.truststore.location=/tmp/kafka.client.truststore.jks -port=8443 -value.converter.schemas.enable=false -value.converter=org.apache.kafka.connect.json.JsonConverter -exactlyOnce=true -username=default -schemas.enable=false -``` - -## パフォーマンスチューニング {#performance-tuning} -パフォーマンスを向上させる方法の一つは、バッチサイズとKafkaから取得されるレコードの数を調整することです。以下を**worker**構成に追加します: -```yml -consumer.max.poll.records=[レコード数] -consumer.max.partition.fetch.bytes=[レコード数 * バイト単位のレコードサイズ] -``` - -使用する具体的な値は、希望するレコード数やレコードサイズによって異なります。例えば、デフォルトの値は以下の通りです: - -```yml -consumer.max.poll.records=500 -consumer.max.partition.fetch.bytes=1048576 -``` - -詳細(実装やその他の考慮事項)は、公式の [Kafka](https://kafka.apache.org/documentation/#consumerconfigs) および -[Amazon MSK](https://docs.aws.amazon.com/msk/latest/developerguide/msk-connect-workers.html#msk-connect-create-custom-worker-config) ドキュメントに記載されています。 - -## MSK Connect用のネットワーキングに関する注意事項 {#notes-on-networking-for-msk-connect} - -MSK ConnectがClickHouseに接続するためには、MSKクラスターをプライベートサブネットに配置し、インターネットアクセスのためにプライベートNATを接続することを推奨します。設定手順は以下に示します。公共サブネットもサポートされていますが、ENIにElastic IPアドレスを常に割り当てる必要があるため推奨されません。詳細は[AWSがこちらで提供しています](https://docs.aws.amazon.com/msk/latest/developerguide/msk-connect-internet-access.html)。 - -1. **プライベートサブネットを作成する:** VPC内に新しいサブネットを作成し、それをプライベートサブネットとして指定します。このサブネットはインターネットへの直接アクセスを持ってはいけません。 -1. **NATゲートウェイを作成する:** VPCの公共サブネット内にNATゲートウェイを作成します。NATゲートウェイは、プライベートサブネット内のインスタンスがインターネットまたは他のAWSサービスに接続できるようにしますが、インターネットがそれらのインスタンスに接続を開始することを防ぎます。 -1. **ルートテーブルを更新する:** インターネット向けのトラフィックをNATゲートウェイへ誘導するルートを追加します。 -1. **セキュリティグループおよびネットワークACLの構成を確認する:** [セキュリティグループ](https://docs.aws.amazon.com/vpc/latest/userguide/vpc-security-groups.html)と[ネットワークACL(アクセス制御リスト)](https://docs.aws.amazon.com/vpc/latest/userguide/vpc-network-acls.html)を設定して、ClickHouseインスタンスへの関連トラフィックを許可します。 - 1. ClickHouse Cloudの場合、9440および8443ポートでの着信トラフィックを許可するようセキュリティグループを設定します。 - 1. セルフホスティングのClickHouseの場合、設定ファイルのポート(デフォルトは8123)での着信トラフィックを許可するようセキュリティグループを設定します。 -1. 
**MSKにセキュリティグループをアタッチする:** NATゲートウェイにルーティングされた新しいセキュリティグループがMSKクラスターにアタッチされていることを確認します。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/kafka/msk/index.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/kafka/msk/index.md.hash deleted file mode 100644 index 511af690307..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/kafka/msk/index.md.hash +++ /dev/null @@ -1 +0,0 @@ -27a473f0703c6280 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/redshift/index.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/redshift/index.md deleted file mode 100644 index be96b6b8af5..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/redshift/index.md +++ /dev/null @@ -1,262 +0,0 @@ ---- -sidebar_label: 'Redshift' -slug: '/integrations/redshift' -description: 'Redshift から ClickHouse へのデータ移行' -title: 'Redshift から ClickHouse へのデータ移行' ---- - -import redshiftToClickhouse from '@site/static/images/integrations/data-ingestion/redshift/redshift-to-clickhouse.png'; -import push from '@site/static/images/integrations/data-ingestion/redshift/push.png'; -import pull from '@site/static/images/integrations/data-ingestion/redshift/pull.png'; -import pivot from '@site/static/images/integrations/data-ingestion/redshift/pivot.png'; -import s3_1 from '@site/static/images/integrations/data-ingestion/redshift/s3-1.png'; -import s3_2 from '@site/static/images/integrations/data-ingestion/redshift/s3-2.png'; -import Image from '@theme/IdealImage'; - - -# Redshift から ClickHouse へのデータ移行 - -## 関連コンテンツ {#related-content} - -
- -
- -- ブログ: [分析ワークロードの最適化: Redshift と ClickHouse の比較](https://clickhouse.com/blog/redshift-vs-clickhouse-comparison) - -## 概要 {#introduction} - -[Amazon Redshift](https://aws.amazon.com/redshift/) は、Amazon Web Services の一部である人気のクラウドデータウェアハウジングソリューションです。このガイドでは、Redshift インスタンスから ClickHouse へのデータ移行のためのさまざまなアプローチを紹介します。以下の三つのオプションをカバーします: - - - -ClickHouse インスタンスの観点からは、次のいずれかを行うことができます: - -1. **[PUSH](#push-data-from-redshift-to-clickhouse)** サードパーティの ETL/ELT ツールまたはサービスを使用して ClickHouse にデータを送信する - -2. **[PULL](#pull-data-from-redshift-to-clickhouse)** ClickHouse JDBC ブリッジを活用して Redshift からデータを取得する - -3. **[PIVOT](#pivot-data-from-redshift-to-clickhouse-using-s3)** S3 オブジェクトストレージを使用して「アンロード後にロード」ロジックを使用する - -:::note -このチュートリアルでは Redshift をデータソースとして使用しました。ただし、ここで説明する移行アプローチは Redshift に限定されるものではなく、互換性のあるデータソースに対して同様の手順を導き出すことができます。 -::: - - -## Redshift から ClickHouse へのデータプッシュ {#push-data-from-redshift-to-clickhouse} - -プッシュシナリオでは、サードパーティのツールまたはサービス(カスタムコードまたは [ETL/ELT](https://en.wikipedia.org/wiki/Extract,_transform,_load#ETL_vs._ELT) ソフトウェア)を活用して、データを ClickHouse インスタンスに送信することを目的としています。例えば、[Airbyte](https://www.airbyte.com/) のようなソフトウェアを使用して、Redshift インスタンス(ソース)と ClickHouse(デスティネーション)間でデータを移動することができます([Airbyte の統合ガイドを参照してください](/integrations/data-ingestion/etl-tools/airbyte-and-clickhouse.md))。 - - - -### 利点 {#pros} - -* ETL/ELT ソフトウェアのコネクタの既存カタログを活用できる。 -* データを同期するための組み込み機能(追加/上書き/インクリメントロジック)。 -* データ変換シナリオを可能にする(例えば、[dbt の統合ガイドを参照](/integrations/data-ingestion/etl-tools/dbt/index.md))。 - -### 欠点 {#cons} - -* ユーザーは ETL/ELT インフラを設定および維持する必要がある。 -* アーキテクチャにサードパーティの要素を導入し、潜在的なスケーラビリティのボトルネックになる可能性がある。 - - -## Redshift から ClickHouse へのデータプル {#pull-data-from-redshift-to-clickhouse} - -プルシナリオでは、ClickHouse JDBC ブリッジを活用して、ClickHouse インスタンスから直接 Redshift クラスターに接続し、`INSERT INTO ... SELECT` クエリを実行します: - - - -### 利点 {#pros-1} - -* すべての JDBC 互換ツールに一般的 -* ClickHouse 内から複数の外部データソースをクエリするための洗練されたソリューション - -### 欠点 {#cons-1} - -* スケーラビリティのボトルネックになる可能性のある ClickHouse JDBC ブリッジインスタンスを必要とする - - -:::note -Redshift は PostgreSQL に基づいていますが、ClickHouse PostgreSQL テーブル関数やテーブルエンジンを使用することはできません。なぜなら、ClickHouse は PostgreSQL バージョン 9 以上を要求し、Redshift API は古いバージョン(8.x)に基づいているからです。 -::: - -### チュートリアル {#tutorial} - -このオプションを使用するには、ClickHouse JDBC ブリッジを設定する必要があります。ClickHouse JDBC ブリッジは、JDBC 接続を処理し、ClickHouse インスタンスとデータソースの間のプロキシとして機能するスタンドアロンの Java アプリケーションです。このチュートリアルでは、[サンプルデータベース](https://docs.aws.amazon.com/redshift/latest/dg/c_sampledb.html)を持つ事前に設定された Redshift インスタンスを使用しました。 - -1. ClickHouse JDBC ブリッジを展開します。詳細については、[外部データソース用の JDBC のユーザーガイド](/integrations/data-ingestion/dbms/jdbc-with-clickhouse.md)を参照してください。 - -:::note -ClickHouse Cloud を使用している場合は、別の環境で ClickHouse JDBC ブリッジを実行し、[remoteSecure](/sql-reference/table-functions/remote/) 関数を使用して ClickHouse Cloud に接続する必要があります。 -::: - -2. ClickHouse JDBC ブリッジの Redshift データソースを構成します。例えば、`/etc/clickhouse-jdbc-bridge/config/datasources/redshift.json ` - - ```json - { - "redshift-server": { - "aliases": [ - "redshift" - ], - "driverUrls": [ - "https://s3.amazonaws.com/redshift-downloads/drivers/jdbc/2.1.0.4/redshift-jdbc42-2.1.0.4.jar" - ], - "driverClassName": "com.amazon.redshift.jdbc.Driver", - "jdbcUrl": "jdbc:redshift://redshift-cluster-1.ckubnplpz1uv.us-east-1.redshift.amazonaws.com:5439/dev", - "username": "awsuser", - "password": "", - "maximumPoolSize": 5 - } - } - ``` - -3. 
ClickHouse JDBC ブリッジが展開されて実行されている場合、ClickHouse から Redshift インスタンスをクエリし始めることができます。 - - ```sql - SELECT * - FROM jdbc('redshift', 'select username, firstname, lastname from users limit 5') - ``` - - ```response - Query id: 1b7de211-c0f6-4117-86a2-276484f9f4c0 - - ┌─username─┬─firstname─┬─lastname─┐ - │ PGL08LJI │ Vladimir │ Humphrey │ - │ XDZ38RDD │ Barry │ Roy │ - │ AEB55QTM │ Reagan │ Hodge │ - │ OWY35QYB │ Tamekah │ Juarez │ - │ MSD36KVR │ Mufutau │ Watkins │ - └──────────┴───────────┴──────────┘ - - 5 rows in set. Elapsed: 0.438 sec. - ``` - - ```sql - SELECT * - FROM jdbc('redshift', 'select count(*) from sales') - ``` - - ```response - Query id: 2d0f957c-8f4e-43b2-a66a-cc48cc96237b - - ┌──count─┐ - │ 172456 │ - └────────┘ - - 1 rows in set. Elapsed: 0.304 sec. - ``` - - -4. 次に、`INSERT INTO ... SELECT` ステートメントを使用してデータをインポートします。 - - ```sql - # 3 カラムの TABLE CREATION - CREATE TABLE users_imported - ( - `username` String, - `firstname` String, - `lastname` String - ) - ENGINE = MergeTree - ORDER BY firstname - ``` - - ```response - Query id: c7c4c44b-cdb2-49cf-b319-4e569976ab05 - - Ok. - - 0 rows in set. Elapsed: 0.233 sec. - ``` - - ```sql - # データのインポート - INSERT INTO users_imported (*) SELECT * - FROM jdbc('redshift', 'select username, firstname, lastname from users') - ``` - - ```response - Query id: 9d3a688d-b45a-40f4-a7c7-97d93d7149f1 - - Ok. - - 0 rows in set. Elapsed: 4.498 sec. Processed 49.99 thousand rows, 2.49 MB (11.11 thousand rows/s., 554.27 KB/s.) - ``` - -## S3 を使用して Redshift から ClickHouse へのデータピボット {#pivot-data-from-redshift-to-clickhouse-using-s3} - -このシナリオでは、データを中間ピボット形式で S3 にエクスポートし、次のステップで S3 から ClickHouse にデータをロードします。 - - - -### 利点 {#pros-2} - -* Redshift と ClickHouse の両方が強力な S3 統合機能を持っている。 -* Redshift の `UNLOAD` コマンドや ClickHouse S3 テーブル関数 / テーブルエンジンの既存機能を利用。 -* ClickHouse の S3 への並列読み取りと高スループット機能によりシームレスにスケールできます。 -* Apache Parquet のような洗練された圧縮フォーマットを活用できる。 - -### 欠点 {#cons-2} - -* プロセスに 2 ステップ(Redshift からアンロードして ClickHouse へロード)が必要です。 - -### チュートリアル {#tutorial-1} - -1. Redshift の [UNLOAD](https://docs.aws.amazon.com/redshift/latest/dg/r_UNLOAD.html) 機能を使用して、既存のプライベート S3 バケットにデータをエクスポートします: - - - - これにより、S3 に生データを含むパートファイルが生成されます。 - - - -2. ClickHouse にテーブルを作成します: - - ```sql - CREATE TABLE users - ( - username String, - firstname String, - lastname String - ) - ENGINE = MergeTree - ORDER BY username - ``` - - または、ClickHouse は `CREATE TABLE ... EMPTY AS SELECT` を使用してテーブル構造を推測することができます: - - ```sql - CREATE TABLE users - ENGINE = MergeTree ORDER BY username - EMPTY AS - SELECT * FROM s3('https://your-bucket.s3.amazonaws.com/unload/users/*', '', '', 'CSV') - ``` - - これは特にデータがデータ型に関する情報を含むフォーマット(例: Parquet)である場合に効果的です。 - -3. S3 ファイルを ClickHouse にロードします。`INSERT INTO ... SELECT` ステートメントを使用します: - ```sql - INSERT INTO users SELECT * - FROM s3('https://your-bucket.s3.amazonaws.com/unload/users/*', '', '', 'CSV') - ``` - - ```response - Query id: 2e7e219a-6124-461c-8d75-e4f5002c8557 - - Ok. - - 0 rows in set. Elapsed: 0.545 sec. Processed 49.99 thousand rows, 2.34 MB (91.72 thousand rows/s., 4.30 MB/s.) 
- ``` - -:::note -この例では、CSV をピボットフォーマットとして使用しました。ただし、生産ワークロードでは、圧縮とストレージコストを削減しつつトランスファー時間を短縮できるため、大規模な移行には Apache Parquet を最良の選択肢としてお勧めします。(デフォルトでは、各行グループは SNAPPY を使用して圧縮されています。)ClickHouse は Parquet の列指向性を活用してデータの取り込みを加速します。 -::: diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/redshift/index.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/redshift/index.md.hash deleted file mode 100644 index 12672f26877..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/redshift/index.md.hash +++ /dev/null @@ -1 +0,0 @@ -a6734a0c67217f2b diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/s3-minio.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/s3-minio.md deleted file mode 100644 index 27d44dafdba..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/s3-minio.md +++ /dev/null @@ -1,47 +0,0 @@ ---- -sidebar_label: 'MinIO' -sidebar_position: 6 -slug: '/integrations/minio' -description: 'ClickHouse と MinIO の使用方法を説明するページ' -title: 'Using MinIO' ---- - -import SelfManaged from '@site/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_self_managed_only_no_roadmap.md'; - - -# Using MinIO - - - -すべての S3 機能とテーブルは、[MinIO](https://min.io/) と互換性があります。ユーザーは、特に最適なネットワークの近接性がある場合に、セルフホストの MinIO ストレージで優れたスループットを体験することができます。 - -また、バックアップされたマージツリーの構成も、いくつかの設定の変更で互換性があります: - -```xml - - - ... - - - s3 - https://min.io/tables// - your_access_key_id - your_secret_access_key - - /var/lib/clickhouse/disks/s3/ - - - cache - s3 - /var/lib/clickhouse/disks/s3_cache/ - 10Gi - - - ... - - -``` - -:::tip -エンドポイントタグのダブルスラッシュに注意してください。これはバケットのルートを指定するために必要です。 -::: diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/s3-minio.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/s3-minio.md.hash deleted file mode 100644 index 3f610366d0f..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/s3-minio.md.hash +++ /dev/null @@ -1 +0,0 @@ -939736c86a8d344c diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/s3/index.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/s3/index.md deleted file mode 100644 index 8d4dabfad79..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/s3/index.md +++ /dev/null @@ -1,1275 +0,0 @@ ---- -slug: '/integrations/s3' -sidebar_position: 1 -sidebar_label: 'ClickHouse との S3 統合' -title: 'ClickHouse との S3 統合' -description: 'ClickHouse と S3 を統合する方法について説明したページ' ---- - -import BucketDetails from '@site/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_S3_authentication_and_bucket.md'; -import S3J from '@site/static/images/integrations/data-ingestion/s3/s3-j.png'; -import Bucket1 from '@site/static/images/integrations/data-ingestion/s3/bucket1.png'; -import Bucket2 from '@site/static/images/integrations/data-ingestion/s3/bucket2.png'; -import Image from '@theme/IdealImage'; - -# S3とClickHouseの統合 - -S3からClickHouseにデータを挿入することができ、またS3をエクスポート先として使用することで、「データレイク」アーキテクチャとの相互作用を実現します。さらに、S3は「コールド」ストレージ層を提供し、ストレージと計算を分離するのに役立ちます。以下のセクションでは、ニューヨーク市のタクシーデータセットを使用して、S3とClickHouse間のデータ移動プロセスを示し、主要な構成パラメータを特定し、パフォーマンスを最適化するためのヒントを提供します。 -## S3テーブル関数 {#s3-table-functions} - -`s3`テーブル関数を使用すると、S3互換ストレージからファイルの読み書きができます。この構文の概要は以下の通りです: - -```sql -s3(path, [aws_access_key_id, 
aws_secret_access_key,] [format, [structure, [compression]]]) -``` - -ここで、 - -* path — ファイルへのパスを含むバケットURL。以下のワイルドカードを読み取り専用モードでサポートします:`*`、`?`、`{abc,def}` および `{N..M}`(ここで `N` と `M` は数値、`'abc'` と `'def'` は文字列)。詳細については、[パスにおけるワイルドカードの使用](/engines/table-engines/integrations/s3/#wildcards-in-path)のドキュメントを参照してください。 -* format — ファイルの[フォーマット](/interfaces/formats#formats-overview)。 -* structure — テーブルの構造。形式 `'column1_name column1_type, column2_name column2_type, ...'`。 -* compression — パラメータはオプションです。サポートされる値:`none`、`gzip/gz`、`brotli/br`、`xz/LZMA`、`zstd/zst`。デフォルトでは、ファイル拡張子によって圧縮を自動検出します。 - -パス式でワイルドカードを使用することで、複数のファイルを参照し、並列処理の扉を開きます。 -### 準備 {#preparation} - -ClickHouseでテーブルを作成する前に、S3バケット内のデータを詳しく見ることをお勧めします。これは、`DESCRIBE`ステートメントを使用してClickHouseから直接行うことができます: - -```sql -DESCRIBE TABLE s3('https://datasets-documentation.s3.eu-west-3.amazonaws.com/nyc-taxi/trips_*.gz', 'TabSeparatedWithNames'); -``` - -`DESCRIBE TABLE`ステートメントの出力は、ClickHouseがこのデータをどのように自動的に推論するかを示します。S3バケットで表示された内容に留意してください。また、gzip圧縮形式を自動的に認識し、解凍することも確認してください: - -```sql -DESCRIBE TABLE s3('https://datasets-documentation.s3.eu-west-3.amazonaws.com/nyc-taxi/trips_*.gz', 'TabSeparatedWithNames') SETTINGS describe_compact_output=1 - -┌─name──────────────────┬─type───────────────┐ -│ trip_id │ Nullable(Int64) │ -│ vendor_id │ Nullable(Int64) │ -│ pickup_date │ Nullable(Date) │ -│ pickup_datetime │ Nullable(DateTime) │ -│ dropoff_date │ Nullable(Date) │ -│ dropoff_datetime │ Nullable(DateTime) │ -│ store_and_fwd_flag │ Nullable(Int64) │ -│ rate_code_id │ Nullable(Int64) │ -│ pickup_longitude │ Nullable(Float64) │ -│ pickup_latitude │ Nullable(Float64) │ -│ dropoff_longitude │ Nullable(Float64) │ -│ dropoff_latitude │ Nullable(Float64) │ -│ passenger_count │ Nullable(Int64) │ -│ trip_distance │ Nullable(String) │ -│ fare_amount │ Nullable(String) │ -│ extra │ Nullable(String) │ -│ mta_tax │ Nullable(String) │ -│ tip_amount │ Nullable(String) │ -│ tolls_amount │ Nullable(Float64) │ -│ ehail_fee │ Nullable(Int64) │ -│ improvement_surcharge │ Nullable(String) │ -│ total_amount │ Nullable(String) │ -│ payment_type │ Nullable(String) │ -│ trip_type │ Nullable(Int64) │ -│ pickup │ Nullable(String) │ -│ dropoff │ Nullable(String) │ -│ cab_type │ Nullable(String) │ -│ pickup_nyct2010_gid │ Nullable(Int64) │ -│ pickup_ctlabel │ Nullable(Float64) │ -│ pickup_borocode │ Nullable(Int64) │ -│ pickup_ct2010 │ Nullable(String) │ -│ pickup_boroct2010 │ Nullable(String) │ -│ pickup_cdeligibil │ Nullable(String) │ -│ pickup_ntacode │ Nullable(String) │ -│ pickup_ntaname │ Nullable(String) │ -│ pickup_puma │ Nullable(Int64) │ -│ dropoff_nyct2010_gid │ Nullable(Int64) │ -│ dropoff_ctlabel │ Nullable(Float64) │ -│ dropoff_borocode │ Nullable(Int64) │ -│ dropoff_ct2010 │ Nullable(String) │ -│ dropoff_boroct2010 │ Nullable(String) │ -│ dropoff_cdeligibil │ Nullable(String) │ -│ dropoff_ntacode │ Nullable(String) │ -│ dropoff_ntaname │ Nullable(String) │ -│ dropoff_puma │ Nullable(Int64) │ -└───────────────────────┴────────────────────┘ -``` - -私たちのS3ベースのデータセットと対話するために、標準の`MergeTree`テーブルを目的地として準備します。以下のステートメントは、デフォルトデータベースに`trips`という名前のテーブルを作成します。特に推論されたデータ型の一部を変更することに注意してください。特に、余分なストレージデータを引き起こす可能性のある[`Nullable()`](/sql-reference/data-types/nullable)データ型修飾子を使用しないことを選択しました: - -```sql -CREATE TABLE trips -( - `trip_id` UInt32, - `vendor_id` Enum8('1' = 1, '2' = 2, '3' = 3, '4' = 4, 'CMT' = 5, 'VTS' = 6, 'DDS' = 7, 'B02512' = 10, 'B02598' = 11, 'B02617' = 12, 'B02682' = 13, 'B02764' = 14, '' = 15), - `pickup_date` Date, - `pickup_datetime` DateTime, - 
`dropoff_date` Date, - `dropoff_datetime` DateTime, - `store_and_fwd_flag` UInt8, - `rate_code_id` UInt8, - `pickup_longitude` Float64, - `pickup_latitude` Float64, - `dropoff_longitude` Float64, - `dropoff_latitude` Float64, - `passenger_count` UInt8, - `trip_distance` Float64, - `fare_amount` Float32, - `extra` Float32, - `mta_tax` Float32, - `tip_amount` Float32, - `tolls_amount` Float32, - `ehail_fee` Float32, - `improvement_surcharge` Float32, - `total_amount` Float32, - `payment_type` Enum8('UNK' = 0, 'CSH' = 1, 'CRE' = 2, 'NOC' = 3, 'DIS' = 4), - `trip_type` UInt8, - `pickup` FixedString(25), - `dropoff` FixedString(25), - `cab_type` Enum8('yellow' = 1, 'green' = 2, 'uber' = 3), - `pickup_nyct2010_gid` Int8, - `pickup_ctlabel` Float32, - `pickup_borocode` Int8, - `pickup_ct2010` String, - `pickup_boroct2010` String, - `pickup_cdeligibil` String, - `pickup_ntacode` FixedString(4), - `pickup_ntaname` String, - `pickup_puma` UInt16, - `dropoff_nyct2010_gid` UInt8, - `dropoff_ctlabel` Float32, - `dropoff_borocode` UInt8, - `dropoff_ct2010` String, - `dropoff_boroct2010` String, - `dropoff_cdeligibil` String, - `dropoff_ntacode` FixedString(4), - `dropoff_ntaname` String, - `dropoff_puma` UInt16 -) -ENGINE = MergeTree -PARTITION BY toYYYYMM(pickup_date) -ORDER BY pickup_datetime -``` - -`pickup_date`フィールドの[パーティショニング](/engines/table-engines/mergetree-family/custom-partitioning-key)の使用に注意してください。通常、パーティションキーはデータ管理用ですが、後でこのキーを使用してS3への書き込みを並列化します。 - -タクシーデータセット内の各エントリはタクシートリップを含みます。この匿名化されたデータは、S3バケットに圧縮された2000万件のレコードで構成されています。バケットは https://datasets-documentation.s3.eu-west-3.amazonaws.com/ で、フォルダは **nyc-taxi** です。データはTSV形式で、各ファイルに約100万行含まれています。 -### S3からのデータ読み取り {#reading-data-from-s3} - -S3データをソースとしてクエリし、ClickHouseの永続性を必要とせずに使用できます。以下のクエリでは、10行をサンプリングします。バケットが公開アクセス可能であるため、ここでは認証情報が不要です: - -```sql -SELECT * -FROM s3('https://datasets-documentation.s3.eu-west-3.amazonaws.com/nyc-taxi/trips_*.gz', 'TabSeparatedWithNames') -LIMIT 10; -``` - -`TabSeparatedWithNames`フォーマットは、最初の行にカラム名をエンコードしているため、カラムをリストする必要はないことに注意してください。他のフォーマット(例:`CSV`や`TSV`)は、このクエリに対して自動生成されたカラムを返します。例: `c1`、`c2`、`c3`など。 - -クエリは、バケットパスやファイル名に関する情報を提供する[仮想カラム](../sql-reference/table-functions/s3#virtual-columns)をサポートしています。例えば: - -```sql -SELECT _path, _file, trip_id -FROM s3('https://datasets-documentation.s3.eu-west-3.amazonaws.com/nyc-taxi/trips_0.gz', 'TabSeparatedWithNames') -LIMIT 5; -``` - -```response -┌─_path──────────────────────────────────────┬─_file──────┬────trip_id─┐ -│ datasets-documentation/nyc-taxi/trips_0.gz │ trips_0.gz │ 1199999902 │ -│ datasets-documentation/nyc-taxi/trips_0.gz │ trips_0.gz │ 1199999919 │ -│ datasets-documentation/nyc-taxi/trips_0.gz │ trips_0.gz │ 1199999944 │ -│ datasets-documentation/nyc-taxi/trips_0.gz │ trips_0.gz │ 1199999969 │ -│ datasets-documentation/nyc-taxi/trips_0.gz │ trips_0.gz │ 1199999990 │ -└────────────────────────────────────────────┴────────────┴────────────┘ -``` - -このサンプルデータセット内の行数を確認します。ファイル展開のためワイルドカードを使用しているため、すべての20ファイルを考慮します。このクエリは、ClickHouseインスタンスのコア数によって約10秒かかります: - -```sql -SELECT count() AS count -FROM s3('https://datasets-documentation.s3.eu-west-3.amazonaws.com/nyc-taxi/trips_*.gz', 'TabSeparatedWithNames'); -``` - -```response -┌────count─┐ -│ 20000000 │ -└──────────┘ -``` - -これは、データのサンプリングや即席の探索的クエリを実行する際には便利ですが、S3からデータを直接読み取ることは定期的に行うべきではありません。真剣にデータを扱うときは、データをClickHouseの`MergeTree`テーブルにインポートします。 -### clickhouse-localの使用 {#using-clickhouse-local} - 
-`clickhouse-local`プログラムを使用すると、ClickHouseサーバーをデプロイおよび構成せずにローカルファイルの高速処理を行うことができます。`s3`テーブル関数を使用するクエリは、このユーティリティを使って実行できます。例えば: - -```sql -clickhouse-local --query "SELECT * FROM s3('https://datasets-documentation.s3.eu-west-3.amazonaws.com/nyc-taxi/trips_*.gz', 'TabSeparatedWithNames') LIMIT 10" -``` -### S3からのデータ挿入 {#inserting-data-from-s3} - -ClickHouseの機能を最大限に活用するために、次にデータを読み取り、インスタンスに挿入します。これを達成するために、`s3`関数とシンプルな`INSERT`ステートメントを組み合わせます。ターゲットテーブルが必要な構造を提供するため、カラムをリストする必要はありません。カラムは`SELECT`句の位置に従ってマッピングされます。10M行の挿入には数分かかる場合がありますが、下の例では即時の応答を得るために100万行を挿入します。必要に応じて、`LIMIT`句やカラム選択を調整して部分インポートを行います: - -```sql -INSERT INTO trips - SELECT * - FROM s3('https://datasets-documentation.s3.eu-west-3.amazonaws.com/nyc-taxi/trips_*.gz', 'TabSeparatedWithNames') - LIMIT 1000000; -``` -### ClickHouse Localを使用したリモート挿入 {#remote-insert-using-clickhouse-local} - -ネットワークセキュリティポリシーにより、ClickHouseクラスターが外向き接続を行えない場合、`clickhouse-local`を使用してS3データを挿入することができます。以下の例では、S3バケットから読み取り、`remote`関数を使用してClickHouseに挿入します: - -```sql -clickhouse-local --query "INSERT INTO TABLE FUNCTION remote('localhost:9000', 'default.trips', 'username', 'password') (*) SELECT * FROM s3('https://datasets-documentation.s3.eu-west-3.amazonaws.com/nyc-taxi/trips_*.gz', 'TabSeparatedWithNames') LIMIT 10" -``` - -:::note -これを安全なSSL接続で実行するには、`remoteSecure`関数を利用してください。 -::: -### データのエクスポート {#exporting-data} - -`s3`テーブル関数を使用してS3にファイルを書き込むことができます。これには適切な権限が必要です。リクエスト内で必要な認証情報を渡しますが、詳細については[認証情報の管理](#managing-credentials)ページを参照してください。 - -単純な例では、テーブル関数をソースではなく目的地として使用します。ここでは、`trips`テーブルからバケットに10000行をストリーミングし、`lz4`圧縮および出力タイプとして`CSV`を指定します: - -```sql -INSERT INTO FUNCTION - s3( - 'https://datasets-documentation.s3.eu-west-3.amazonaws.com/csv/trips.csv.lz4', - 's3_key', - 's3_secret', - 'CSV' - ) -SELECT * -FROM trips -LIMIT 10000; -``` - -ここで、ファイルの形式は拡張子から推測されることに注意してください。また、`s3`関数内でカラムを指定する必要はありません。これも`SELECT`から推測されます。 -### 大きなファイルを分割する {#splitting-large-files} - -データを単一のファイルとしてエクスポートすることはほとんどないでしょう。ClickHouseを含むほとんどのツールは、並列処理の可能性により、複数のファイルに対して読み書きすることでより高いスループットパフォーマンスを達成します。`INSERT`コマンドを複数回実行し、データのサブセットをターゲットにすることができます。ClickHouseは、`PARTITION`キーを使用してファイルを自動的に分割する手段を提供します。 - -以下の例では、`rand()`関数の剰余を使用して10ファイルを作成します。結果のパーティションIDがファイル名に参照されていることに注意してください。これにより、数値の接尾辞を持つ10ファイル(例:`trips_0.csv.lz4`, `trips_1.csv.lz4`など)が生成されます: - -```sql -INSERT INTO FUNCTION - s3( - 'https://datasets-documentation.s3.eu-west-3.amazonaws.com/csv/trips_{_partition_id}.csv.lz4', - 's3_key', - 's3_secret', - 'CSV' - ) - PARTITION BY rand() % 10 -SELECT * -FROM trips -LIMIT 100000; -``` - -あるいは、データ内のフィールドを参照することもできます。このデータセットの場合、`payment_type`が自然なパーティショニングキーを提供し、その基数は5です。 - -```sql -INSERT INTO FUNCTION - s3( - 'https://datasets-documentation.s3.eu-west-3.amazonaws.com/csv/trips_{_partition_id}.csv.lz4', - 's3_key', - 's3_secret', - 'CSV' - ) - PARTITION BY payment_type -SELECT * -FROM trips -LIMIT 100000; -``` -### クラスターの利用 {#utilizing-clusters} - -上記の関数はすべて単一ノードでの実行に制限されています。読み取り速度はCPUコアとともに線形にスケールし、他のリソース(通常はネットワーク)が飽和するまで、ユーザーは垂直にスケールすることができます。しかし、このアプローチには限界があります。ユーザーは、`INSERT INTO SELECT`クエリを実行する際に分散テーブルに挿入することでリソース圧力を軽減できますが、依然として単一ノードがデータを読み込み、解析し、処理しています。この課題に対処し、読み取りを水平方向にスケールするために、[s3Cluster](/sql-reference/table-functions/s3Cluster.md)関数があります。 - -クエリを受け取ったノード(イニシエーター)は、クラスタ内のすべてのノードへの接続を作成します。どのファイルを読み取る必要があるかを決定するためにグロブパターンが解決され、一連のファイルに展開されます。イニシエーターは、クラスタ内のノードにファイルを配布します。これらのノードは、読み取りを完了するたびに処理するファイルを要求します。このプロセスにより、読み取りを水平方向にスケールすることができます。 - -`s3Cluster`関数は、単一ノードバリアントと同じ形式を取り、ただしターゲットクラスタを指定する必要があります: - -```sql -s3Cluster(cluster_name, source, [access_key_id, 
secret_access_key,] format, structure) -``` - -* `cluster_name` — リモートおよびローカルサーバーへのアドレスと接続パラメータのセットを構築するために使用されるクラスタの名前。 -* `source` — ファイルまたはファイルのグループへのURL。以下のワイルドカードを読み取り専用モードでサポートします:`*`、`?`、`{'abc','def'}` および `{N..M}`(ここで N, M は数値、abc, def は文字列)。詳細は[パスにおけるワイルドカード](/engines/table-engines/integrations/s3.md/#wildcards-in-path)を参照してください。 -* `access_key_id` および `secret_access_key` — 指定されたエンドポイントで使用するための認証情報を指定するキー。オプションです。 -* `format` — ファイルの[フォーマット](/interfaces/formats#formats-overview)。 -* `structure` — テーブルの構造。形式 'column1_name column1_type, column2_name column2_type, ...'。 - -`s3`関数と同様に、バケットが安全でない場合、または環境を通じてセキュリティを定義する場合(例:IAMロール)、認証情報はオプションです。しかし、22.3.1以降、`s3Cluster`関数では構造をリクエスト内で指定する必要があります。すなわち、スキーマは推測されません。 - -この関数は、ほとんどの場合`INSERT INTO SELECT`の一部として使用されます。この場合、ユーザーは分散テーブルに挿入することがよくあります。以下に、`trips_all`が分散テーブルである簡単な例を示します。このテーブルは`events`クラスタを使用していますが、読み込みと書き込みに使用されるノードの一貫性は必須ではありません: - -```sql -INSERT INTO default.trips_all - SELECT * - FROM s3Cluster( - 'events', - 'https://datasets-documentation.s3.eu-west-3.amazonaws.com/nyc-taxi/trips_*.gz', - 'TabSeparatedWithNames' - ) -``` - -挿入はイニシエーターノードに対して発生します。これは、各ノードで読み取る一方で、結果の行がイニシエーターにルーティングされて配布されることを意味します。高スループットのシナリオでは、これがボトルネックとなる可能性があります。これに対処するために、`s3cluster`関数のための[parallel_distributed_insert_select](/operations/settings/settings/#parallel_distributed_insert_select)パラメータを設定してください。 -## S3テーブルエンジン {#s3-table-engines} - -`s3`関数はS3に保存されたデータに対して即席クエリを実行することを可能にしますが、構文が冗長です。`S3`テーブルエンジンを使用すると、バケットのURLや認証情報を何度も指定する必要がなくなります。これに対処するために、ClickHouseはS3テーブルエンジンを提供しています。 - -```sql -CREATE TABLE s3_engine_table (name String, value UInt32) - ENGINE = S3(path, [aws_access_key_id, aws_secret_access_key,] format, [compression]) - [SETTINGS ...] -``` - -* `path` — ファイルへのパスを含むバケットURL。以下のワイルドカードを読み取り専用モードで支援:`*`、`?`、`{abc,def}` および `{N..M}`(ここで N, M は数値、'abc'、'def' は文字列)。詳細については[こちら](/engines/table-engines/integrations/s3#wildcards-in-path)を参照してください。 -* `format` — ファイルの[フォーマット](/interfaces/formats#formats-overview)。 -* `aws_access_key_id`, `aws_secret_access_key` - AWSアカウントユーザーの長期的な認証情報。これを使用してリクエストを認証できます。このパラメータはオプションです。認証情報が指定されていない場合、構成ファイルでの値が使用されます。詳細は[認証情報の管理](#managing-credentials)を参照してください。 -* `compression` — 圧縮タイプ。サポートされる値:none、gzip/gz、brotli/br、xz/LZMA、zstd/zst。このパラメータはオプションです。デフォルトでは、ファイル拡張子によって圧縮を自動検出します。 -### データの読み取り {#reading-data} - -以下の例では、`https://datasets-documentation.s3.eu-west-3.amazonaws.com/nyc-taxi/`バケット内の最初の10個のTSVファイルを使用して、`trips_raw`という名のテーブルを作成します。これらはそれぞれ100万行を含みます: - -```sql -CREATE TABLE trips_raw -( - `trip_id` UInt32, - `vendor_id` Enum8('1' = 1, '2' = 2, '3' = 3, '4' = 4, 'CMT' = 5, 'VTS' = 6, 'DDS' = 7, 'B02512' = 10, 'B02598' = 11, 'B02617' = 12, 'B02682' = 13, 'B02764' = 14, '' = 15), - `pickup_date` Date, - `pickup_datetime` DateTime, - `dropoff_date` Date, - `dropoff_datetime` DateTime, - `store_and_fwd_flag` UInt8, - `rate_code_id` UInt8, - `pickup_longitude` Float64, - `pickup_latitude` Float64, - `dropoff_longitude` Float64, - `dropoff_latitude` Float64, - `passenger_count` UInt8, - `trip_distance` Float64, - `fare_amount` Float32, - `extra` Float32, - `mta_tax` Float32, - `tip_amount` Float32, - `tolls_amount` Float32, - `ehail_fee` Float32, - `improvement_surcharge` Float32, - `total_amount` Float32, - `payment_type_` Enum8('UNK' = 0, 'CSH' = 1, 'CRE' = 2, 'NOC' = 3, 'DIS' = 4), - `trip_type` UInt8, - `pickup` FixedString(25), - `dropoff` FixedString(25), - `cab_type` Enum8('yellow' = 1, 'green' = 2, 'uber' = 3), - `pickup_nyct2010_gid` Int8, - `pickup_ctlabel` Float32, - `pickup_borocode` Int8, - 
`pickup_ct2010` String, - `pickup_boroct2010` FixedString(7), - `pickup_cdeligibil` String, - `pickup_ntacode` FixedString(4), - `pickup_ntaname` String, - `pickup_puma` UInt16, - `dropoff_nyct2010_gid` UInt8, - `dropoff_ctlabel` Float32, - `dropoff_borocode` UInt8, - `dropoff_ct2010` String, - `dropoff_boroct2010` FixedString(7), - `dropoff_cdeligibil` String, - `dropoff_ntacode` FixedString(4), - `dropoff_ntaname` String, - `dropoff_puma` UInt16 -) ENGINE = S3('https://datasets-documentation.s3.eu-west-3.amazonaws.com/nyc-taxi/trips_{0..9}.gz', 'TabSeparatedWithNames', 'gzip'); -``` - -最初の10ファイルに制限するために`{0..9}`パターンを使用していることに注意してください。一度作成されると、このテーブルは他のテーブルと同様にクエリできます: - -```sql -SELECT DISTINCT(pickup_ntaname) -FROM trips_raw -LIMIT 10; - -┌─pickup_ntaname───────────────────────────────────┐ -│ Lenox Hill-Roosevelt Island │ -│ Airport │ -│ SoHo-TriBeCa-Civic Center-Little Italy │ -│ West Village │ -│ Chinatown │ -│ Hudson Yards-Chelsea-Flatiron-Union Square │ -│ Turtle Bay-East Midtown │ -│ Upper West Side │ -│ Murray Hill-Kips Bay │ -│ DUMBO-Vinegar Hill-Downtown Brooklyn-Boerum Hill │ -└──────────────────────────────────────────────────┘ -``` -### データの挿入 {#inserting-data} - -`S3`テーブルエンジンは並列読み取りをサポートしています。テーブル定義にグロブパターンが含まれていない場合、書き込みのみがサポートされます。したがって、上記のテーブルは書き込みをブロックします。 - -書き込みを示すために、書き込み可能なS3バケットを指すテーブルを作成します: - -```sql -CREATE TABLE trips_dest -( - `trip_id` UInt32, - `pickup_date` Date, - `pickup_datetime` DateTime, - `dropoff_datetime` DateTime, - `tip_amount` Float32, - `total_amount` Float32 -) ENGINE = S3('/trips.bin', 'Native'); -``` - -```sql -INSERT INTO trips_dest - SELECT - trip_id, - pickup_date, - pickup_datetime, - dropoff_datetime, - tip_amount, - total_amount - FROM trips - LIMIT 10; -``` - -```sql -SELECT * FROM trips_dest LIMIT 5; -``` - -```response -┌────trip_id─┬─pickup_date─┬─────pickup_datetime─┬────dropoff_datetime─┬─tip_amount─┬─total_amount─┐ -│ 1200018648 │ 2015-07-01 │ 2015-07-01 00:00:16 │ 2015-07-01 00:02:57 │ 0 │ 7.3 │ -│ 1201452450 │ 2015-07-01 │ 2015-07-01 00:00:20 │ 2015-07-01 00:11:07 │ 1.96 │ 11.76 │ -│ 1202368372 │ 2015-07-01 │ 2015-07-01 00:00:40 │ 2015-07-01 00:05:46 │ 0 │ 7.3 │ -│ 1200831168 │ 2015-07-01 │ 2015-07-01 00:01:06 │ 2015-07-01 00:09:23 │ 2 │ 12.3 │ -│ 1201362116 │ 2015-07-01 │ 2015-07-01 00:01:07 │ 2015-07-01 00:03:31 │ 0 │ 5.3 │ -└────────────┴─────────────┴─────────────────────┴─────────────────────┴────────────┴──────────────┘ -``` - -行は新しいファイルにのみ挿入できることに注意してください。マージサイクルやファイル分割操作はありません。ファイルが書き込まれると、その後の挿入は失敗します。ユーザーには以下の2つのオプションがあります: - -* 設定 `s3_create_new_file_on_insert=1`を指定してください。これにより、各挿入時に新しいファイルが作成されます。各挿入操作のたびに、数値の接尾辞が各ファイルの末尾に追加されます。上記の例では、次回の挿入により、`trips_1.bin`ファイルが作成されます。 -* 設定 `s3_truncate_on_insert=1`を指定してください。これにより、ファイルが切り詰められ、つまり完了後は新たに挿入された行のみが含まれることになります。 - -これらの設定はデフォルトで0になります。したがって、ユーザーはそのいずれかを設定する必要があります。両方が設定されている場合は、`s3_truncate_on_insert`が優先されます。 - -`S3`テーブルエンジンに関するいくつかの注意点: - -- 従来の`MergeTree`ファミリテーブルとは異なり、`S3`テーブルを削除しても、基になるデータは削除されません。 -- このテーブルタイプの完全な設定は[こちら](/engines/table-engines/integrations/s3.md/#settings)で確認できます。 -- このエンジンを使用する際に注意すべき点は以下の通りです: - * ALTERクエリはサポートされていません - * SAMPLE操作はサポートされていません - * プライマリーまたはスキップのインデックスという概念はありません。 -## 認証情報の管理 {#managing-credentials} - -前の例では、`s3`関数または`S3`テーブル定義内に認証情報を渡しました。これは、稀に使用される場合には受け入れられるかもしれませんが、プロダクションではユーザーには明示的な認証メカニズムが求められる必要があります。これに対処するために、ClickHouseは複数のオプションを提供しています。 - -* **config.xml**または**conf.d**の下にある同等の設定ファイルに接続の詳細を指定します。デビアンパッケージを使用したインストールを前提とした例ファイルの内容は以下の通りです。 - - ```xml - ubuntu@single-node-clickhouse:/etc/clickhouse-server/config.d$ cat 
s3.xml - - - - https://dalem-files.s3.amazonaws.com/test/ - key - secret - - - - - - ``` - - これらの認証情報は、上記のエンドポイントがリクエストされたURLの正確な接頭辞一致である場合、任意のリクエストに使用されます。また、この例では、アクセスキーおよびシークレットキーの代替として認証ヘッダーを宣言する能力に注意してください。サポートされている設定の完全なリストは[こちら](/engines/table-engines/integrations/s3.md/#settings)で確認できます。 - -* 上記の例は、設定パラメータ `use_environment_credentials`の使用可能性を強調しています。この設定パラメータは、`s3`レベルでグローバルに設定することもできます: - - ```xml - - - true - - - ``` - - この設定は、環境からS3の認証情報を取得しようとする試みをオンにし、IAMロールを介してアクセス可能にします。具体的には、以下の取得順に従って実施されます: - - * 環境変数 `AWS_ACCESS_KEY_ID`、`AWS_SECRET_ACCESS_KEY`および `AWS_SESSION_TOKEN` の検索 - * **$HOME/.aws**におけるチェック - * AWSセキュリティトークンサービスを通じて取得された一時的な認証情報 - すなわち [`AssumeRole`](https://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRole.html) APIを介して。 - * ECS環境変数 `AWS_CONTAINER_CREDENTIALS_RELATIVE_URI` または `AWS_CONTAINER_CREDENTIALS_FULL_URI` および `AWS_ECS_CONTAINER_AUTHORIZATION_TOKEN`での認証情報のチェック。 - * これらの認証情報の取得は、[AWS EC2インスタンスメタデータ](https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-metadata.html)を介して行われ、[AWS_EC2_METADATA_DISABLED](https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-envvars.html#envvars-list-AWS_EC2_METADATA_DISABLED)がtrueに設定されていない場合。 - - 同様の設定を特定のエンドポイントに対して、同じ接頭辞一致ルールを使用して設定することもできます。 -## パフォーマンスの最適化 {#s3-optimizing-performance} - -S3関数を使用した読み取りおよび挿入の最適化方法については、[専用のパフォーマンスガイド](./performance.md)を参照してください。 -### S3ストレージの調整 {#s3-storage-tuning} - -内部的に、ClickHouseのマージツリーは、[`Wide`および`Compact`](/engines/table-engines/mergetree-family/mergetree.md/#mergetree-data-storage)の2つの主要ストレージフォーマットを使用します。現在の実装は、ClickHouseのデフォルトの動作(設定 `min_bytes_for_wide_part` と `min_rows_for_wide_part` を通じて制御される)を使用していますが、将来的なリリースでは、S3のために動作が異なることが予想されます。例えば、`min_bytes_for_wide_part`のデフォルト値が大きくなり、より`Compact`形式を推奨し、結果としてファイル数が少なくなることが期待されます。ユーザーは、専らS3ストレージを使用する場合にこれらの設定を調整することを望むかもしれません。 -## S3バックにあるMergeTree {#s3-backed-mergetree} - -`s3`関数と関連するテーブルエンジンにより、ユーザーはClickHouseの慣れ親しんだ構文を用いてS3内のデータをクエリできます。しかし、データ管理機能やパフォーマンスに関しては、限界があります。プライマリーインデックスのサポートはなく、キャッシュサポートもありません。ファイルの挿入はユーザーによって管理する必要があります。 - -ClickHouseは、S3が特に「コールド」データに対するクエリパフォーマンスがそれほど重要ではない状況で、ストレージソリューションとして魅力的であり、ユーザーがストレージと計算を分離することを望んでいることを認識しています。この実現を支援するために、MergeTreeエンジンのストレージとしてS3を使用するサポートが提供されます。これにより、ユーザーはS3のスケーラビリティとコストの利点を活用し、MergeTreeエンジンの挿入およびクエリパフォーマンスを享受できます。 -### ストレージ層 {#storage-tiers} - -ClickHouseストレージボリュームは、物理ディスクをMergeTreeテーブルエンジンから抽象化できます。単一のボリュームは、順序付きのディスクセットで構成されることがあります。データストレージのために複数のブロックデバイスを潜在的に使用できることを主に許可するこの抽象化は、S3などの他のストレージタイプも許可します。ClickHouseのデータパーツは、ストレージポリシーに応じてボリューム間で移動され、充填率が管理され、これによりストレージ層の概念が作成されます。 - -ストレージ層は、最近のデータをホットコールドアーキテクチャで解除し、通常は最も多くクエリされるデータが高性能ストレージ(例:NVMe SSD)にわずかな空間を必要とすることを可能にします。データが古くなるにつれて、クエリタイムのSLAが増加し、クエリの頻度も高くなります。このデータのファットテールは、HDDやS3などのオブジェクトストレージのような遅い、性能が低いストレージに保存できます。 -``` -### ディスクの作成 {#creating-a-disk} - -S3バケットをディスクとして利用するには、まずClickHouseの設定ファイルにそれを宣言する必要があります。config.xmlを拡張するか、preferably conf.dの下に新しいファイルを提供してください。以下にS3ディスク宣言の例を示します。 - -```xml - - - ... - - - s3 - https://sample-bucket.s3.us-east-2.amazonaws.com/tables/ - your_access_key_id - your_secret_access_key - - /var/lib/clickhouse/disks/s3/ - - - cache - s3 - /var/lib/clickhouse/disks/s3_cache/ - 10Gi - - - ... 
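-                <!-- type=cache のディスクは、上で宣言した s3 ディスクのデータをローカルにキャッシュします(path にキャッシュ先、max_size に上限 10Gi を指定) -->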
- - - -``` - -このディスク宣言に関連する設定の完全なリストは[こちら](/engines/table-engines/mergetree-family/mergetree.md/#table_engine-mergetree-s3)で確認できます。資格情報は、[資格情報の管理](#managing-credentials)で説明されているのと同じアプローチを使用してここで管理できます。すなわち、上記の設定ブロックでuse_environment_credentialsをtrueに設定することでIAMロールを使用できます。 - -### ストレージポリシーの作成 {#creating-a-storage-policy} - -設定が完了すると、この「ディスク」はポリシー内で宣言されたストレージボリュームで使用できます。以下の例では、s3が唯一のストレージであると仮定します。これは、TTLや充填率に基づいてデータを移動できるより複雑なホットコールドアーキテクチャを無視しています。 - -```xml - - - - - ... - - - ... - - - - - -
-                        <disk>s3</disk>
-                    </main>
-                </volumes>
-            </s3_main>
-        </policies>
-    </storage_configuration>
-</clickhouse>
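-<!-- ポリシー名 s3_main は、後述の CREATE TABLE ... SETTINGS storage_policy='s3_main' から参照されます -->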
-``` - -### テーブルの作成 {#creating-a-table} - -ディスクに書き込みアクセスのあるバケットを使用するように設定されている場合、以下の例のようなテーブルを作成できるはずです。簡潔さを持たせるために、NYCタクシーのカラムのサブセットを使用し、データを直接s3バックテーブルにストリーミングします。 - -```sql -CREATE TABLE trips_s3 -( - `trip_id` UInt32, - `pickup_date` Date, - `pickup_datetime` DateTime, - `dropoff_datetime` DateTime, - `pickup_longitude` Float64, - `pickup_latitude` Float64, - `dropoff_longitude` Float64, - `dropoff_latitude` Float64, - `passenger_count` UInt8, - `trip_distance` Float64, - `tip_amount` Float32, - `total_amount` Float32, - `payment_type` Enum8('UNK' = 0, 'CSH' = 1, 'CRE' = 2, 'NOC' = 3, 'DIS' = 4) -) -ENGINE = MergeTree -PARTITION BY toYYYYMM(pickup_date) -ORDER BY pickup_datetime -SETTINGS storage_policy='s3_main' -``` - -```sql -INSERT INTO trips_s3 SELECT trip_id, pickup_date, pickup_datetime, dropoff_datetime, pickup_longitude, pickup_latitude, dropoff_longitude, dropoff_latitude, passenger_count, trip_distance, tip_amount, total_amount, payment_type FROM s3('https://ch-nyc-taxi.s3.eu-west-3.amazonaws.com/tsv/trips_{0..9}.tsv.gz', 'TabSeparatedWithNames') LIMIT 1000000; -``` - -ハードウェアによっては、この1m行の挿入に数分かかる場合があります。進捗はsystem.processesテーブルを介して確認できます。行数を最大10mまで調整し、いくつかのサンプルクエリを試してください。 - -```sql -SELECT passenger_count, avg(tip_amount) as avg_tip, avg(total_amount) as avg_amount FROM trips_s3 GROUP BY passenger_count; -``` - -### テーブルの変更 {#modifying-a-table} - -ユーザーは特定のテーブルのストレージポリシーを変更する必要がある場合があります。これは可能ですが、制限があります。新しいターゲットポリシーには、以前のポリシーのすべてのディスクとボリュームが含まれている必要があります。すなわち、ポリシーの変更を満たすためにデータは移行されないということです。これらの制約を検証する際には、ボリュームとディスクは名前で識別され、違反しようとするとエラーが発生します。ただし、前の例を使用していると仮定すると、以下の変更は有効です。 - -```xml - - - -
-                <disk>s3</disk>
-            </main>
-        </volumes>
-    </s3_main>
-    <s3_tiered>
-        <volumes>
-            <hot>
-                <disk>default</disk>
-            </hot>
-            <main>
-                <disk>s3</disk>
-            </main>
-        </volumes>
-        <move_factor>0.2</move_factor>
-    </s3_tiered>
-</policies>
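-<!-- s3_tiered は既存の main ボリューム(disk: s3)をそのまま含むため、ALTER TABLE ... MODIFY SETTING storage_policy='s3_tiered' による切り替えが可能です -->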
-``` - -```sql -ALTER TABLE trips_s3 MODIFY SETTING storage_policy='s3_tiered' -``` - -ここでは、新しいs3_tieredポリシーにメインボリュームを再利用し、新しいホットボリュームを導入します。これは、``パラメーターを介して設定された1つのディスクで構成されたデフォルトディスクを使用しています。ボリューム名とディスクは変更されません。新たにテーブルに挿入されるものは、デフォルトディスクに残り、そのサイズがmove_factor * disk_sizeに達した時点でデータはS3へ移動されます。 - -### レプリケーションの処理 {#handling-replication} - -S3ディスクとのレプリケーションは、`ReplicatedMergeTree`テーブルエンジンを使用することで実現できます。詳細については[二つのAWSリージョンにまたがる単一シャードをS3オブジェクトストレージで複製する](#s3-multi-region)ガイドを参照してください。 - -### 読み書き {#read--writes} - -以下のノートは、ClickHouseとS3のインタラクションの実装に関するものです。一般的には情報提供を目的としていますが、[パフォーマンスの最適化](#s3-optimizing-performance)を行う際に読者に役立つかもしれません: - -* デフォルトでは、クエリ処理パイプラインの任意のステージで使用される最大クエリ処理スレッド数は、コアの数と等しくなっています。一部のステージは他のステージよりも並行処理が可能であるため、この値は上限を提供します。複数のクエリステージは同時に実行される場合があります。データはディスクからストリームされるため、クエリに使用されるスレッド数はこの上限を超える可能性があります。[max_threads](/operations/settings/settings#max_threads)の設定を通じて変更してください。 -* S3からの読み取りはデフォルトで非同期です。この動作は、`remote_filesystem_read_method`を設定することで決まります。デフォルトではこの値は`threadpool`に設定されています。リクエストを処理する際、ClickHouseはグラニュールをストライプで読み取ります。これらのストライプは多くのカラムを含む可能性があります。スレッドはそれぞれのグラニュールのカラムを一つずつ読み取ります。この処理は同期的には行われず、データを待つ前にすべてのカラムのプレフェッチが行われます。これにより、各カラムに対する同期的待機よりも大幅なパフォーマンス向上が得られます。ほとんどの場合、この設定を変更する必要はありません - [最適化パフォーマンス](#s3-optimizing-performance)を参照してください。 -* 書き込みは並行して行われ、最大100の同時ファイル書き込みスレッドがあります。`max_insert_delayed_streams_for_parallel_write`はデフォルトで1000の値に設定されており、並行して書き込まれるS3ブロブの数を制御します。各ファイルの書き込みにはバッファが必要で(約1MB)、これによりINSERTのメモリ消費が効果的に制限されます。サーバーのメモリが少ないシナリオでは、この値を下げるのが適切かもしれません。 - -## ClickHouse用にS3オブジェクトストレージをディスクとして使用する {#configuring-s3-for-clickhouse-use} - -バケットとIAMロールを作成するためのステップバイステップの指示が必要な場合は、**S3バケットとIAMロールの作成**を展開し、手順に従ってください: - - -### S3バケットをディスクとして使用するようにClickHouseを設定する {#configure-clickhouse-to-use-the-s3-bucket-as-a-disk} -以下の例は、デフォルトのClickHouseディレクトリでサービスとしてインストールされたLinux Debパッケージに基づいています。 - -1. ClickHouseの`config.d`ディレクトリに新しいファイルを作成して、ストレージ設定を保存します。 -```bash -vim /etc/clickhouse-server/config.d/storage_config.xml -``` -2. ストレージ設定のために、先ほどの手順からバケットパス、アクセスキー、およびシークレットキーに置き換えた以下を追加します。 -```xml - - - - - s3 - https://mars-doc-test.s3.amazonaws.com/clickhouse3/ - ABC123 - Abc+123 - /var/lib/clickhouse/disks/s3_disk/ - - - cache - s3_disk - /var/lib/clickhouse/disks/s3_cache/ - 10Gi - - - - - -
-            <disk>s3_disk</disk>
-          </main>
-        </volumes>
-      </s3_main>
-    </policies>
-  </storage_configuration>
-</clickhouse>
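-<!-- ポリシー名 s3_main は、後述のテスト手順の CREATE TABLE ... SETTINGS storage_policy = 's3_main' から参照されます -->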
-``` - -:::note -``タグ内の`s3_disk`および`s3_cache`タグは任意のラベルです。これらは他の名前に設定できますが、``タブ内の``タブで同じラベルを使用してディスクを参照しなければなりません。 -``タグも任意で、ClickHouseでリソースを作成する際に参照されるストレージターゲットの識別子として使用されるポリシーの名前となります。 - -上記の設定は、ClickHouseバージョン22.8以上用です。古いバージョンを使用している場合は、[データを保存する](/operations/storing-data.md/#using-local-cache)ドキュメントを参照してください。 - -S3を使用するための詳細については: -統合ガイド: [S3バックのMergeTree](#s3-backed-mergetree) -::: - -3. ファイルの所有者を`clickhouse`ユーザーおよびグループに更新します。 -```bash -chown clickhouse:clickhouse /etc/clickhouse-server/config.d/storage_config.xml -``` -4. 変更を有効にするためにClickHouseインスタンスを再起動します。 -```bash -service clickhouse-server restart -``` -### テスト {#testing} -1. ClickHouseクライアントにログインします。以下のようにします。 -```bash -clickhouse-client --user default --password ClickHouse123! -``` -2. 新しいS3ストレージポリシーを指定してテーブルを作成します。 -```sql -CREATE TABLE s3_table1 - ( - `id` UInt64, - `column1` String - ) - ENGINE = MergeTree - ORDER BY id - SETTINGS storage_policy = 's3_main'; -``` - -3. テーブルが正しいポリシーで作成されたことを示します。 -```sql -SHOW CREATE TABLE s3_table1; -``` -```response -┌─statement──────────────────────────────────────────────────── -│ CREATE TABLE default.s3_table1 -( - `id` UInt64, - `column1` String -) -ENGINE = MergeTree -ORDER BY id -SETTINGS storage_policy = 's3_main', index_granularity = 8192 -└────────────────────────────────────────────────────────────── -``` - -4. テスト行をテーブルに挿入します。 -```sql -INSERT INTO s3_table1 - (id, column1) - VALUES - (1, 'abc'), - (2, 'xyz'); -``` -```response -INSERT INTO s3_table1 (id, column1) FORMAT Values - -Query id: 0265dd92-3890-4d56-9d12-71d4038b85d5 - -Ok. - -2 rows in set. Elapsed: 0.337 sec. -``` -5. 行を表示します。 -```sql -SELECT * FROM s3_table1; -``` -```response -┌─id─┬─column1─┐ -│ 1 │ abc │ -│ 2 │ xyz │ -└────┴─────────┘ - -2 rows in set. Elapsed: 0.284 sec. -``` -6. 
AWSコンソールで、バケットに移動し、新しいバケットとフォルダーを選択します。 -以下のようなものが表示されるはずです。 - - -## S3オブジェクトストレージを使用して単一のシャードを二つのAWSリージョンで複製する {#s3-multi-region} - -:::tip -ClickHouse Cloudではデフォルトでオブジェクトストレージが使用されます。ClickHouse Cloudで実行している場合、この手順を実行する必要はありません。 -::: -### 配備の計画 {#plan-the-deployment} -このチュートリアルは、AWS EC2上の二つのClickHouse Serverノードと三つのClickHouse Keeperノードをスピーディに展開することに基づいています。ClickHouseサーバーのデータストアはS3です。それぞれのリージョンにClickHouse ServerとS3バケットが1つ用意されており、災害復旧をサポートします。 - -ClickHouseテーブルは、二つのサーバー間で複製され、したがって二つのリージョン acrossで複製されます。 -### ソフトウェアのインストール {#install-software} -#### ClickHouseサーバーノード {#clickhouse-server-nodes} -ClickHouseサーバーノードに対するデプロイメント手順を実行する際は、[インストール手順](/getting-started/install/install.mdx)を参照してください。 -#### ClickHouseをデプロイする {#deploy-clickhouse} - -二つのホストにClickHouseをデプロイします。このサンプル設定ではこれらは`chnode1`と`chnode2`と呼ばれています。 - -`chnode1`を一つのAWSリージョンに配置し、`chnode2`を第二のリージョンに配置します。 -#### ClickHouse Keeperをデプロイする {#deploy-clickhouse-keeper} - -三つのホストにClickHouse Keeperをデプロイします。このサンプル設定では、これらは`keepernode1`、`keepernode2`、および`keepernode3`と呼ばれています。 `keepernode1`は`chnode1`と同じリージョンにデプロイできます、`keepernode2`は`chnode2`と一緒に、そして`keepernode3`はどちらのリージョンにもデプロイできますが、そのリージョンのClickHouseノードとは異なる可用性ゾーンになります。 - -ClickHouse Keeperノードのデプロイメント手順を実行する際には[インストール手順](/getting-started/install/install.mdx)を参照してください。 -### S3バケットを作成する {#create-s3-buckets} - -二つのS3バケットを作成します。一つは`chnode1`が配置されているリージョンに、もう一つは`chnode2`が配置されているリージョンに作成します。 - -バケットとIAMロールを作成するためのステップバイステップの指示が必要な場合は、**S3バケットとIAMロールの作成**を展開し、手順に従ってください: - - - -その後、設定ファイルは`/etc/clickhouse-server/config.d/`に配置されます。このバケットのためのサンプル設定ファイルは以下の通りで、もう一つは、三つのハイライトされた行が異なる類似のものです。 - -```xml title="/etc/clickhouse-server/config.d/storage_config.xml" - - - - - s3 - - https://docs-clickhouse-s3.s3.us-east-2.amazonaws.com/clickhouses3/ - ABCDEFGHIJKLMNOPQRST - Tjdm4kf5snfkj303nfljnev79wkjn2l3knr81007 - - /var/lib/clickhouse/disks/s3_disk/ - - - - cache - s3_disk - /var/lib/clickhouse/disks/s3_cache/ - 10Gi - - - - - -
-            <disk>s3_disk</disk>
-          </main>
-        </volumes>
-      </s3_main>
-    </policies>
-  </storage_configuration>
-</clickhouse>
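-<!-- もう一方のリージョンのノードでは、endpoint・access_key_id・secret_access_key を自リージョンのバケットの値に置き換えます(本文中の「三つのハイライトされた行」に相当) -->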
-``` -:::note -このガイドの多くのステップでは、`/etc/clickhouse-server/config.d/`に設定ファイルを配置するように求められます。これはLinuxシステムで設定上書きファイルのデフォルトの場所です。これらのファイルをそのディレクトリに置くと、ClickHouseはコンテンツを使用してデフォルトの設定を上書きします。これらのファイルを上書きディレクトリに配置することで、アップグレード中に設定が失われるのを避けることができます。 -::: -### ClickHouse Keeperを設定する {#configure-clickhouse-keeper} - -ClickHouse Keeperをスタンドアロンで実行する(ClickHouseサーバーとは別に)場合の設定は、単一のXMLファイルです。このチュートリアルでは、ファイルは`/etc/clickhouse-keeper/keeper_config.xml`です。すべての三つのKeeperサーバーは、以下の例のように同じ設定を使用しますが、1つの設定が異なります;``です。 - -`server_id`は、設定ファイルが使用されるホストに割り当てるIDを示します。以下の例では、`server_id`は`3`で、ファイル内の``セクションも見てみると、サーバー3にホスト名`keepernode3`が指定されています。これがClickHouse Keeperプロセスがリーダーを選択する際にどのサーバーに接続するかを知る方法です。 - -```xml title="/etc/clickhouse-keeper/keeper_config.xml" - - - trace - /var/log/clickhouse-keeper/clickhouse-keeper.log - /var/log/clickhouse-keeper/clickhouse-keeper.err.log - 1000M - 3 - - 0.0.0.0 - - 9181 - - 3 - /var/lib/clickhouse/coordination/log - /var/lib/clickhouse/coordination/snapshots - - - 10000 - 30000 - warning - - - - - 1 - keepernode1 - 9234 - - - 2 - keepernode2 - 9234 - - - - 3 - keepernode3 - 9234 - - - - - -``` - -ClickHouse Keeperの設定ファイルをコピーします(``を設定するのを忘れずに): -```bash -sudo -u clickhouse \ - cp keeper.xml /etc/clickhouse-keeper/keeper.xml -``` -### ClickHouseサーバーを設定する {#configure-clickhouse-server} -#### クラスターの定義 {#define-a-cluster} - -ClickHouseのクラスターは、設定ファイルの``セクションで定義されます。このサンプルでは、`cluster_1S_2R`という1つのクラスターが定義されており、これは一つのシャードと二つのレプリカで構成されています。レプリカは、ホスト`chnode1`と`chnode2`に位置しています。 - -```xml title="/etc/clickhouse-server/config.d/remote-servers.xml" - - - - - - chnode1 - 9000 - - - chnode2 - 9000 - - - - - -``` - -クラスターを使用する際は、DDLクエリにクラスター、シャード、およびレプリカの設定を埋め込むためのマクロを定義すると便利です。このサンプルでは、`shard`および`replica`の詳細を提供せずにレプリケーテッドテーブルエンジンを使用することを指定できます。テーブルを作成すると、`shard`と`replica`のマクロが`system.tables`をクエリすることでどのように使用されるかを見ることができます。 - -```xml title="/etc/clickhouse-server/config.d/macros.xml" - - - /clickhouse/task_queue/ddl - - - cluster_1S_2R - 1 - replica_1 - - -``` -:::note -上記のマクロは`chnode1`用のものです。`chnode2`では`replica`を`replica_2`に設定してください。 -::: -#### ゼロコピーのレプリケーションを無効にする {#disable-zero-copy-replication} - -ClickHouseバージョン22.7以下では、`allow_remote_fs_zero_copy_replication`設定は、デフォルトでS3およびHDFSディスクに対して`true`に設定されています。この設定は、この災害復旧シナリオでは`false`に設定する必要があります。バージョン22.8以降では、デフォルトで`false`に設定されています。 - -この設定は以下の二つの理由から`false`にする必要があります。1) この機能は製品版ではない。2) 災害復旧シナリオでは、データとメタデータの両方を複数のリージョンに保存する必要があります。`allow_remote_fs_zero_copy_replication`を`false`に設定します。 - -```xml title="/etc/clickhouse-server/config.d/remote-servers.xml" - - - false - - -``` - -ClickHouse Keeperは、ClickHouseノード間でのデータのレプリケーションを調整する責任があります。ClickHouseにClickHouse Keeperノードを伝えるために、各ClickHouseノードに設定ファイルを追加します。 - -```xml title="/etc/clickhouse-server/config.d/use_keeper.xml" - - - - keepernode1 - 9181 - - - keepernode2 - 9181 - - - keepernode3 - 9181 - - - -``` -### ネットワーキングの設定 {#configure-networking} - -AWSでサーバーが互いに通信できるようにするためのセキュリティ設定を構成する際は、[ネットワークポート](../../../guides/sre/network-ports.md)リストを確認してください。 - -すべての三つのサーバーは、ネットワーク接続をリッスンしてできるようにする必要があります。それにより、サーバー間とS3との通信が行われます。ClickHouseはデフォルトではループバックアドレス上でのみリッスンするため、これを変更する必要があります。これらの設定は`/etc/clickhouse-server/config.d/`に構成されます。以下は、ClickHouseとClickHouse KeeperがすべてのIP v4インターフェースでリッスンするように設定するサンプルです。詳細については、ドキュメントまたはデフォルト設定ファイル`/etc/clickhouse/config.xml`を参照してください。 - -```xml title="/etc/clickhouse-server/config.d/networking.xml" - - 0.0.0.0 - -``` -### サーバーの起動 {#start-the-servers} -#### ClickHouse Keeperを実行する {#run-clickhouse-keeper} - -各Keeperサーバーで、オペレーティングシステムに応じてコマンドを実行します。例: - -```bash -sudo systemctl enable 
clickhouse-keeper -sudo systemctl start clickhouse-keeper -sudo systemctl status clickhouse-keeper -``` -#### ClickHouse Keeperのステータスを確認する {#check-clickhouse-keeper-status} - -`netcat`を使ってClickHouse Keeperにコマンドを送ります。例えば、`mntr`はClickHouse Keeperクラスターの状態を返します。各Keeperノードでこのコマンドを実行すると、1つがリーダーであり、他の2つがフォロワーであることがわかります。 - -```bash -echo mntr | nc localhost 9181 -``` -```response -zk_version v22.7.2.15-stable-f843089624e8dd3ff7927b8a125cf3a7a769c069 -zk_avg_latency 0 -zk_max_latency 11 -zk_min_latency 0 -zk_packets_received 1783 -zk_packets_sent 1783 - -# highlight-start -zk_num_alive_connections 2 -zk_outstanding_requests 0 -zk_server_state leader - -# highlight-end -zk_znode_count 135 -zk_watch_count 8 -zk_ephemerals_count 3 -zk_approximate_data_size 42533 -zk_key_arena_size 28672 -zk_latest_snapshot_size 0 -zk_open_file_descriptor_count 182 -zk_max_file_descriptor_count 18446744073709551615 - -# highlight-start -zk_followers 2 -zk_synced_followers 2 - -# highlight-end -``` -#### ClickHouseサーバーを実行する {#run-clickhouse-server} - -各ClickHouseサーバーで実行します。 - -```bash -sudo service clickhouse-server start -``` -#### ClickHouseサーバーを確認する {#verify-clickhouse-server} - -[クラスター設定](#define-a-cluster)を追加した際に、二つのClickHouseノードにまたがる単一シャードが複製されるようになりました。この確認ステップでは、ClickHouseが起動した時にクラスタが構築されたことを確認し、そのクラスターを使用して複製されたテーブルを作成します。 -- クラスタが存在することを確認します: - ```sql - show clusters - ``` - ```response - ┌─cluster───────┐ - │ cluster_1S_2R │ - └───────────────┘ - - 1 row in set. Elapsed: 0.009 sec. ` - ``` - -- `ReplicatedMergeTree`テーブルエンジンを使用してクラスター内にテーブルを作成します: - ```sql - create table trips on cluster 'cluster_1S_2R' ( - `trip_id` UInt32, - `pickup_date` Date, - `pickup_datetime` DateTime, - `dropoff_datetime` DateTime, - `pickup_longitude` Float64, - `pickup_latitude` Float64, - `dropoff_longitude` Float64, - `dropoff_latitude` Float64, - `passenger_count` UInt8, - `trip_distance` Float64, - `tip_amount` Float32, - `total_amount` Float32, - `payment_type` Enum8('UNK' = 0, 'CSH' = 1, 'CRE' = 2, 'NOC' = 3, 'DIS' = 4)) - ENGINE = ReplicatedMergeTree - PARTITION BY toYYYYMM(pickup_date) - ORDER BY pickup_datetime - SETTINGS storage_policy='s3_main' - ``` - ```response - ┌─host────┬─port─┬─status─┬─error─┬─num_hosts_remaining─┬─num_hosts_active─┐ - │ chnode1 │ 9000 │ 0 │ │ 1 │ 0 │ - │ chnode2 │ 9000 │ 0 │ │ 0 │ 0 │ - └─────────┴──────┴────────┴───────┴─────────────────────┴──────────────────┘ - ``` -- 先に定義されたマクロの使用を理解する - - マクロ `shard` と `replica` は[前述の通り](#define-a-cluster)で定義されており、ハイライトされた行でクリックハウスノードごとに値が置き換えられるのがわかります。さらに、値 `uuid` が使用されます。`uuid` はシステムによって生成されるため、マクロには定義されません。 - ```sql - SELECT create_table_query - FROM system.tables - WHERE name = 'trips' - FORMAT Vertical - ``` - ```response - Query id: 4d326b66-0402-4c14-9c2f-212bedd282c0 - - Row 1: - ────── - create_table_query: CREATE TABLE default.trips (`trip_id` UInt32, `pickup_date` Date, `pickup_datetime` DateTime, `dropoff_datetime` DateTime, `pickup_longitude` Float64, `pickup_latitude` Float64, `dropoff_longitude` Float64, `dropoff_latitude` Float64, `passenger_count` UInt8, `trip_distance` Float64, `tip_amount` Float32, `total_amount` Float32, `payment_type` Enum8('UNK' = 0, 'CSH' = 1, 'CRE' = 2, 'NOC' = 3, 'DIS' = 4)) - # highlight-next-line - ENGINE = ReplicatedMergeTree('/clickhouse/tables/{uuid}/{shard}', '{replica}') - PARTITION BY toYYYYMM(pickup_date) ORDER BY pickup_datetime SETTINGS storage_policy = 's3_main' - - 1 row in set. Elapsed: 0.012 sec. 
- ``` - :::note - 上記に示されるzookeeperのパス`'clickhouse/tables/{uuid}/{shard}`は、`default_replica_path`および`default_replica_name`を設定することでカスタマイズできます。詳細は[こちら](/operations/server-configuration-parameters/settings.md/#default_replica_path)をご覧ください。 - ::: -### テスト {#testing-1} - -これらのテストは、データが二つのサーバー間で複製されていること、そしてそれがローカルディスクではなくS3バケットに格納されていることを確認します。 - -- ニューヨーク市タクシーデータセットからデータを追加します: - ```sql - INSERT INTO trips - SELECT trip_id, - pickup_date, - pickup_datetime, - dropoff_datetime, - pickup_longitude, - pickup_latitude, - dropoff_longitude, - dropoff_latitude, - passenger_count, - trip_distance, - tip_amount, - total_amount, - payment_type - FROM s3('https://ch-nyc-taxi.s3.eu-west-3.amazonaws.com/tsv/trips_{0..9}.tsv.gz', 'TabSeparatedWithNames') LIMIT 1000000; - ``` -- データがS3に保存されていることを確認します。 - - このクエリはディスク上のデータのサイズと、どのディスクが使用されるかを決定するポリシーを表示します。 - ```sql - SELECT - engine, - data_paths, - metadata_path, - storage_policy, - formatReadableSize(total_bytes) - FROM system.tables - WHERE name = 'trips' - FORMAT Vertical - ``` - ```response - Query id: af7a3d1b-7730-49e0-9314-cc51c4cf053c - - Row 1: - ────── - engine: ReplicatedMergeTree - data_paths: ['/var/lib/clickhouse/disks/s3_disk/store/551/551a859d-ec2d-4512-9554-3a4e60782853/'] - metadata_path: /var/lib/clickhouse/store/e18/e18d3538-4c43-43d9-b083-4d8e0f390cf7/trips.sql - storage_policy: s3_main - formatReadableSize(total_bytes): 36.42 MiB - - 1 row in set. Elapsed: 0.009 sec. - ``` - - ローカルディスク上のデータのサイズを確認します。上記から、保存されたミリオンズ行のディスク上のサイズは36.42 MiBです。これはS3に保存されているはずで、ローカルディスクには保存されていません。このクエリは、ローカルディスクでデータとメタデータが保存されている場所も教えてくれます。ローカルデータを確認します: - ```response - root@chnode1:~# du -sh /var/lib/clickhouse/disks/s3_disk/store/551 - 536K /var/lib/clickhouse/disks/s3_disk/store/551 - ``` - - 各S3バケット内のS3データを確認します(合計は表示されませんが、両方のバケットには約36 MiBが保存されるはずです)。 - - - - -## S3Express {#s3express} - -[S3Express](https://aws.amazon.com/s3/storage-classes/express-one-zone/)は、Amazon S3の新しい高性能シングルアベイラビリティゾーンストレージクラスです。 - -この[ブログ](https://aws.amazon.com/blogs/storage/clickhouse-cloud-amazon-s3-express-one-zone-making-a-blazing-fast-analytical-database-even-faster/)では、ClickHouseでのS3Expressテストについての私たちの経験を読めます。 - -:::note - S3Expressは、単一のAZ内にデータを保存します。つまり、AZの停止の場合、データが利用できなくなります。 -::: -### S3ディスク {#s3-disk} - -S3Expressバケットでストレージをサポートするテーブルを作成するには、以下の手順が必要です。 - -1. `Directory`タイプのバケットを作成します。 -2. S3ユーザーに必要なすべての権限を付与するために適切なバケットポリシーをインストールします(例えば、 `"Action": "s3express:*"` を指定して無制限のアクセスを許可する)。 -3. ストレージポリシーを設定する際には、`region` パラメータを指定してください。 - -ストレージ構成は通常のS3と同じで、例えば次のようになります。 - -``` sql - - - - s3 - https://my-test-bucket--eun1-az1--x-s3.s3express-eun1-az1.eu-north-1.amazonaws.com/store/ - eu-north-1 - ... - ... - - - - - -
-            <disk>s3_express</disk>
-          </main>
-        </volumes>
-      </s3_express>
-    </policies>
-  </storage_configuration>
-``` - -その後、新しいストレージにテーブルを作成します。 - -``` sql -CREATE TABLE t -( - a UInt64, - s String -) -ENGINE = MergeTree -ORDER BY a -SETTINGS storage_policy = 's3_express'; -``` -### S3ストレージ {#s3-storage} - -S3ストレージもサポートされていますが、`Object URL`パスのみ対応しています。例: - -``` sql -select * from s3('https://test-bucket--eun1-az1--x-s3.s3express-eun1-az1.eu-north-1.amazonaws.com/file.csv', ...) -``` - -設定ではバケットのリージョンも指定する必要があります。 - -``` xml - - - https://test-bucket--eun1-az1--x-s3.s3express-eun1-az1.eu-north-1.amazonaws.com - eu-north-1 - - -``` -### バックアップ {#backups} - -上記で作成したディスクにバックアップを保存することが可能です。 - -``` sql -BACKUP TABLE t TO Disk('s3_express', 't.zip') - -┌─id───────────────────────────────────┬─status─────────┐ -│ c61f65ac-0d76-4390-8317-504a30ba7595 │ BACKUP_CREATED │ -└──────────────────────────────────────┴────────────────┘ -``` - -``` sql -RESTORE TABLE t AS t_restored FROM Disk('s3_express', 't.zip') - -┌─id───────────────────────────────────┬─status───┐ -│ 4870e829-8d76-4171-ae59-cffaf58dea04 │ RESTORED │ -└──────────────────────────────────────┴──────────┘ -``` diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/s3/index.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/s3/index.md.hash deleted file mode 100644 index ba45c51a6dc..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/s3/index.md.hash +++ /dev/null @@ -1 +0,0 @@ -193fa9bb98ae02d6 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/s3/performance.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/s3/performance.md deleted file mode 100644 index c87cc3f386f..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/s3/performance.md +++ /dev/null @@ -1,383 +0,0 @@ ---- -slug: '/integrations/s3/performance' -sidebar_position: 2 -sidebar_label: 'パフォーマンスの最適化' -title: 'S3挿入および読み取りパフォーマンスの最適化' -description: 'S3読み取りおよび挿入のパフォーマンスの最適化' ---- - -import Image from '@theme/IdealImage'; -import InsertMechanics from '@site/static/images/integrations/data-ingestion/s3/insert_mechanics.png'; -import Pull from '@site/static/images/integrations/data-ingestion/s3/pull.png'; -import Merges from '@site/static/images/integrations/data-ingestion/s3/merges.png'; -import ResourceUsage from '@site/static/images/integrations/data-ingestion/s3/resource_usage.png'; -import InsertThreads from '@site/static/images/integrations/data-ingestion/s3/insert_threads.png'; -import S3Cluster from '@site/static/images/integrations/data-ingestion/s3/s3Cluster.png'; -import HardwareSize from '@site/static/images/integrations/data-ingestion/s3/hardware_size.png'; - -このセクションでは、S3からデータを読み込みおよび挿入する際のパフォーマンス最適化に焦点を当てています。[s3テーブル関数](/sql-reference/table-functions/s3)を使用します。 - -:::info -**このガイドで説明されているレッスンは、[GCS](/sql-reference/table-functions/gcs)や[Azure Blobストレージ](/sql-reference/table-functions/azureBlobStorage)のような、専用のテーブル関数を持つ他のオブジェクトストレージの実装にも適用できます。** -::: - -挿入パフォーマンスを向上させるためにスレッドやブロックサイズを調整する前に、ユーザーはS3への挿入メカニズムを理解することをお勧めします。挿入メカニズムに慣れているか、クイックなヒントが欲しい場合は、[以下の例](/integrations/s3/performance#example-dataset)に飛ばしてください。 -## 挿入メカニズム(単一ノード) {#insert-mechanics-single-node} - -ハードウェアサイズに加え、ClickHouseのデータ挿入メカニズムのパフォーマンスとリソース使用に影響を与える主な要因は、**挿入ブロックサイズ**と**挿入の並行性**の二つです。 -### 挿入ブロックサイズ {#insert-block-size} - - - -`INSERT INTO 
SELECT`を実行する際、ClickHouseはデータの一部を受信し、①受信したデータから(少なくとも)1つのメモリ内挿入ブロックを形成します([パーティショニングキー](/engines/table-engines/mergetree-family/custom-partitioning-key)ごとに)。ブロックのデータはソートされ、テーブルエンジン固有の最適化が適用されます。その後、データは圧縮され、②新しいデータパーツの形でデータベースストレージに書き込まれます。 - -挿入ブロックサイズはClickHouseサーバーの[ディスクファイルI/O使用量](https://en.wikipedia.org/wiki/Category:Disk_file_systems)とメモリ使用量の両方に影響します。大きな挿入ブロックはより多くのメモリを使用しますが、初期パーツは大きく少なく生成されます。ClickHouseが大量のデータを読み込むために作成する必要があるパーツが少ないほど、ディスクファイルI/Oと自動的な[バックグラウンドマージ](https://clickhouse.com/blog/supercharge-your-clickhouse-data-loads-part1#more-parts--more-background-part-merges)が少なくなります。 - -`INSERT INTO SELECT`クエリをインテグレーションテーブルエンジンまたはテーブル関数と組み合わせて使用する際、データはClickHouseサーバーによってプルされます: - - - -データが完全に読み込まれるまで、サーバーは次のループを実行します: - -```bash -① 次の未処理データの部分をプルして解析し、そこからメモリ内データブロックを形成します(パーティショニングキーごとに1つ)。 - -② ストレージの新しいパーツにブロックを書き込みます。 - -次へ ① -``` - -①では、サイズは挿入ブロックサイズに依存し、次の2つの設定で制御できます: - -- [`min_insert_block_size_rows`](/operations/settings/settings#min_insert_block_size_rows)(デフォルト:`1048545`行) -- [`min_insert_block_size_bytes`](/operations/settings/settings#min_insert_block_size_bytes)(デフォルト:`256 MiB`) - -挿入ブロックに指定された行数が収集されるか、設定された量のデータに達すると(どちらか早い方が先に発生する場合)、これによりブロックが新しいパートに書き込まれるトリガーとなります。挿入ループは①のステップで続行されます。 - -`min_insert_block_size_bytes`の値は、未圧縮のメモリ内ブロックサイズを示し(圧縮されたディスク上のパートサイズではありません)、また作成されるブロックとパーツは、ClickHouseが行-[ブロック](/operations/settings/settings#max_block_size)単位でデータをストリーム処理および[処理](https://clickhouse.com/company/events/query-performance-introspection)するため、設定された行数またはバイト数を正確に含むことは稀であることに留意してください。したがって、これらの設定は最小閾値を示しています。 -#### マージに注意 {#be-aware-of-merges} - -設定された挿入ブロックサイズが小さいほど、大量のデータロードの際に生成される初期パーツが多く、データ取り込みと並行してより多くのバックグラウンドパートマージが実行されることになります。これによりリソースの競合(CPUとメモリ)が発生し、取り込みが終了した後に[健康的な](/operations/settings/merge-tree-settings#parts_to_throw_insert)(3000)のパーツ数に達するのに追加の時間が必要になる場合があります。 - -:::important -パーツ数が[推奨限度](/operations/settings/merge-tree-settings#parts_to_throw_insert)を超えると、ClickHouseのクエリパフォーマンスに悪影響が及びます。 -::: - -ClickHouseは、二つのパーツが圧縮サイズ約150 GiBに達するまで、継続的に[マージを行います](https://clickhouse.com/blog/asynchronous-data-inserts-in-clickhouse#data-needs-to-be-batched-for-optimal-performance)。以下の図は、ClickHouseサーバーがパーツをマージする方法を示しています: - - - -単一のClickHouseサーバーは、いくつかの[バックグラウンドマージスレッド](/operations/server-configuration-parameters/settings#background_pool_size)を利用して並行して[パートマージ](https://clickhouse.com/blog/supercharge-your-clickhouse-data-loads-part1#more-parts--more-background-part-merges:~:text=to%20execute%20concurrent-,part%20merges,-.%20Each%20thread%20executes)を実行します。各スレッドは次のループを実行します: - -```bash -① 次にマージするパーツを決定し、それらのパーツをメモリ内のブロックとしてロードします。 - -② メモリ内のロードされたブロックを大きなブロックにマージします。 - -③ マージしたブロックを新しいパーツとしてディスクに書き込みます。 - -次へ ① -``` - -[増加する](https://clickhouse.com/blog/supercharge-your-clickhouse-data-loads-part1#hardware-size) CPUコアの数およびRAMのサイズは、バックグラウンドマージスループットを増加させます。 - -大きなパーツにマージされたパーツは[非活性](/operations/system-tables/parts)としてマークされ、最終的には[設定可能な](/operations/settings/merge-tree-settings#old_parts_lifetime)分の分だけの分数が経過した後に削除されます。時間が経つにつれて、マージされたパーツのツリーが作成されます(そのため、[`MergeTree`](/engines/table-engines/mergetree-family)テーブルと呼ばれます)。 -### 挿入の並行性 {#insert-parallelism} - - - -ClickHouseサーバーはデータを並行して処理および挿入できます。挿入の並行性のレベルは、ClickHouseサーバーの取り込みスループットとメモリ使用量に影響を与えます。データを並行してロードおよび処理するにはより多くのメインメモリが必要ですが、データがより迅速に処理されるため、取り込みスループットは向上します。 - -s3のようなテーブル関数は、グロブパターンを通じて読み込むファイル名のセットを指定することを可能にします。グロブパターンが複数の既存ファイルと一致した場合、ClickHouseはこれらのファイルの間および内部での読み取りを並列化し、並行してテーブルにデータを挿入するために、並列実行される挿入スレッドを使用します(サーバーごとに): - - - -すべてのファイルからのデータが処理されるまで、各挿入スレッドは次のループを実行します: - -```bash -① 
未処理のファイルデータの次の部分を取得し(部分のサイズは設定されたブロックサイズに基づく)、それからメモリ内データブロックを作成します。 - -② ブロックを新しいパーツにストレージに書き込みます。 - -次へ ①. -``` - -このような並行挿入スレッドの数は[`max_insert_threads`](/operations/settings/settings#max_insert_threads)設定で構成できます。オープンソースのClickHouseのデフォルト値は`1`、[ClickHouse Cloud](https://clickhouse.com/cloud)のデフォルト値は`4`です。 - -ファイルの数が多い場合、複数の挿入スレッドによる並行処理がうまく機能し、利用可能なCPUコアとネットワーク帯域幅(並行ファイルダウンロード用)を完全に飽和させることができます。わずか数個の大きなファイルをテーブルに読み込む場合、ClickHouseは自動的に高いデータ処理並行性を確立し、大きなファイル内の異なる範囲を並行して読み取り(ダウンロード)するために各挿入スレッドごとに追加のリーダースレッドを生成してネットワーク帯域幅の使用を最適化します。 - -s3関数とテーブルの場合、個々のファイルの並列ダウンロードは、[max_download_threads](https://clickhouse.com/codebrowser/ClickHouse/src/Core/Settings.h.html#DB::SettingsTraits::Data::max_download_threads)および[max_download_buffer_size](https://clickhouse.com/codebrowser/ClickHouse/src/Core/Settings.h.html#DB::SettingsTraits::Data::max_download_buffer_size)の値によって決まります。ファイルのサイズが`2 * max_download_buffer_size`を超えない限り、ファイルは並列にダウンロードされません。デフォルトでは、`max_download_buffer_size`のデフォルトは10MiBに設定されています。場合によっては、このバッファサイズを50 MB(`max_download_buffer_size=52428800`)に安全に増やすことで、各ファイルが単一のスレッドによってダウンロードされることを保証できます。これにより、各スレッドがS3コールを行う時間が短縮され、これによりS3の待機時間も短縮されます。さらに、並列読み込みに対してサイズが小さすぎるファイルに対しては、ClickHouseが非同期でこのようなファイルを事前に読み込むことでスループットを増加させます。 -## パフォーマンスの測定 {#measuring-performance} - -S3テーブル関数を使用したクエリのパフォーマンスを最適化することは、データがそのまま存在するクエリを実行する場合、すなわちClickHouseのコンピュートのみを使用し、データがS3にその元の形式で残る場合、およびS3からClickHouse MergeTreeテーブルエンジンにデータを挿入する際に必要です。指定がない限り、以下の推奨事項は両方のシナリオに適用されます。 -## ハードウェアサイズの影響 {#impact-of-hardware-size} - - - -使用可能なCPUコアの数とRAMのサイズは、次に影響します: - -- サポートされる[初期パーツサイズ](#insert-block-size) -- 可能な[挿入並行性](#insert-parallelism) -- [バックグラウンドパートマージ](https://clickhouse.com/blog/supercharge-your-clickhouse-data-loads-part1#more-parts--more-background-part-merges)のスループット - -したがって、全体的な取り込みスループットに影響します。 -## リージョンのローカリティ {#region-locality} - -バケットがClickHouseインスタンスと同じリージョンにあることを確認してください。この単純な最適化は、特にClickHouseインスタンスをAWSのインフラストラクチャにデプロイした場合、スループットパフォーマンスを劇的に向上させることができます。 -## フォーマット {#formats} - -ClickHouseは、`s3`関数と`S3`エンジンを使用して、S3バケットに保存されたファイルを[サポートされているフォーマット](/interfaces/formats#formats-overview)で読み取ることができます。生のファイルを読み込む場合、これらのフォーマットのいくつかには明確な利点があります: - -* Native、Parquet、CSVWithNames、TabSeparatedWithNamesなどのエンコード済みカラム名を持つフォーマットは、ユーザーが`s3`関数でカラム名を指定する必要がないため、クエリが冗長になりにくいです。カラム名はこの情報を推測可能にします。 -* フォーマット間の読み取りおよび書き込みスループットにおけるパフォーマンスの差があります。NativeとParquetはすでに列指向であり、よりコンパクトなため、読み取りパフォーマンスにとって最も最適なフォーマットを表します。Nativeフォーマットは、ClickHouseがメモリ内にデータを格納する方法と整合性があるため、このため、ClickHouseにストリームされるデータの処理オーバーヘッドが削減されます。 -* ブロックサイズが大きなファイルの読み取りの待機時間にしばしば影響します。これは、データの一部のみをサンプリングする場合(例:上位N行を返す場合)に非常に明らかです。CSVやTSVのようなフォーマットでは、行セットを返すためにファイルを解析する必要があります。NativeやParquetのようなフォーマットは、結果的により迅速にサンプリングを可能にします。 -* 各圧縮フォーマットには利点と欠点があり、スピードとエクスパクションバイアスの圧縮レベルをバランスさせます。CSVやTSVのような生のファイルを圧縮する場合、lz4は圧縮レベルを犠牲にして最も迅速な解凍パフォーマンスを提供します。Gzipは通常、わずかに遅い読み取り速度の代償としてより良好に圧縮されます。Xzは、通常は圧縮および解凍パフォーマンスが遅い代わりに最良の圧縮を提供します。エクスポートの場合、Gzとlz4は比較可能な圧縮速度を提供します。これは接続速度に対抗してバランスを取ってください。より高速な解凍または圧縮から得られる利点は、S3バケットへの接続が遅ければ簡単に打ち消されてしまいます。 -* NativeやParquetのようなフォーマットでは圧縮のオーバーヘッドを正当化することは通常ありません。これらのフォーマットは本質的にコンパクトであるため、データサイズの削減はわずかです。圧縮と解凍にかかる時間は、ネットワーク転送時間を補うことは滅多にありません - 特にS3はグローバルに利用可能で高いネットワーク帯域を持っています。 -## 例となるデータセット {#example-dataset} - -さらなる潜在的な最適化を示すために、[Stack Overflowデータセットの投稿](/data-modeling/schema-design#stack-overflow-dataset)を使用します - このデータのクエリと挿入パフォーマンスの両方を最適化します。 - -このデータセットは、2008年7月から2024年3月までの毎月の1つのParquetファイルで構成され、合計189ファイルです。 - -パフォーマンスのためにParquetを使用し、[上記の推奨](/formats)に従い、バケットと同じリージョンにあるClickHouseクラスターで全てのクエリを実行します。このクラスターは、32GiBのRAMと8つのvCPUを各ノードに持つ3つのノードから構成されています。 - 
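-ベンチマークを実行する前に、スキーマ推論とグロブパターンの一致状況を確認しておくと便利です。以下は、上記と同じ公開バケットパスがそのまま利用できることを前提とした、参考用の簡単な確認クエリのスケッチです。
-
-```sql
---- Parquetファイルから推論されるスキーマを確認する
-DESCRIBE s3('https://datasets-documentation.s3.eu-west-3.amazonaws.com/stackoverflow/parquet/posts/by_month/*.parquet');
-
---- グロブパターンに一致したファイル数を仮想カラム `_file` で確認する
-SELECT uniqExact(_file) AS matched_files
-FROM s3('https://datasets-documentation.s3.eu-west-3.amazonaws.com/stackoverflow/parquet/posts/by_month/*.parquet');
-```
-
-ファイル数が想定どおり(この例では189)であることを確認してから、以降のベンチマークに進みます。
-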
-調整を行わずに、このデータセットをMergeTreeテーブルエンジンに挿入するパフォーマンスを示すとともに、最も質問しているユーザーを計算するためのクエリを実行します。これらのクエリは意図的にデータ全体のスキャンを必要とします。 - -```sql --- トップユーザー名 -SELECT - OwnerDisplayName, - count() AS num_posts -FROM s3('https://datasets-documentation.s3.eu-west-3.amazonaws.com/stackoverflow/parquet/posts/by_month/*.parquet') -WHERE OwnerDisplayName NOT IN ('', 'anon') -GROUP BY OwnerDisplayName -ORDER BY num_posts DESC -LIMIT 5 - -┌─OwnerDisplayName─┬─num_posts─┐ -│ user330315 │ 10344 │ -│ user4039065 │ 5316 │ -│ user149341 │ 4102 │ -│ user529758 │ 3700 │ -│ user3559349 │ 3068 │ -└──────────────────┴───────────┘ - -5 rows in set. Elapsed: 3.013 sec. Processed 59.82 million rows, 24.03 GB (19.86 million rows/s., 7.98 GB/s.) -Peak memory usage: 603.64 MiB. - --- posts テーブルにロード -INSERT INTO posts SELECT * -FROM s3('https://datasets-documentation.s3.eu-west-3.amazonaws.com/stackoverflow/parquet/posts/by_month/*.parquet') - -0 rows in set. Elapsed: 191.692 sec. Processed 59.82 million rows, 24.03 GB (312.06 thousand rows/s., 125.37 MB/s.) -``` - -例ではいくつかの行のみを返しています。大規模なデータをクライアントに返す`SELECT`クエリのパフォーマンスを測定する際は、[nullフォーマット](/interfaces/formats/#null)をクエリに使用するか、結果を[`Null`エンジン](/engines/table-engines/special/null.md)に直接送信することをお勧めします。これにより、クライアントがデータの量に圧倒されてネットワークが飽和するのを避けることができます。 - -:::info -クエリから読み取る場合、最初のクエリは同じクエリを繰り返すよりも遅く見えることがよくあります。これは、S3自身のキャッシングと、[ClickHouseスキーマ推論キャッシュ](/operations/system-tables/schema_inference_cache)に起因する可能性があります。これにより、ファイルの推測されたスキーマが保存され、以降のアクセス時に推測ステップをスキップできるため、クエリ時間が短縮されます。 -::: -## 読み取りのためのスレッドの使用 {#using-threads-for-reads} - -S3での読み取りパフォーマンスは、ネットワーク帯域幅やローカルI/Oによって制限されない限り、コア数に応じて線形にスケールします。スレッドの数を増やすことは、ユーザーが意識すべきメモリオーバーヘッドの変動があります。読み取りスループットパフォーマンスを改善するために次の項目を変更できます: - -* 通常、`max_threads`のデフォルト値は、すなわちコアの数として十分です。クエリに使用するメモリ量が多く、これを削減する必要がある場合、または結果の`LIMIT`が少ない場合、この値は低く設定できます。十分なメモリを持っているユーザーは、この値を上げてS3からの読み取りスループットを向上させることを試みるかもしれません。通常これは、コ ア数が少ないマシン(例:10未満)でのみ有益です。さらに並列化を進める利点は、通常は他のリソースがボトルネックとして機能する場合には減少します。例えば、ネットワークおよびCPUの競合です。 -* ClickHouseの22.3.1以前のバージョンでは、`s3`関数または`S3`テーブルエンジンを使用する場合にのみ、複数のファイル全体での読み取りを並列化しました。これにより、ユーザーはS3でファイルがチャンクに分割され、最適な読み取りパフォーマンスを得るためにグロブパターンを使用して読み取られることを確認する必要がありました。後のバージョンでは、ファイル内でのダウンロードも並列化されます。 -* スレッド数が少ないシナリオでは、ユーザーは`remote_filesystem_read_method`を"read"に設定することで、S3からファイルを同期的に読み取ることができる利点を得られるかもしれません。 -* s3関数とテーブルの場合、個々のファイルの並列ダウンロードは、[`max_download_threads`](/operations/settings/settings#max_download_threads)および[`max_download_buffer_size`](/operations/settings/settings#max_download_buffer_size)の値によって決定されます。[`max_download_threads`](/operations/settings/settings#max_download_threads)がスレッドの数を制御しますが、ファイルはサイズが`2 * max_download_buffer_size`を超えない限り並列でダウンロードされません。デフォルトで`max_download_buffer_size`のデフォルト値は10MiBに設定されています。場合によっては、このバッファサイズを50 MB(`max_download_buffer_size=52428800`)に安全に増やすことができ、小さなファイルを単一のスレッドでのみダウンロードすることが保証されます。これにより、各スレッドのS3コールに費やされる時間が短縮され、S3の待機時間も短縮されます。この件についての[このブログ投稿](https://clickhouse.com/blog/clickhouse-1-trillion-row-challenge)を参照してください。 - -パフォーマンスを改善するために変更を加える前に、適切に測定することを確認してください。S3 APIコールはレイテンシーに敏感であり、クライアントのタイミングに影響を与える可能性があるため、パフォーマンス指標にはクエリログを使用してください。すなわち、`system.query_log`。 - -以前のクエリを考慮し、`max_threads`を`16`に倍増させることで(デフォルトの`max_thread`はノードあたりのコア数です)、読み取りクエリのパフォーマンスが2倍になり、より多くのメモリを消費することが分かりました。さらに`max_threads`を増やすことには収益の減少があります。 - -```sql -SELECT - OwnerDisplayName, - count() AS num_posts -FROM s3('https://datasets-documentation.s3.eu-west-3.amazonaws.com/stackoverflow/parquet/posts/by_month/*.parquet') -WHERE OwnerDisplayName NOT IN ('', 'anon') -GROUP BY OwnerDisplayName -ORDER BY num_posts DESC -LIMIT 5 -SETTINGS 
max_threads = 16 - -┌─OwnerDisplayName─┬─num_posts─┐ -│ user330315 │ 10344 │ -│ user4039065 │ 5316 │ -│ user149341 │ 4102 │ -│ user529758 │ 3700 │ -│ user3559349 │ 3068 │ -└──────────────────┴───────────┘ - -5 rows in set. Elapsed: 1.505 sec. Processed 59.82 million rows, 24.03 GB (39.76 million rows/s., 15.97 GB/s.) -Peak memory usage: 178.58 MiB. - -SETTINGS max_threads = 32 - -5 rows in set. Elapsed: 0.779 sec. Processed 59.82 million rows, 24.03 GB (76.81 million rows/s., 30.86 GB/s.) -Peak memory usage: 369.20 MiB. - -SETTINGS max_threads = 64 - -5 rows in set. Elapsed: 0.674 sec. Processed 59.82 million rows, 24.03 GB (88.81 million rows/s., 35.68 GB/s.) -Peak memory usage: 639.99 MiB. -``` -## 挿入のためのスレッドとブロックサイズの調整 {#tuning-threads-and-block-size-for-inserts} - -最大の取り込みパフォーマンスを達成するには、(1) 挿入ブロックサイズ、(2) 利用可能なCPUコアとRAMに基づく適切な挿入並行性のレベルを選択する必要があります。まとめると: - -- [挿入ブロックサイズ](#insert-block-size)を大きく設定するほど、ClickHouseが作成する必要のあるパーツが少なくなり、必要な[ディスクファイルI/O](https://en.wikipedia.org/wiki/Category:Disk_file_systems)と[バックグラウンドマージ](https://clickhouse.com/blog/supercharge-your-clickhouse-data-loads-part1#more-parts--more-background-part-merges)が減ります。 -- [並行挿入スレッドの数](#insert-parallelism)を多く設定するほど、データがより速く処理されます。 - -これら二つのパフォーマンス要因の間には、対立するトレードオフが存在します(および背景部分マージとのトレードオフも存在します)。ClickHouseサーバーのメインメモリの量は制限されています。大きなブロックはより多くのメインメモリを使用し、そのため並行に利用できる挿入スレッドの数が制限されます。逆に、より多くの並行挿入スレッドを使用するほど、メインメモリが多く必要とされ、挿入スレッドの数が同時にメモリ内で作成される挿入ブロックの数を決定するため、挿入ブロックサイズの制限が生じます。さらに、挿入スレッドとバックグラウンドマージスレッドの間にはリソースの競合が生じる可能性があります。設定された数の挿入スレッドが多くなると(1) マージする必要のあるパーツが増え、(2) バックグラウンドマージスレッドからCPUコアとメモリスペースが奪われます。 - -これらのパラメータの挙動がパフォーマンスとリソースに与える影響の詳細な説明については、[このブログ投稿](https://clickhouse.com/blog/supercharge-your-clickhouse-data-loads-part2)を読むことをお勧めします。ブログ記事でも説明されているように、調整はこれらの二つのパラメータのバランスを注意深く取ることを含むことがあります。このような徹底したテストはしばしば実用的ではないため、まとめると、次のことをお勧めします: - -```bash -• max_insert_threads: 挿入スレッド用に利用可能なCPUコアの約半分を選択します(バックグラウンドマージ用に十分なコアを残すため)。 - -• peak_memory_usage_in_bytes: 計画されたピークメモリ使用量を選択します。孤立した取り込みの場合は全メモリ(利用可能なRAMのすべて)またはその他のタスクのためにスペースを確保するために半分まで(あるいはそれ以下)を選択します。 - -その後: -min_insert_block_size_bytes = peak_memory_usage_in_bytes / (~3 * max_insert_threads) -``` - -この公式により、`min_insert_block_size_rows`を0に設定して(行ベースの閾値を無効化)、`max_insert_threads`を選択した値に設定し、`min_insert_block_size_bytes`を上記の公式から計算した結果に設定できます。 - -この公式を以前のStack Overflowの例に適用します。 - -- `max_insert_threads=4`(ノードあたり8コア) -- `peak_memory_usage_in_bytes` - 32 GiB(ノードリソースの100%)つまり、`34359738368`バイト。 -- `min_insert_block_size_bytes` = `34359738368/(3*4) = 2863311530` - -```sql -INSERT INTO posts SELECT * -FROM s3('https://datasets-documentation.s3.eu-west-3.amazonaws.com/stackoverflow/parquet/posts/by_month/*.parquet') SETTINGS min_insert_block_size_rows=0, max_insert_threads=4, min_insert_block_size_bytes=2863311530 - -0 rows in set. Elapsed: 128.566 sec. Processed 59.82 million rows, 24.03 GB (465.28 thousand rows/s., 186.92 MB/s.) 
-``` - -このように、これらの設定の調整により挿入パフォーマンスが33%以上向上しました。読者は、さらに単一ノードパフォーマンスを向上させる方法を探ることができます。 -## リソースとノードのスケーリング {#scaling-with-resources-and-nodes} - -リソースとノードのスケーリングは、読み取りおよび挿入クエリの両方に適用されます。 -### 垂直スケーリング {#vertical-scaling} - -これまでの全ての調整やクエリは、ClickHouse Cloudクラスターの単一ノードを使用しています。ユーザーは通常、ClickHouseを利用できる複数のノードを持っています。初めはユーザーが縦方向にスケールすることをお勧めします。コア数が増えることで、S3のスループットが線形に向上します。もしこれまでの挿入および読み取りクエリを、リソースが2倍の大きなClickHouse Cloudノード(64GiB、16 vCPU)で実行すると、両方とも約2倍の速さになります。 - -```sql -INSERT INTO posts SELECT * -FROM s3('https://datasets-documentation.s3.eu-west-3.amazonaws.com/stackoverflow/parquet/posts/by_month/*.parquet') SETTINGS min_insert_block_size_rows=0, max_insert_threads=8, min_insert_block_size_bytes=2863311530 - -0 rows in set. Elapsed: 67.294 sec. Processed 59.82 million rows, 24.03 GB (888.93 thousand rows/s., 357.12 MB/s.) - -SELECT - OwnerDisplayName, - count() AS num_posts -FROM s3('https://datasets-documentation.s3.eu-west-3.amazonaws.com/stackoverflow/parquet/posts/by_month/*.parquet') -WHERE OwnerDisplayName NOT IN ('', 'anon') -GROUP BY OwnerDisplayName -ORDER BY num_posts DESC -LIMIT 5 -SETTINGS max_threads = 92 - -5 rows in set. Elapsed: 0.421 sec. Processed 59.82 million rows, 24.03 GB (142.08 million rows/s., 57.08 GB/s.) -``` - -:::note -個々のノードは、ネットワークおよびS3 GETリクエストによってボトルネックとなることがあり、垂直スケーリングのパフォーマンスが線形に上昇しない場合があります。 -::: -### 水平スケーリング {#horizontal-scaling} - -やがて、ハードウェアの可用性とコスト効率から水平方向のスケーリングが必要になることがほとんどです。ClickHouse Cloudの生産クラスターには、最低3ノードがあります。したがって、ユーザーは挿入にすべてのノードを利用することを希望するかもしれません。 - -S3の読み取りにクラスターを利用するには、[クラスターの利用](/integrations/s3#utilizing-clusters)で説明されているように`s3Cluster`関数を使用する必要があります。これにより、読み取りがノード間で分散されます。 - -最初に挿入クエリを受け取るサーバーは、最初にグロブパターンを解決し、その後、一致する各ファイルの処理を動的に自分自身および他のサーバーに分配します。 - - - -以前の読み取りクエリを、3ノードに負荷を分散して再実行し、クエリを`s3Cluster`を使うように調整します。これはClickHouse Cloudでは、自動的に`default`クラスタを参照することで実行されます。 - -[クラスターの利用](/integrations/s3#utilizing-clusters)に記載されているように、この作業はファイルレベルで分散されます。この機能を利用するには、ユーザーには十分な数のファイル、つまりノード数の少なくとも>を持っている必要があります。 - -```sql -SELECT - OwnerDisplayName, - count() AS num_posts -FROM s3Cluster('default', 'https://datasets-documentation.s3.eu-west-3.amazonaws.com/stackoverflow/parquet/posts/by_month/*.parquet') -WHERE OwnerDisplayName NOT IN ('', 'anon') -GROUP BY OwnerDisplayName -ORDER BY num_posts DESC -LIMIT 5 -SETTINGS max_threads = 16 - -┌─OwnerDisplayName─┬─num_posts─┐ -│ user330315 │ 10344 │ -│ user4039065 │ 5316 │ -│ user149341 │ 4102 │ -│ user529758 │ 3700 │ -│ user3559349 │ 3068 │ -└──────────────────┴───────────┘ - -5 rows in set. Elapsed: 0.622 sec. Processed 59.82 million rows, 24.03 GB (96.13 million rows/s., 38.62 GB/s.) -Peak memory usage: 176.74 MiB. -``` - -同様に、以前の単一ノードのために特定した改善設定を利用して、挿入クエリも分散できます。 - -```sql -INSERT INTO posts SELECT * -FROM s3Cluster('default', 'https://datasets-documentation.s3.eu-west-3.amazonaws.com/stackoverflow/parquet/posts/by_month/*.parquet') SETTINGS min_insert_block_size_rows=0, max_insert_threads=4, min_insert_block_size_bytes=2863311530 - -0 rows in set. Elapsed: 171.202 sec. Processed 59.82 million rows, 24.03 GB (349.41 thousand rows/s., 140.37 MB/s.) 
-``` - -読者は、ファイルの読み込みがクエリを改善したのに対し、挿入パフォーマンスには改善が見られないことを認識するでしょう。デフォルトでは、読み取りは`s3Cluster`を使用して分散されますが、挿入はイニシエータノードに対して実行されます。つまり、読み取りは各ノードで行われますが、結果の行は分配のためにイニシエータにルートされます。高スループットのシナリオでは、これはボトルネックになる可能性があります。これに対処するために、`s3cluster`関数に対して`parallel_distributed_insert_select`パラメータを設定します。 - -これを`parallel_distributed_insert_select=2`に設定することで、`SELECT`と`INSERT`が各ノード上の分散エンジンの基盤となるテーブルに対して各シャードで実行されることが保証されます。 - -```sql -INSERT INTO posts -SELECT * -FROM s3Cluster('default', 'https://datasets-documentation.s3.eu-west-3.amazonaws.com/stackoverflow/parquet/posts/by_month/*.parquet') -SETTINGS parallel_distributed_insert_select = 2, min_insert_block_size_rows=0, max_insert_threads=4, min_insert_block_size_bytes=2863311530 - -0 rows in set. Elapsed: 54.571 sec. Processed 59.82 million rows, 24.03 GB (1.10 million rows/s., 440.38 MB/s.) -Peak memory usage: 11.75 GiB. -``` - -予想通り、これにより挿入パフォーマンスは3倍に低下します。 -## さらなる調整 {#further-tuning} -### 重複排除の無効化 {#disable-de-duplication} - -挿入操作は、タイムアウトなどのエラーにより失敗することがあります。挿入が失敗した場合、データが正常に挿入されているかどうかは不明な場合があります。クライアントによる挿入の再試行を安全に行えるように、分散デプロイメント(ClickHouse Cloudなど)では、データが正常に挿入されたかどうかを確認しようとします。挿入されたデータが重複としてマークされると、ClickHouseはそれを宛先テーブルに挿入しません。ただし、ユーザーには、データが通常どおり挿入されたかのように成功の操作状況が表示されます。 - -この動作は挿入のオーバーヘッドを伴い、クライアントやバッチからのデータを読み込む場合は意味がありますが、オブジェクトストレージからの`INSERT INTO SELECT`を実行する際には不要であることがあります。挿入時にこの機能を無効にすることで、パフォーマンスを向上させることができます。以下のように: - -```sql -INSERT INTO posts -SETTINGS parallel_distributed_insert_select = 2, min_insert_block_size_rows = 0, max_insert_threads = 4, min_insert_block_size_bytes = 2863311530, insert_deduplicate = 0 -SELECT * -FROM s3Cluster('default', 'https://datasets-documentation.s3.eu-west-3.amazonaws.com/stackoverflow/parquet/posts/by_month/*.parquet') -SETTINGS parallel_distributed_insert_select = 2, min_insert_block_size_rows = 0, max_insert_threads = 4, min_insert_block_size_bytes = 2863311530, insert_deduplicate = 0 - -0 rows in set. Elapsed: 52.992 sec. Processed 59.82 million rows, 24.03 GB (1.13 million rows/s., 453.50 MB/s.) -Peak memory usage: 26.57 GiB. -``` -### Optimize on insert {#optimize-on-insert} - -ClickHouseでは、`optimize_on_insert`設定は、データパーツが挿入プロセス中にマージされるかどうかを制御します。有効にすると(デフォルトでは`optimize_on_insert = 1`)、小さいパーツが挿入されると同時に大きなパーツにマージされ、読み取る必要のあるパーツの数が減ることでクエリパフォーマンスが向上します。ただし、このマージ処理は挿入プロセスにオーバーヘッドを追加するため、高スループットの挿入速度が遅くなる可能性があります。 - -この設定を無効にすると(`optimize_on_insert = 0`)、挿入時にマージをスキップし、特に頻繁な小規模挿入を扱う際にデータを書き込む速度が向上します。マージプロセスはバックグラウンドに延期されるため、より良い挿入パフォーマンスが得られますが、一時的に小さいパーツの数が増加し、バックグラウンドのマージが完了するまでクエリが遅くなる可能性があります。この設定は、挿入パフォーマンスが優先され、バックグラウンドのマージプロセスが後で効率的に最適化を処理できる場合に最適です。以下に示すように、設定を無効にすると挿入スループットが改善されることがあります。 - -```sql -SELECT * -FROM s3Cluster('default', 'https://datasets-documentation.s3.eu-west-3.amazonaws.com/stackoverflow/parquet/posts/by_month/*.parquet') -SETTINGS parallel_distributed_insert_select = 2, min_insert_block_size_rows = 0, max_insert_threads = 4, min_insert_block_size_bytes = 2863311530, insert_deduplicate = 0, optimize_on_insert = 0 - -0 rows in set. Elapsed: 49.688 sec. Processed 59.82 million rows, 24.03 GB (1.20 million rows/s., 483.66 MB/s.) 
-``` -## Misc notes {#misc-notes} - -* メモリが少ないシナリオの場合、S3に挿入する際には`max_insert_delayed_streams_for_parallel_write`を下げることを検討してください。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/s3/performance.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/s3/performance.md.hash deleted file mode 100644 index 7a32ae1100c..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/s3/performance.md.hash +++ /dev/null @@ -1 +0,0 @@ -fb2127f1c994d6aa diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-sources/cassandra.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-sources/cassandra.md deleted file mode 100644 index 7ef9f3888c9..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-sources/cassandra.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -slug: '/integrations/cassandra' -sidebar_label: 'Cassandra' -title: 'Cassandra' -description: 'Page describing how users can integrate with Cassandra via a dictionary.' ---- - - - - -# Cassandra統合 - -ユーザーはディクショナリを介してCassandraと統合できます。詳細は[こちら](/sql-reference/dictionaries#cassandra)をご覧ください。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-sources/cassandra.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-sources/cassandra.md.hash deleted file mode 100644 index 50125317b9d..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-sources/cassandra.md.hash +++ /dev/null @@ -1 +0,0 @@ -f8a5a77e01856aa4 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-sources/deltalake.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-sources/deltalake.md deleted file mode 100644 index 53e209e1b54..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-sources/deltalake.md +++ /dev/null @@ -1,15 +0,0 @@ ---- -slug: '/integrations/deltalake' -sidebar_label: 'Delta Lake' -title: 'Delta Lake' -description: 'Deltaレイク形式のテーブル関数を使用してDeltaレイクテーブルとの統合方法について説明したページ。' ---- - -import DeltaLakeFunction from '@site/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/deltalake.md'; - - -# Delta Lake統合 - -ユーザーは、テーブル関数を介してDelta Lakeテーブルフォーマットと統合できます。 - - diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-sources/deltalake.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-sources/deltalake.md.hash deleted file mode 100644 index 916e2ba3338..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-sources/deltalake.md.hash +++ /dev/null @@ -1 +0,0 @@ -433236a1b5fd4ec8 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-sources/hive.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-sources/hive.md deleted file mode 100644 index 5628dacda6c..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-sources/hive.md +++ /dev/null @@ -1,11 +0,0 @@ ---- -slug: '/integrations/hive' -sidebar_label: 'Hive' -title: 'Hive' -hide_title: true -description: 'Hive テーブルエンジンを説明するページ' ---- - -import HiveTableEngine from '@site/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/hive.md'; - - diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-sources/hive.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-sources/hive.md.hash deleted file mode 
100644 index c13b114fa3e..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-sources/hive.md.hash +++ /dev/null @@ -1 +0,0 @@ -bd2c3bc415379eed diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-sources/hudi.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-sources/hudi.md deleted file mode 100644 index da6dd070b2c..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-sources/hudi.md +++ /dev/null @@ -1,11 +0,0 @@ ---- -slug: '/integrations/hudi' -sidebar_label: 'Hudi' -title: 'Hudi' -hide_title: true -description: 'Hudi テーブルエンジンを説明するページ' ---- - -import HudiTableEngine from '@site/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/hudi.md'; - - diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-sources/hudi.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-sources/hudi.md.hash deleted file mode 100644 index 920b3ccfa82..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-sources/hudi.md.hash +++ /dev/null @@ -1 +0,0 @@ -0310a6868311721c diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-sources/iceberg.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-sources/iceberg.md deleted file mode 100644 index b60f4ebfb5b..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-sources/iceberg.md +++ /dev/null @@ -1,16 +0,0 @@ ---- -slug: '/integrations/iceberg' -sidebar_label: 'Iceberg' -title: 'Iceberg' -description: 'Page describing the IcebergFunction which can be used to integrate - ClickHouse with the Iceberg table format' ---- - -import IcebergFunction from '@site/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/iceberg.md'; - - -# Iceberg統合 - -ユーザーはテーブル関数を介してIcebergテーブルフォーマットと統合できます。 - - diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-sources/iceberg.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-sources/iceberg.md.hash deleted file mode 100644 index f34d372688f..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-sources/iceberg.md.hash +++ /dev/null @@ -1 +0,0 @@ -349f271e490b29ee diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-sources/mongodb.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-sources/mongodb.md deleted file mode 100644 index 1bafbe0ed0e..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-sources/mongodb.md +++ /dev/null @@ -1,11 +0,0 @@ ---- -slug: '/integrations/mongodb' -sidebar_label: 'MongoDB' -title: 'MongoDB' -hide_title: true -description: 'Page describing integration using the MongoDB engine' ---- - -import MongoDBEngine from '@site/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/mongodb.md'; - - diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-sources/mongodb.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-sources/mongodb.md.hash deleted file mode 100644 index ca00e8d7d58..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-sources/mongodb.md.hash +++ /dev/null @@ -1 +0,0 @@ -5222e681661c6323 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-sources/mysql.md 
b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-sources/mysql.md deleted file mode 100644 index 761c05fb7f0..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-sources/mysql.md +++ /dev/null @@ -1,11 +0,0 @@ ---- -slug: '/integrations/mysql' -sidebar_label: 'MySQL' -title: 'MySQL' -hide_title: true -description: 'MySQLのインテグレーションを説明するページ' ---- - -import MySQL from '@site/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/dbms/mysql/index.md'; - - diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-sources/mysql.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-sources/mysql.md.hash deleted file mode 100644 index 5986241ae7f..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-sources/mysql.md.hash +++ /dev/null @@ -1 +0,0 @@ -a9cd674d2f1898db diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-sources/nats.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-sources/nats.md deleted file mode 100644 index 9dfb72a4745..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-sources/nats.md +++ /dev/null @@ -1,11 +0,0 @@ ---- -slug: '/integrations/nats' -sidebar_label: 'NATS' -title: 'NATS' -hide_title: true -description: 'NATS エンジンとの統合について説明するページ' ---- - -import NatsEngine from '@site/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/nats.md'; - - diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-sources/nats.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-sources/nats.md.hash deleted file mode 100644 index 0bae3876b81..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-sources/nats.md.hash +++ /dev/null @@ -1 +0,0 @@ -dbef47f01440293f diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-sources/postgres.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-sources/postgres.md deleted file mode 100644 index d8182d28b9d..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-sources/postgres.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -slug: '/integrations/postgresql' -sidebar_label: 'PostgreSQL' -title: 'PostgreSQL' -hide_title: false -description: 'Page describing how to integrate Postgres with ClickHouse' ---- - -import PostgreSQL from '@site/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/dbms/postgresql/connecting-to-postgresql.md'; - -PostgreSQL から ClickHouse への完全な移行ガイド、データモデリングおよび同等の概念に関するアドバイスは、[こちら](/migrations/postgresql/overview)で確認できます。次に、ClickHouse と PostgreSQL を接続する方法について説明します。 - - diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-sources/postgres.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-sources/postgres.md.hash deleted file mode 100644 index 2478fa48957..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-sources/postgres.md.hash +++ /dev/null @@ -1 +0,0 @@ -bb0b7f9e990a42ad diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-sources/rabbitmq.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-sources/rabbitmq.md deleted file mode 100644 index dee33c7f1ba..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-sources/rabbitmq.md +++ /dev/null @@ -1,11 +0,0 @@ ---- -slug: 
'/integrations/rabbitmq' -sidebar_label: 'RabbitMQ' -title: 'RabbitMQ' -hide_title: true -description: 'Page describing the RabbitMQEngine integration' ---- - -import RabbitMQEngine from '@site/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/rabbitmq.md'; - - diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-sources/rabbitmq.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-sources/rabbitmq.md.hash deleted file mode 100644 index b1c8b4785d2..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-sources/rabbitmq.md.hash +++ /dev/null @@ -1 +0,0 @@ -a81ca01fff813363 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-sources/redis.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-sources/redis.md deleted file mode 100644 index 4d6264d2847..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-sources/redis.md +++ /dev/null @@ -1,15 +0,0 @@ ---- -slug: '/integrations/redis' -sidebar_label: 'Redis' -title: 'Redis' -description: 'Redis テーブル機能を説明するページ' ---- - -import RedisFunction from '@site/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/redis.md'; - - -# Redis統合 - -ユーザーは、テーブル関数を介してRedisと統合できます。 - - diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-sources/redis.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-sources/redis.md.hash deleted file mode 100644 index 3dfaf6d2975..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-sources/redis.md.hash +++ /dev/null @@ -1 +0,0 @@ -4a11847c5f3e6ea4 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-sources/rocksdb.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-sources/rocksdb.md deleted file mode 100644 index 1791ab621bc..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-sources/rocksdb.md +++ /dev/null @@ -1,11 +0,0 @@ ---- -slug: '/integrations/rocksdb' -sidebar_label: 'RocksDB' -title: 'RocksDB' -hide_title: true -description: 'Page describing the RocksDBTableEngine' ---- - -import RocksDBTableEngine from '@site/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/embedded-rocksdb.md'; - - diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-sources/rocksdb.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-sources/rocksdb.md.hash deleted file mode 100644 index 14bd02c5ede..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-sources/rocksdb.md.hash +++ /dev/null @@ -1 +0,0 @@ -e3777f356a395819 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-sources/sqlite.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-sources/sqlite.md deleted file mode 100644 index 6b607ee19ec..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-sources/sqlite.md +++ /dev/null @@ -1,11 +0,0 @@ ---- -slug: '/integrations/sqlite' -sidebar_label: 'SQLite' -title: 'SQLite' -hide_title: true -description: 'Page describing integration using the SQLite engine' ---- - -import SQLiteEngine from '@site/i18n/jp/docusaurus-plugin-content-docs/current/engines/table-engines/integrations/sqlite.md'; - - diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-sources/sqlite.md.hash 
b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-sources/sqlite.md.hash deleted file mode 100644 index a180e0ea0be..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-sources/sqlite.md.hash +++ /dev/null @@ -1 +0,0 @@ -9ebf902521c2a853 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/_category_.yml b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/_category_.yml deleted file mode 100644 index 648a2180271..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/_category_.yml +++ /dev/null @@ -1,8 +0,0 @@ -position: 300 -label: 'Data visualization' -collapsible: true -collapsed: true -link: - type: generated-index - title: Data visualization - slug: /integrations/data-visualization diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/astrato-and-clickhouse.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/astrato-and-clickhouse.md deleted file mode 100644 index 1cce15212ee..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/astrato-and-clickhouse.md +++ /dev/null @@ -1,124 +0,0 @@ ---- -sidebar_label: 'Astrato' -sidebar_position: 131 -slug: '/integrations/astrato' -keywords: -- 'clickhouse' -- 'Power BI' -- 'connect' -- 'integrate' -- 'ui' -- 'data apps' -- 'data viz' -- 'embedded analytics' -- 'Astrato' -description: 'Astrato brings true Self-Service BI to Enterprises & Data Businesses - by putting analytics in the hands of every user, enabling them to build their own - dashboards, reports and data apps, enabling the answering of data questions without - IT help. Astrato accelerates adoption, speeds up decision-making, and unifies analytics, - embedded analytics, data input, and data apps in one platform. Astrato unites action - and analytics in one, introduce live write-back, interact with ML models, accelerate - your analytics with AI – go beyond dashboarding, thanks to pushdown SQL support - in Astrato.' 
-title: 'Connecting Astrato to ClickHouse' ---- - -import astrato_1_dataconnection from '@site/static/images/integrations/data-visualization/astrato_1_dataconnection.png'; -import astrato_2a_clickhouse_connection from '@site/static/images/integrations/data-visualization/astrato_2a_clickhouse_connection.png'; -import astrato_2b_clickhouse_connection from '@site/static/images/integrations/data-visualization/astrato_2b_clickhouse_connection.png'; -import astrato_3_user_access from '@site/static/images/integrations/data-visualization/astrato_3_user_access.png'; -import astrato_4a_clickhouse_data_view from '@site/static/images/integrations/data-visualization/astrato_4a_clickhouse_data_view.png'; -import astrato_4b_clickhouse_data_view_joins from '@site/static/images/integrations/data-visualization/astrato_4b_clickhouse_data_view_joins.png'; -import astrato_4c_clickhouse_completed_data_view from '@site/static/images/integrations/data-visualization/astrato_4c_clickhouse_completed_data_view.png'; -import astrato_5a_clickhouse_build_chart from '@site/static/images/integrations/data-visualization/astrato_5a_clickhouse_build_chart.png'; -import astrato_5b_clickhouse_view_sql from '@site/static/images/integrations/data-visualization/astrato_5b_clickhouse_view_sql.png'; -import astrato_5c_clickhouse_complete_dashboard from '@site/static/images/integrations/data-visualization/astrato_5c_clickhouse_complete_dashboard.png'; -import ConnectionDetails from '@site/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_gather_your_details_http.mdx'; -import Image from '@theme/IdealImage'; -import CommunityMaintainedBadge from '@theme/badges/CommunityMaintained'; - - -# AstratoをClickHouseに接続する - - - -AstratoはPushdown SQLを使用して、ClickHouse Cloudまたはオンプレミスのデプロイに直接クエリを実行します。これにより、ClickHouseの業界トップクラスのパフォーマンスを活用しながら、必要なすべてのデータにアクセスできます。 - -## 接続データが必要です {#connection-data-required} - -データ接続を設定する際に必要な情報は次のとおりです: - -- データ接続:ホスト名、ポート - -- データベース資格情報:ユーザー名、パスワード - - - -## ClickHouseへのデータ接続を作成する {#creating-the-data-connection-to-clickhouse} - -- サイドバーで**データ**を選択し、**データ接続**タブを選択します -(または、こちらのリンクに移動します: https://app.astrato.io/data/sources) -​ -- 画面の右上にある**新しいデータ接続**ボタンをクリックします。 - - - -- **ClickHouse**を選択します。 - - - -- 接続ダイアログボックスで必須項目を入力します。 - - - -- **接続テスト**をクリックします。接続が成功した場合は、データ接続に**名前**を付け、**次へ**をクリックします。 - -- データ接続への**ユーザーアクセス**を設定し、**接続**をクリックします。 - - - -- 接続が作成され、データビューが作成されます。 - -:::note -重複が作成された場合、データソース名にタイムスタンプが追加されます。 -::: - -## セマンティックモデル / データビューを作成する {#creating-a-semantic-model--data-view} - -私たちのデータビューエディターでは、ClickHouse内のすべてのテーブルとスキーマを見ることができ、始めるためにいくつかを選択します。 - - - -データを選択したら、**データビュー**を定義するために、ウェブページの右上にある定義をクリックします。 - -ここでは、データを結合したり、**管理されたディメンションとメジャーを作成**したりできます。これは、さまざまなチーム間でのビジネスロジックの一貫性を促進するのに理想的です。 - - - -**Astratoはメタデータを使用して結合をインテリジェントに提案**します。これにより、ClickHouseのキーを活用します。提案された結合を使用することで、うまく管理されたClickHouseデータから簡単に作業を開始できます。私たちはまた、**結合の質**を表示し、Astratoからすべての提案を詳細に確認するオプションを提供します。 - - - -## ダッシュボードを作成する {#creating-a-dashboard} - -数ステップで、Astratoで最初のチャートを作成できます。 -1. ビジュアルパネルを開く -2. ビジュアルを選択する(まずはカラムバーチャートを始めましょう) -3. ディメンションを追加する -4. 
メジャーを追加する - - - - -### 各ビジュアライゼーションをサポートする生成されたSQLを見る {#view-generated-sql-supporting-each-visualization} - -透明性と正確性はAstratoの中心です。生成されたすべてのクエリを可視化し、完全にコントロールできるようにしています。すべての計算は直接ClickHouse内で行われ、そのスピードを活用しながら、強力なセキュリティとガバナンスを維持しています。 - - - - -### 完成したダッシュボードの例 {#example-completed-dashboard} - -美しい完成したダッシュボードやデータアプリはもうすぐ手に入ります。私たちが構築したものをもっと見たい場合は、私たちのウェブサイトのデモギャラリーにアクセスしてください。 https://astrato.io/gallery - - diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/astrato-and-clickhouse.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/astrato-and-clickhouse.md.hash deleted file mode 100644 index 5fb9a6d5ac7..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/astrato-and-clickhouse.md.hash +++ /dev/null @@ -1 +0,0 @@ -363262f6fe156a30 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/chartbrew-and-clickhouse.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/chartbrew-and-clickhouse.md deleted file mode 100644 index f8bd6765a5c..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/chartbrew-and-clickhouse.md +++ /dev/null @@ -1,124 +0,0 @@ ---- -title: 'Connecting Chartbrew to ClickHouse' -sidebar_label: 'Chartbrew' -sidebar_position: 131 -slug: '/integrations/chartbrew-and-clickhouse' -keywords: -- 'ClickHouse' -- 'Chartbrew' -- 'connect' -- 'integrate' -- 'visualization' -description: 'Connect Chartbrew to ClickHouse to create real-time dashboards and - client reports.' ---- - -import chartbrew_01 from '@site/static/images/integrations/data-visualization/chartbrew_01.png'; -import chartbrew_02 from '@site/static/images/integrations/data-visualization/chartbrew_02.png'; -import chartbrew_03 from '@site/static/images/integrations/data-visualization/chartbrew_03.png'; -import chartbrew_04 from '@site/static/images/integrations/data-visualization/chartbrew_04.png'; -import chartbrew_05 from '@site/static/images/integrations/data-visualization/chartbrew_05.png'; -import chartbrew_06 from '@site/static/images/integrations/data-visualization/chartbrew_06.png'; -import chartbrew_07 from '@site/static/images/integrations/data-visualization/chartbrew_07.png'; -import chartbrew_08 from '@site/static/images/integrations/data-visualization/chartbrew_08.png'; -import chartbrew_09 from '@site/static/images/integrations/data-visualization/chartbrew_09.png'; -import ConnectionDetails from '@site/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_gather_your_details_http.mdx'; -import CommunityMaintainedBadge from '@theme/badges/CommunityMaintained'; -import Image from '@theme/IdealImage'; - - -# ChartbrewをClickHouseに接続する - - - -[Chartbrew](https://chartbrew.com)は、ユーザーがダッシュボードを作成し、リアルタイムでデータを監視できるデータ可視化プラットフォームです。ClickHouseを含む複数のデータソースをサポートしており、チャートやレポートを作成するためのノーコードインターフェースを提供します。 - -## 目標 {#goal} - -このガイドでは、ChartbrewをClickHouseに接続し、SQLクエリを実行し、視覚化を作成します。最後には、あなたのダッシュボードは次のようになるかもしれません: - - - -:::tip データを追加する -作業するデータセットがない場合は、例の一つを追加できます。このガイドでは、[UK Price Paid](/getting-started/example-datasets/uk-price-paid.md)データセットを使用します。 -::: - -## 1. 接続情報を集める {#1-gather-your-connection-details} - - - -## 2. ChartbrewをClickHouseに接続する {#2-connect-chartbrew-to-clickhouse} - -1. [Chartbrew](https://chartbrew.com/login)にログインし、**Connections**タブに移動します。 -2. **Create connection**をクリックし、利用可能なデータベースオプションから**ClickHouse**を選択します。 - - - -3. 
ClickHouseデータベースの接続情報を入力します: - - - **Display Name**: Chartbrew内で接続を識別するための名前。 - - **Host**: ClickHouseサーバーのホスト名またはIPアドレス。 - - **Port**: 通常はHTTPS接続のために`8443`。 - - **Database Name**: 接続したいデータベース。 - - **Username**: あなたのClickHouseユーザー名。 - - **Password**: あなたのClickHouseパスワード。 - - - -4. **Test connection**をクリックして、ChartbrewがClickHouseに接続できるか確認します。 -5. テストが成功した場合は、**Save connection**をクリックします。ChartbrewはClickHouseからスキーマを自動的に取得します。 - - - -## 3. データセットを作成し、SQLクエリを実行する {#3-create-a-dataset-and-run-a-sql-query} - -1. **Create dataset**ボタンをクリックするか、**Datasets**タブに移動して作成します。 -2. 前に作成したClickHouse接続を選択します。 - - - - 可視化するデータを取得するためのSQLクエリを書きます。たとえば、このクエリは`uk_price_paid`データセットから年ごとの平均支払価格を計算します: - - ```sql - SELECT toYear(date) AS year, avg(price) AS avg_price - FROM uk_price_paid - GROUP BY year - ORDER BY year; - ``` - - - - **Run query**をクリックしてデータを取得します。 - - クエリの書き方がわからない場合は、**ChartbrewのAIアシスタント**を使用して、データベーススキーマに基づいたSQLクエリを生成できます。 - - - -データが取得されたら、**Configure dataset**をクリックして、視覚化パラメータを設定します。 - -## 4. 視覚化を作成する {#4-create-a-visualization} - -1. 視覚化のためのメトリック(数値)とディメンション(カテゴリカル値)を定義します。 -2. データセットをプレビューして、クエリ結果が正しく構造化されていることを確認します。 -3. チャートタイプ(例:折れ線グラフ、棒グラフ、円グラフ)を選択し、それをダッシュボードに追加します。 -4. **Complete dataset**をクリックして、設定を確定します。 - - - - データの異なる側面を視覚化するために、希望するだけ多くのデータセットを作成できます。これらのデータセットを使用して、異なるメトリックを管理するための複数のダッシュボードを作成できます。 - - - -## 5. データの自動更新を設定する {#5-automate-data-updates} - -ダッシュボードを最新の状態に保つためには、データの自動更新をスケジュールできます: - -1. データセットの更新ボタンの横にあるカレンダーアイコンをクリックします。 -2. 更新間隔を設定します(例:毎時、毎日)。 -3. 設定を保存して、自動更新を有効にします。 - - - -## もっと学ぶ {#learn-more} - -詳細については、[ChartbrewとClickHouse](https://chartbrew.com/blog/visualizing-clickhouse-data-with-chartbrew-a-step-by-step-guide/)に関するブログ記事をご覧ください。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/chartbrew-and-clickhouse.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/chartbrew-and-clickhouse.md.hash deleted file mode 100644 index b902299af86..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/chartbrew-and-clickhouse.md.hash +++ /dev/null @@ -1 +0,0 @@ -cca725c76a7daf9b diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/deepnote.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/deepnote.md deleted file mode 100644 index e40c3ad9656..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/deepnote.md +++ /dev/null @@ -1,59 +0,0 @@ ---- -sidebar_label: 'Deepnote' -sidebar_position: 11 -slug: '/integrations/deepnote' -keywords: -- 'clickhouse' -- 'Deepnote' -- 'connect' -- 'integrate' -- 'notebook' -description: 'Efficiently query very large datasets, analyzing and modeling in the - comfort of known notebook environment.' 
-title: 'Connect ClickHouse to Deepnote' ---- - -import deepnote_01 from '@site/static/images/integrations/data-visualization/deepnote_01.png'; -import deepnote_02 from '@site/static/images/integrations/data-visualization/deepnote_02.png'; -import deepnote_03 from '@site/static/images/integrations/data-visualization/deepnote_03.png'; -import Image from '@theme/IdealImage'; -import CommunityMaintainedBadge from '@theme/badges/CommunityMaintained'; -import ConnectionDetails from '@site/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_gather_your_details_http.mdx'; - - -# Connect ClickHouse to Deepnote - - - -Deepnote は、チームが洞察を発見し共有するために構築された共同作業型データノートブックです。Jupyter互換であるだけでなく、クラウド上で動作し、データサイエンスプロジェクトに効率的に取り組むための中央の作業スペースを提供します。 - -このガイドは、すでにDeepnoteアカウントをお持ちで、稼働中のClickHouseインスタンスがあることを前提としています。 - -## Interactive example {#interactive-example} -DeepnoteのデータノートブックからClickHouseをクエリするインタラクティブな例を探索したい場合は、以下のボタンをクリックして、[ClickHouse playground](../../getting-started/playground.md)に接続されたテンプレートプロジェクトを起動してください。 - -[](https://deepnote.com/launch?template=ClickHouse%20and%20Deepnote) - -## Connect to ClickHouse {#connect-to-clickhouse} - -1. Deepnote内で、「Integrations」概要を選択し、ClickHouseタイルをクリックします。 - - - -2. ClickHouseインスタンスの接続詳細を提供します: - - - - - **_注意:_** ClickHouseへの接続がIPアクセスリストで保護されている場合、DeepnoteのIPアドレスを許可する必要があります。詳細は[Deepnoteのドキュメント](https://docs.deepnote.com/integrations/authorize-connections-from-deepnote-ip-addresses)をお読みください。 - -3. おめでとうございます!これでClickHouseがDeepnoteに統合されました。 - -## Using ClickHouse integration. {#using-clickhouse-integration} - -1. まず、ノートブックの右側でClickHouse統合に接続します。 - - - -2. 次に、新しいClickHouseクエリブロックを作成し、データベースをクエリします。クエリ結果はDataFrameとして保存され、SQLブロックで指定された変数に格納されます。 -3. 既存の[SQLブロック](https://docs.deepnote.com/features/sql-cells)をClickHouseブロックに変換することもできます。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/deepnote.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/deepnote.md.hash deleted file mode 100644 index 8da365b0e3a..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/deepnote.md.hash +++ /dev/null @@ -1 +0,0 @@ -555494093c8282cc diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/draxlr-and-clickhouse.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/draxlr-and-clickhouse.md deleted file mode 100644 index 4f12429e5d8..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/draxlr-and-clickhouse.md +++ /dev/null @@ -1,105 +0,0 @@ ---- -sidebar_label: 'Draxlr' -sidebar_position: 131 -slug: '/integrations/draxlr' -keywords: -- 'clickhouse' -- 'Draxlr' -- 'connect' -- 'integrate' -- 'ui' -description: 'Draxlr is a Business intelligence tool with data visualization and - analytics.' 
-title: 'Connecting Draxlr to ClickHouse' ---- - -import ConnectionDetails from '@site/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_gather_your_details_http.mdx'; -import draxlr_01 from '@site/static/images/integrations/data-visualization/draxlr_01.png'; -import draxlr_02 from '@site/static/images/integrations/data-visualization/draxlr_02.png'; -import draxlr_03 from '@site/static/images/integrations/data-visualization/draxlr_03.png'; -import draxlr_04 from '@site/static/images/integrations/data-visualization/draxlr_04.png'; -import draxlr_05 from '@site/static/images/integrations/data-visualization/draxlr_05.png'; -import draxlr_06 from '@site/static/images/integrations/data-visualization/draxlr_06.png'; -import Image from '@theme/IdealImage'; -import CommunityMaintainedBadge from '@theme/badges/CommunityMaintained'; - - -# DraxlrをClickHouseに接続する - - - -Draxlrは、ClickHouseデータベースに接続するための直感的なインターフェースを提供し、チームが数分以内に洞察を探求、視覚化、公開できるようにします。このガイドでは、成功した接続を確立するための手順を説明します。 - - -## 1. ClickHouseの認証情報を取得する {#1-get-your-clickhouse-credentials} - - -## 2. DraxlrをClickHouseに接続する {#2--connect-draxlr-to-clickhouse} - -1. ナビゲーションバーの**データベースに接続**ボタンをクリックします。 - -2. 利用可能なデータベースのリストから**ClickHouse**を選択し、次へ進みます。 - -3. ホスティングサービスの1つを選択し、次へ進みます。 - -4. **接続名**フィールドに任意の名前を入力します。 - -5. フォームに接続詳細を追加します。 - - - -6. **次へ**ボタンをクリックし、接続が確立されるのを待ちます。接続に成功すると、テーブルページが表示されます。 - -## 4. データを探索する {#4-explore-your-data} - -1. リストからテーブルの1つをクリックします。 - -2. テーブルのデータを見るために探索ページに移動します。 - -3. フィルタを追加したり、結合を行ったりして、データをソートすることができます。 - - - -4. また、**グラフ**ボタンを使用して、グラフの種類を選択しデータを視覚化することもできます。 - - - - -## 4. SQLクエリを使用する {#4-using-sql-queries} - -1. ナビゲーションバーの探索ボタンをクリックします。 - -2. **生クエリ**ボタンをクリックし、テキストエリアにクエリを入力します。 - - - -3. **クエリを実行**ボタンをクリックして、結果を確認します。 - - -## 4. クエリを保存する {#4-saving-you-query} - -1. クエリを実行した後、**クエリを保存**ボタンをクリックします。 - - - -2. **クエリ名**テキストボックスにクエリの名前を付け、カテゴリを選択するフォルダを選択します。 - -3. **ダッシュボードに追加**オプションを使用して、結果をダッシュボードに追加することもできます。 - -4. **保存**ボタンをクリックして、クエリを保存します。 - - -## 5. ダッシュボードの構築 {#5-building-dashboards} - -1. ナビゲーションバーの**ダッシュボード**ボタンをクリックします。 - - - -2. 左のサイドバーの**追加 +**ボタンをクリックして、新しいダッシュボードを追加できます。 - -3. 新しいウィジェットを追加するには、右上隅の**追加**ボタンをクリックします。 - -4. 保存されたクエリのリストからクエリを選択し、視覚化の種類を選んで、**ダッシュボード項目を追加**ボタンをクリックします。 - -## 詳しく知る {#learn-more} -Draxlrの詳細については、[Draxlrドキュメント](https://draxlr.notion.site/draxlr/Draxlr-Docs-d228b23383f64d00a70836ff9643a928)サイトをご覧ください。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/draxlr-and-clickhouse.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/draxlr-and-clickhouse.md.hash deleted file mode 100644 index a41b94e3bb7..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/draxlr-and-clickhouse.md.hash +++ /dev/null @@ -1 +0,0 @@ -e14f174da23e8f67 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/embeddable-and-clickhouse.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/embeddable-and-clickhouse.md deleted file mode 100644 index 858501fc480..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/embeddable-and-clickhouse.md +++ /dev/null @@ -1,76 +0,0 @@ ---- -sidebar_label: 'Embeddable' -slug: '/integrations/embeddable' -keywords: -- 'clickhouse' -- 'Embeddable' -- 'connect' -- 'integrate' -- 'ui' -description: 'Embeddable is a developer toolkit for building fast, interactive, - fully-custom analytics experiences directly into your app.' 
-title: 'Connecting Embeddable to ClickHouse' ---- - -import ConnectionDetails from '@site/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_gather_your_details_http.mdx'; -import CommunityMaintainedBadge from '@theme/badges/CommunityMaintained'; - - -# EmbeddableをClickHouseに接続する - - - -[Embeddable](https://embeddable.com/)では、コード内で[データモデル](https://docs.embeddable.com/data-modeling/introduction)と[コンポーネント](https://docs.embeddable.com/development/introduction)を定義し(自分自身のコードリポジトリに保存)、私たちの**SDK**を使用して、強力なEmbeddable**ノーコードビルダー**内でチームにそれらを提供します。 - -最終的な結果は、製品内で迅速かつインタラクティブな顧客向け分析を提供できることです。これは、あなたのプロダクトチームによって設計され、エンジニアリングチームによって構築され、顧客対応チームとデータチームによって維持されます。正にあるべき姿です。 - -組み込まれた行レベルセキュリティにより、ユーザーは自身が見ることを許可されたデータのみを正確に確認できます。さらに、2つの完全に構成可能なキャッシュレベルによって、スケールにおいて迅速なリアルタイム分析を提供できます。 - -## 1. 接続詳細を収集する {#1-gather-your-connection-details} - - -## 2. ClickHouse接続タイプを作成する {#2-create-a-clickhouse-connection-type} - -Embeddable APIを使用してデータベース接続を追加します。この接続はClickHouseサービスに接続するために使用されます。次のAPI呼び出しを使用して接続を追加できます。 - -```javascript -// セキュリティ上の理由から、これはクライアントサイドから*決して*呼び出さないでください -fetch('https://api.embeddable.com/api/v1/connections', { - method: 'POST', - headers: { - 'Content-Type': 'application/json', - Accept: 'application/json', - Authorization: `Bearer ${apiKey}` /* APIキーを安全に保管してください */, - }, - body: JSON.stringify({ - name: 'my-clickhouse-db', - type: 'clickhouse', - credentials: { - host: 'my.clickhouse.host', - user: 'clickhouse_user', - port: 8443, - password: '*****', - }, - }), -}); - - -Response: -Status 201 { errorMessage: null } -``` - -上記は`CREATE`アクションを表しますが、すべての`CRUD`操作が利用可能です。 - -`apiKey`は、Embeddableダッシュボードの1つで「**公開**」をクリックすることで見つけることができます。 - -`name`は、この接続を識別するための一意の名前です。 -- デフォルトではデータモデルは「default」という接続を探しますが、異なる接続に異なるデータモデルを接続するために、別の`data_source`名をモデルに指定できます(モデル内でdata_source名を指定するだけです)。 - -`type`は、Embeddableにどのドライバーを使用するかを伝えます。 - -- ここでは`clickhouse`を使用したいですが、Embeddableのワークスペースに異なるデータソースを複数接続できるので、他にも`postgres`、`bigquery`、`mongodb`などを使用できます。 - -`credentials`は、ドライバーが必要とする資格情報を含むJavaScriptオブジェクトです。 -- これらは安全に暗号化され、データモデルで記述されたデータのみを取得するために使用されます。Embeddableは、各接続に対して読み取り専用のデータベースユーザーを作成することを強く推奨します(Embeddableはデータベースから読み取るだけで、書き込むことはありません)。 - -本番環境、QA、テストなどの異なるデータベースへの接続をサポートするために(または異なる顧客のために異なるデータベースをサポートするために)、各接続を環境に割り当てることができます([Environments API](https://docs.embeddable.com/data/environments)を参照してください)。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/embeddable-and-clickhouse.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/embeddable-and-clickhouse.md.hash deleted file mode 100644 index 511aada5b34..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/embeddable-and-clickhouse.md.hash +++ /dev/null @@ -1 +0,0 @@ -777c4729a02e58a3 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/explo-and-clickhouse.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/explo-and-clickhouse.md deleted file mode 100644 index 28e52c10aed..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/explo-and-clickhouse.md +++ /dev/null @@ -1,141 +0,0 @@ ---- -sidebar_label: 'Explo' -sidebar_position: 131 -slug: '/integrations/explo' -keywords: -- 'clickhouse' -- 'Explo' -- 'connect' -- 'integrate' -- 'ui' -description: 'Exploは、データに関する質問をするための使いやすいオープンソースUIツールです。' -title: 'Connecting Explo to ClickHouse' ---- - -import Image from '@theme/IdealImage'; -import ConnectionDetails 
from '@site/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_gather_your_details_http.mdx'; -import explo_01 from '@site/static/images/integrations/data-visualization/explo_01.png'; -import explo_02 from '@site/static/images/integrations/data-visualization/explo_02.png'; -import explo_03 from '@site/static/images/integrations/data-visualization/explo_03.png'; -import explo_04 from '@site/static/images/integrations/data-visualization/explo_04.png'; -import explo_05 from '@site/static/images/integrations/data-visualization/explo_05.png'; -import explo_06 from '@site/static/images/integrations/data-visualization/explo_06.png'; -import explo_07 from '@site/static/images/integrations/data-visualization/explo_07.png'; -import explo_08 from '@site/static/images/integrations/data-visualization/explo_08.png'; -import explo_09 from '@site/static/images/integrations/data-visualization/explo_09.png'; -import explo_10 from '@site/static/images/integrations/data-visualization/explo_10.png'; -import explo_11 from '@site/static/images/integrations/data-visualization/explo_11.png'; -import explo_12 from '@site/static/images/integrations/data-visualization/explo_12.png'; -import explo_13 from '@site/static/images/integrations/data-visualization/explo_13.png'; -import explo_14 from '@site/static/images/integrations/data-visualization/explo_14.png'; -import explo_15 from '@site/static/images/integrations/data-visualization/explo_15.png'; -import explo_16 from '@site/static/images/integrations/data-visualization/explo_16.png'; -import CommunityMaintainedBadge from '@theme/badges/CommunityMaintained'; - - -# Connecting Explo to ClickHouse - - - -顧客向けの分析をあらゆるプラットフォームに対して提供。美しいビジュアル化のために設計され、シンプルさのためにエンジニアリングされています。 - -## Goal {#goal} - -このガイドでは、ClickHouseからExploにデータを接続し、結果を視覚化します。 チャートは次のようになります: - - -

- -:::tip データを追加する -作業用のデータセットがない場合は、例の1つを追加できます。このガイドでは[UK Price Paid](/getting-started/example-datasets/uk-price-paid.md)データセットを使用するので、それを選択することができます。同じ文書カテゴリー内に他にもいくつかのデータセットがあります。 -::: - -## 1. 接続詳細を収集する {#1-gather-your-connection-details} - - -## 2. ExploをClickHouseに接続する {#2--connect-explo-to-clickhouse} - -1. Exploアカウントにサインアップします。 - -2. 左のサイドバーでExploの**データ**タブをクリックします。 - - - -3. 右上の**データソースに接続**をクリックします。 - - - -4. **Getting Started**ページの情報を入力します。 - - - -5. **Clickhouse**を選択します。 - - - -6. **Clickhouse Credentials**を入力します。 - - - -7. **Security**を設定します。 - - - -8. Clickhouse内で、**Explo IPsをホワイトリストに登録**します。 -` -54.211.43.19, 52.55.98.121, 3.214.169.94, and 54.156.141.148 -` - -## 3. ダッシュボードを作成する {#3-create-a-dashboard} - -1. 左側のナビゲーションバーで**Dashboard**タブに移動します。 - - - -2. 右上の**Create Dashboard**をクリックして、ダッシュボードに名前を付けます。これで、ダッシュボードが作成されました! - - - -3. 次のような画面が表示されるはずです: - - - -## 4. SQLクエリを実行する {#4-run-a-sql-query} - -1. スキーマタイトルの下の右サイドバーからテーブル名を取得します。次に、データセットエディタに次のコマンドを入力します: -` -SELECT * FROM YOUR_TABLE_NAME -LIMIT 100 -` - - - -2. 実行をクリックし、プレビュタブに移動してデータを確認します。 - - - -## 5. チャートを作成する {#5-build-a-chart} - -1. 左側からバー チャートアイコンを画面にドラッグします。 - - - -2. データセットを選択します。次のような画面が表示されるはずです: - - - -3. X軸に**county**、Y軸セクションに**Price**を次のように入力します: - - - -4. 次に、集計を**AVG**に変更します。 - - - -5. これで、価格別の住宅の平均価格が得られました! - - - -## Learn more {#learn-more} - -Exploやダッシュボードの作成方法についての詳細情報は、Exploドキュメントを訪問することで見つけることができます。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/explo-and-clickhouse.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/explo-and-clickhouse.md.hash deleted file mode 100644 index b4281945f50..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/explo-and-clickhouse.md.hash +++ /dev/null @@ -1 +0,0 @@ -cb537ef14ee6dea6 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/grafana/config.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/grafana/config.md deleted file mode 100644 index 7b3822368cb..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/grafana/config.md +++ /dev/null @@ -1,316 +0,0 @@ ---- -sidebar_label: 'プラグイン構成' -sidebar_position: 3 -slug: '/integrations/grafana/config' -description: 'Grafana における ClickHouse データソースプラグインの構成オプション' -title: 'Grafana での ClickHouse データソースの構成' ---- - -import Image from '@theme/IdealImage'; -import ConnectionDetails from '@site/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_gather_your_details_native.md'; -import config_common from '@site/static/images/integrations/data-visualization/grafana/config_common.png'; -import config_http from '@site/static/images/integrations/data-visualization/grafana/config_http.png'; -import config_additional from '@site/static/images/integrations/data-visualization/grafana/config_additional.png'; -import config_logs from '@site/static/images/integrations/data-visualization/grafana/config_logs.png'; -import config_traces from '@site/static/images/integrations/data-visualization/grafana/config_traces.png'; -import alias_table_config_example from '@site/static/images/integrations/data-visualization/grafana/alias_table_config_example.png'; -import alias_table_select_example from '@site/static/images/integrations/data-visualization/grafana/alias_table_select_example.png'; -import ClickHouseSupportedBadge from '@theme/badges/ClickHouseSupported'; - - -# ClickHouse データソースの Grafana における設定 - 
- - -構成を変更する最も簡単な方法は、Grafana UI のプラグイン設定ページで行うことですが、データソースも [YAML ファイルでプロビジョニング](https://grafana.com/docs/grafana/latest/administration/provisioning/#data-sources) できます。 - -このページでは、ClickHouse プラグインでの設定に利用可能なオプションのリストと、YAML でデータソースをプロビジョニングするための構成スニペットを示します。 - -すべてのオプションの概要については、完全な構成オプションのリストを [こちら](#all-yaml-options) で確認できます。 - -## 一般的な設定 {#common-settings} - -例の設定画面: - - -一般的な設定のための例の YAML: -```yaml -jsonData: - host: 127.0.0.1 # (required) サーバーアドレス。 - port: 9000 # (required) サーバーポート。ネイティブの場合、9440がセキュア、9000が非セキュアのデフォルトです。HTTPの場合、8443がセキュア、8123が非セキュアのデフォルトです。 - - protocol: native # (required) 接続に使用されるプロトコル。 "native" または "http" に設定できます。 - secure: false # 接続がセキュアであれば true に設定します。 - - username: default # 認証に使用されるユーザー名。 - - tlsSkipVerify: # true に設定すると、TLS 検証をスキップします。 - tlsAuth: # TLS クライアント認証を有効にするために true に設定します。 - tlsAuthWithCACert: # CA 証明書が提供されている場合は true に設定します。自己署名 TLS 証明書を検証するために必要です。 - -secureJsonData: - password: secureExamplePassword # 認証に使用されるパスワード。 - - tlsCACert: # TLS CA 証明書 - tlsClientCert: # TLS クライアント証明書 - tlsClientKey: # TLS クライアントキー -``` - -設定が UI から保存されると、`version` プロパティが追加されることに注意してください。これにより、その設定が保存されたプラグインのバージョンが表示されます。 - -### HTTP プロトコル {#http-protocol} - -HTTP プロトコル経由で接続を選択すると、追加の設定が表示されます。 - - - -#### HTTP パス {#http-path} - -HTTP サーバーが異なる URL パスで公開されている場合は、ここに追加できます。 - -```yaml -jsonData: - # 最初のスラッシュを除外します - path: additional/path/example -``` - -#### カスタム HTTP ヘッダー {#custom-http-headers} - -サーバーに送信するリクエストにカスタムヘッダーを追加できます。 - -ヘッダーはプレーンテキストまたはセキュアであることができます。 -すべてのヘッダーキーはプレーンテキストで保存され、セキュアヘッダー値はセキュア構成に保存されます(`password` フィールドに似ています)。 - -:::warning セキュア値を HTTP 経由で送信 -セキュアヘッダー値はセキュア構成に安全に保存されますが、セキュア接続が無効になっている場合は、値が HTTP 経由で送信されます。 -::: - -プレーン/セキュアヘッダーの例 YAML: -```yaml -jsonData: - httpHeaders: - - name: X-Example-Plain-Header - value: plain text value - secure: false - - name: X-Example-Secure-Header - # "value" は除外されます - secure: true -secureJsonData: - secureHttpHeaders.X-Example-Secure-Header: secure header value -``` - -## 追加設定 {#additional-settings} - -これらの追加設定はオプションです。 - - - -例の YAML: -```yaml -jsonData: - defaultDatabase: default # クエリビルダーによって読み込まれるデフォルトのデータベース。デフォルトは "default" です。 - defaultTable: # クエリビルダーによって読み込まれるデフォルトのテーブル。 - - dialTimeout: 10 # サーバーへの接続時のダイアルタイムアウト(秒)。デフォルトは "10" です。 - queryTimeout: 60 # クエリ実行時のクエリタイムアウト(秒)。デフォルトは 60 です。これはユーザーの権限が必要です。権限エラーが発生した場合は、"0" に設定して無効にしてみてください。 - validateSql: false # true に設定すると、SQL エディタ内の SQL を検証します。 -``` - -### OpenTelemetry {#opentelemetry} - -OpenTelemetry (OTel) はプラグインに深く統合されています。 -OpenTelemetry データは、当社の [exporter plugin](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/exporter/clickhouseexporter) を使用して ClickHouse にエクスポートできます。 -最適な使用法のために、[logs](#logs) と [traces](#traces) の両方に OTel を設定することをお勧めします。 - -また、[data links](./query-builder.md#data-links) を有効にするためのデフォルトも設定する必要があります。これは強力な可観測性ワークフローを可能にする機能です。 - -### ログ {#logs} - -[ログのクエリビルディングを加速するため](./query-builder.md#logs)、デフォルトのデータベース/テーブルおよびログクエリのカラムを設定できます。これにより、クエリビルダーに実行可能なログクエリが事前ロードされ、探求ページでのブラウジングが速くなります。 - -OpenTelemetry を使用している場合は、"**Use OTel**" スイッチを有効にし、**default log table** を `otel_logs` に設定する必要があります。 -これにより、デフォルトのカラムが選択された OTel スキーマバージョンを使用するように自動的に上書きされます。 - -OpenTelemetry がログに必要ではありませんが、単一のログ/トレースデータセットを使用すると、[data linking](./query-builder.md#data-links) による可観測性ワークフローがスムーズになるのに役立ちます。 - -ログ設定画面の例: - - -ログ設定の例 YAML: -```yaml -jsonData: - logs: - defaultDatabase: default # デフォルトのログデータベース。 - defaultTable: otel_logs # デフォルトのログテーブル。OTel を使用している場合は "otel_logs" に設定する必要があります。 - - otelEnabled: false # OTel が有効な場合は true に設定します。 - 
otelVersion: latest # 使用する OTel コレクタのスキーマバージョン。バージョンは UI に表示されますが、"latest" はプラグインの利用可能な最新バージョンを使用します。 - - # 新しいログクエリを開くときに選択されるデフォルトのカラム。OTel が有効な場合は無視されます。 - timeColumn: # ログの主要な時刻カラム。 - levelColumn: # ログのレベル/重大度。値は通常 "INFO"、"error"、または "Debug" のようになります。 - messageColumn: # ログのメッセージ/コンテンツ。 -``` - -### トレース {#traces} - -[トレースのクエリビルディングを加速するため](./query-builder.md#traces)、デフォルトのデータベース/テーブルおよびトレースクエリのカラムを設定できます。これにより、クエリビルダーに実行可能なトレース検索クエリが事前ロードされ、探求ページでのブラウジングが速くなります。 - -OpenTelemetry を使用している場合は、"**Use OTel**" スイッチを有効にし、**default trace table** を `otel_traces` に設定する必要があります。 -これにより、デフォルトのカラムが選択された OTel スキーマバージョンを使用するように自動的に上書きされます。 -OpenTelemetry は必須ではありませんが、この機能はトレースのスキーマを使用する際に最も効果を発揮します。 - -トレース設定画面の例: - - -トレース設定の例 YAML: -```yaml -jsonData: - traces: - defaultDatabase: default # デフォルトのトレースデータベース。 - defaultTable: otel_traces # デフォルトのトレーステーブル。OTel を使用している場合は "otel_traces" に設定する必要があります。 - - otelEnabled: false # OTel が有効な場合は true に設定します。 - otelVersion: latest # 使用する OTel コレクタのスキーマバージョン。バージョンは UI に表示されますが、"latest" はプラグインの利用可能な最新バージョンを使用します。 - - # 新しいトレースクエリを開くときに選択されるデフォルトのカラム。OTel が有効な場合は無視されます。 - traceIdColumn: # トレース ID カラム。 - spanIdColumn: # スパン ID カラム。 - operationNameColumn: # 操作名カラム。 - parentSpanIdColumn: # 親スパン ID カラム。 - serviceNameColumn: # サービス名カラム。 - durationTimeColumn: # 継続時間カラム。 - durationUnitColumn:

- -
- -## 1. 接続情報を集める {#1-gather-your-connection-details} - - -## 2. 読み取り専用ユーザーの作成 {#2-making-a-read-only-user} - -ClickHouse を Grafana のようなデータ可視化ツールに接続する場合、データを不適切な変更から保護するために、読み取り専用のユーザーを作成することをお勧めします。 - -Grafana はクエリが安全であるかどうかを検証しません。クエリには `DELETE` や `INSERT` などの任意の SQL ステートメントを含めることができます。 - -読み取り専用ユーザーを構成するには、次の手順に従ってください: -1. [ClickHouse でのユーザーとロールの作成](/operations/access-rights)ガイドに従って、`readonly` ユーザープロファイルを作成します。 -2. `readonly` ユーザーが基盤となる [clickhouse-go client](https://github.com/ClickHouse/clickhouse-go) に必要な `max_execution_time` 設定を変更するための十分な権限を持っていることを確認します。 -3. 公開 ClickHouse インスタンスを使用している場合、`readonly` プロファイルで `readonly=2` を設定することは推奨されません。代わりに `readonly=1` のままにして、`max_execution_time` の制約タイプを [changeable_in_readonly](/operations/settings/constraints-on-settings) に設定して、この設定の変更を許可します。 - -## 3. Grafana 用の ClickHouse プラグインをインストールする {#3--install-the-clickhouse-plugin-for-grafana} - -Grafana が ClickHouse に接続する前に、適切な Grafana プラグインをインストールする必要があります。Grafana にログインしている前提で、次の手順に従ってください: - -1. サイドバーの **Connections** ページから、**Add new connection** タブを選択します。 - -2. **ClickHouse** を検索し、Grafana Labs の署名されたプラグインをクリックします: - - - -3. 次の画面で **Install** ボタンをクリックします: - - - -## 4. ClickHouse データソースを定義する {#4-define-a-clickhouse-data-source} - -1. インストールが完了したら、**Add new data source** ボタンをクリックします。(**Connections** ページの **Data sources** タブからもデータソースを追加できます。) - - - -2. 下にスクロールして **ClickHouse** データソースタイプを見つけるか、**Add data source** ページの検索バーで検索します。**ClickHouse** データソースを選択すると、次のページが表示されます: - - - -3. サーバーの設定と資格情報を入力します。主な設定は以下の通りです: - -- **Server host address:** ClickHouse サービスのホスト名。 -- **Server port:** ClickHouse サービスのポート。サーバー設定やプロトコルによって異なる場合があります。 -- **Protocol**:ClickHouse サービスに接続するために使用されるプロトコル。 -- **Secure connection**:サーバーが安全な接続を要求する場合は有効にします。 -- **Username** および **Password**:ClickHouse のユーザー資格情報を入力します。ユーザーを設定していない場合は、ユーザー名に `default` を試してください。 [読み取り専用ユーザーを構成する](#2-making-a-read-only-user)ことをお勧めします。 - -他の設定については [plugin configuration](./config.md) ドキュメントを確認してください。 - -4. **Save & test** ボタンをクリックして、Grafana が ClickHouse サービスに接続できるか確認します。成功すると、**Data source is working** メッセージが表示されます: - - - -## 5. 
次のステップ {#5-next-steps} - -データソースの準備が整いました![クエリビルダー](./query-builder.md) を使ってクエリを構築する方法についてもっと学びましょう。 - -設定の詳細については、[plugin configuration](./config.md) ドキュメントを確認してください。 - -これらのドキュメントには含まれていない情報を探している場合は、[GitHub のプラグインリポジトリ](https://github.com/grafana/clickhouse-datasource)を確認してください。 - -## プラグインバージョンのアップグレード {#upgrading-plugin-versions} - -v4 から、設定やクエリは新しいバージョンがリリースされるたびにアップグレードできるようになります。 - -v3 の設定やクエリは、開かれると v4 に移行されます。古い設定やダッシュボードは v4 で読み込まれますが、移行は新しいバージョンで保存されるまで持続しません。古い設定やクエリを開く際に問題が発生した場合は、変更を破棄し、[GitHub に問題を報告してください](https://github.com/grafana/clickhouse-datasource/issues)。 - -設定やクエリが新しいバージョンで作成された場合、プラグインは以前のバージョンにダウングレードできません。 - -## 関連コンテンツ {#related-content} - -- [GitHub のプラグインリポジトリ](https://github.com/grafana/clickhouse-datasource) -- ブログ: [ClickHouse でのデータの可視化 - パート 1 - Grafana](https://clickhouse.com/blog/visualizing-data-with-grafana) -- ブログ: [Grafana を使用した ClickHouse データの可視化 - 動画](https://www.youtube.com/watch?v=Ve-VPDxHgZU) -- ブログ: [ClickHouse Grafana プラグイン 4.0 - SQL 可観測性のレベルアップ](https://clickhouse.com/blog/clickhouse-grafana-plugin-4-0) -- ブログ: [データを ClickHouse に取り込む - パート 3 - S3 の使用](https://clickhouse.com/blog/getting-data-into-clickhouse-part-3-s3) -- ブログ: [ClickHouse での可観測性ソリューションの構築 - パート 1 - ログ](https://clickhouse.com/blog/storing-log-data-in-clickhouse-fluent-bit-vector-open-telemetry) -- ブログ: [ClickHouse での可観測性ソリューションの構築 - パート 2 - トレース](https://clickhouse.com/blog/storing-traces-and-spans-open-telemetry-in-clickhouse) -- ブログ & ウェビナー: [ClickHouse と Grafana を使用したオープンソース GitHub アクティビティの物語](https://clickhouse.com/blog/introduction-to-clickhouse-and-grafana-webinar) diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/grafana/index.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/grafana/index.md.hash deleted file mode 100644 index 1db0f74374f..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/grafana/index.md.hash +++ /dev/null @@ -1 +0,0 @@ -1058865d35b8fac7 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/grafana/query-builder.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/grafana/query-builder.md deleted file mode 100644 index 62f91825755..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/grafana/query-builder.md +++ /dev/null @@ -1,261 +0,0 @@ ---- -sidebar_label: 'クエリビルダー' -sidebar_position: 2 -slug: '/integrations/grafana/query-builder' -description: 'ClickHouse Grafanaプラグイン内のクエリビルダーの使用方法' -title: 'クエリビルダー' ---- - -import Image from '@theme/IdealImage'; -import demo_table_query from '@site/static/images/integrations/data-visualization/grafana/demo_table_query.png'; -import demo_logs_query from '@site/static/images/integrations/data-visualization/grafana/demo_logs_query.png'; -import demo_logs_query_fields from '@site/static/images/integrations/data-visualization/grafana/demo_logs_query_fields.png'; -import demo_time_series_query from '@site/static/images/integrations/data-visualization/grafana/demo_time_series_query.png'; -import demo_trace_query from '@site/static/images/integrations/data-visualization/grafana/demo_trace_query.png'; -import demo_raw_sql_query from '@site/static/images/integrations/data-visualization/grafana/demo_raw_sql_query.png'; -import trace_id_in_table from '@site/static/images/integrations/data-visualization/grafana/trace_id_in_table.png'; -import trace_id_in_logs from 
'@site/static/images/integrations/data-visualization/grafana/trace_id_in_logs.png'; -import demo_data_links from '@site/static/images/integrations/data-visualization/grafana/demo_data_links.png'; -import ClickHouseSupportedBadge from '@theme/badges/ClickHouseSupported'; - - -# クエリビルダー - - - -任意のクエリは ClickHouse プラグインを使用して実行できます。 -クエリビルダーは簡単なクエリに便利なオプションですが、複雑なクエリの場合は [SQL エディタ](#sql-editor) を使用する必要があります。 - -クエリビルダー内のすべてのクエリには [クエリタイプ](#query-types) があり、少なくとも1つのカラムを選択する必要があります。 - -利用可能なクエリタイプは次のとおりです: -- [テーブル](#table):データをテーブル形式で表示するための最もシンプルなクエリタイプ。集計関数を含む単純および複雑なクエリのどちらにも機能します。 -- [ログ](#logs):ログクエリの構築に最適化されています。[defaults configured](./config.md#logs) が設定された探索ビューで最適に機能します。 -- [時系列](#time-series):時系列クエリを構築するために最適です。専用の時間カラムを選択し、集計関数を追加することができます。 -- [トレース](#traces):トレースの検索/表示に最適化されています。[defaults configured](./config.md#traces) が設定された探索ビューで最適に機能します。 -- [SQL エディタ](#sql-editor):完全にクエリを制御したい場合に SQL エディタを使用できます。このモードでは、任意の SQL クエリを実行できます。 - -## クエリタイプ {#query-types} - -*クエリタイプ*設定は、ビルドされるクエリのタイプに合わせてクエリビルダーのレイアウトを変更します。 -クエリタイプは、データを視覚化する際に使用されるパネルも決定します。 - -### テーブル {#table} - -最も柔軟なクエリタイプはテーブルクエリです。これは、単純および集計クエリを処理するために設計された他のクエリビルダーのすべてをキャッチオールするものです。 - -| フィールド | 説明 | -|----|----| -| ビルダーモード | 単純なクエリは Aggregates および Group By を除外し、集計クエリはこれらのオプションを含みます。 | -| カラム | 選択されたカラム。生の SQL をこのフィールドに入力して関数やカラムのエイリアスを指定できます。 | -| 集計 | [集計関数](/sql-reference/aggregate-functions/index.md)のリスト。関数およびカラムのカスタム値を許可します。Aggregate モードのみに表示されます。 | -| Group By | 一連の [GROUP BY](/sql-reference/statements/select/group-by.md) 表現。Aggregate モードのみに表示されます。 | -| Order By | 一連の [ORDER BY](/sql-reference/statements/select/order-by.md) 表現。 | -| Limit | クエリの末尾に [LIMIT](/sql-reference/statements/select/limit.md) ステートメントを追加します。 `0` に設定すると除外されます。一部の視覚化では、この値を `0` に設定する必要があり、すべてのデータを表示できる必要があります。 | -| フィルタ | `WHERE` 句に適用されるフィルタのリスト。 | - - - -このクエリタイプはデータをテーブルとして表示します。 - -### ログ {#logs} - -ログクエリタイプはログデータをクエリするために特化したクエリビルダーを提供します。 -データソースの [ログ設定](./config.md#logs) でデフォルトを設定することで、クエリビルダーがデフォルトのデータベース/テーブルおよびカラムで事前にロードされるようにできます。 -OpenTelemetry を有効にすることで、スキーマバージョンに応じたカラムを自動的に選択することも可能です。 - -**時間** と **レベル** のフィルタがデフォルトで追加され、時間カラムに対する Order By も含まれます。 -これらのフィルタはそれぞれのフィールドに関連付けられており、カラムが変更されると更新されます。 -**Level** フィルタはデフォルトで SQL から除外されており、`IS ANYTHING` オプションから変更することで有効にできます。 - -ログクエリタイプは [データリンク](#data-links) をサポートしています。 - -| フィールド | 説明 | -|----|----| -| OTel を使用 | OpenTelemetry カラムを有効にします。選択されたカラムは、選択された OTel スキーマバージョンによって定義されたカラムを使用するために上書きされます(カラム選択は無効になります)。 | -| カラム | ログ行に追加する追加カラム。生の SQL をこのフィールドに入力して関数やカラムのエイリアスを指定できます。 | -| 時間 | ログの主要なタイムスタンプカラム。時間のような型を表示しますが、カスタム値/関数を許可します。 | -| ログレベル | オプション。ログの*レベル*または*重大度*。値は一般的に `INFO`、`error`、`Debug` などの形式です。 | -| メッセージ | ログメッセージの内容。 | -| Order By | 一連の [ORDER BY](/sql-reference/statements/select/order-by.md) 表現。 | -| Limit | クエリの末尾に [LIMIT](/sql-reference/statements/select/limit.md) ステートメントを追加します。 `0` に設定すると除外されますが、大規模なログデータセットには推奨されません。 | -| フィルタ | `WHERE` 句に適用されるフィルタのリスト。 | -| メッセージフィルタ | `LIKE %value%` を使用してログを便利にフィルタリングするためのテキスト入力。入力が空の場合は除外されます。 | - - - -
-このクエリタイプは、データをログパネルに表示し、その上にログのヒストグラムパネルを表示します。 - -クエリで選択された追加カラムは、展開されたログ行で表示できます: - - -### 時系列 {#time-series} - -時系列クエリタイプは、[テーブル](#table) に似ていますが、時系列データに焦点を当てています。 - -二つのビューは主に同じですが、顕著な違いは次のとおりです: - - 専用の *時間* フィールド。 - - Aggregate モードでは、時間フィールドに対して自動的に時間間隔マクロが適用され、Group By も適用されます。 - - Aggregate モードでは、「カラム」フィールドが非表示になります。 - - **時間** フィールドに対して時系列フィルタと Order By が自動的に追加されます。 - -:::important ビジュアライゼーションにデータが欠けていますか? -一部のケースでは、時系列パネルがカットオフされているように表示されます。これは、デフォルトの制限が `1000` に設定されているためです。 - -データセットが許可する場合は、`LIMIT` 句を `0` に設定して削除してみてください。 -::: - -| フィールド | 説明 | -|----|----| -| ビルダーモード | 単純なクエリは Aggregates および Group By を除外し、集計クエリはこれらのオプションを含みます。 | -| 時間 | クエリの主要な時間カラム。時間のような型を表示しますが、カスタム値/関数を許可します。 | -| カラム | 選択されたカラム。生の SQL をこのフィールドに入力して関数やカラムのエイリアスを指定できます。単純モードでのみ表示されます。 | -| 集計 | [集計関数](/sql-reference/aggregate-functions/index.md)のリスト。関数およびカラムのカスタム値を許可します。Aggregate モードのみに表示されます。 | -| Group By | 一連の [GROUP BY](/sql-reference/statements/select/group-by.md) 表現。Aggregate モードのみに表示されます。 | -| Order By | 一連の [ORDER BY](/sql-reference/statements/select/order-by.md) 表現。 | -| Limit | クエリの末尾に [LIMIT](/sql-reference/statements/select/limit.md) ステートメントを追加します。 `0` に設定すると除外されます。これは、完全なビジュアライゼーションを表示するためにいくつかの時系列データセットで推奨されます。 | -| フィルタ | `WHERE` 句に適用されるフィルタのリスト。 | - - - -このクエリタイプは、時系列パネルを使用してデータを表示します。 - -### トレース {#traces} - -トレースクエリタイプは、トレースの検索や表示を簡単に行えるクエリビルダーを提供します。 -OpenTelemetry データ用に設計されていますが、異なるスキーマからトレースをレンダリングするためにカラムを選択することもできます。 -データソースの [トレース設定](./config.md#traces) でデフォルトを設定することで、クエリビルダーがデフォルトのデータベース/テーブルおよびカラムで事前にロードされるようにできます。デフォルトが設定されている場合、カラム選択はデフォルトで collapsed されます。 -OpenTelemetry を有効にすることで、スキーマバージョンに応じたカラムを自動的に選択することも可能です。 - -デフォルトフィルタが追加されており、最上位のスパンのみを表示することを目的としています。 -時間と期間の列に対する Order By も含まれています。 -これらのフィルタはそれぞれのフィールドに関連付けられており、カラムが変更されると更新されます。 -**サービス名** フィルタはデフォルトで SQL から除外されており、`IS ANYTHING` オプションから変更することで有効にできます。 - -トレースクエリタイプは [データリンク](#data-links) をサポートしています。 - -| フィールド | 説明 | -|----|----| -| トレースモード | クエリをトレース検索からトレース ID ルックアップに変更します。 | -| OTel を使用 | OpenTelemetry カラムを有効にします。選択されたカラムは、選択された OTel スキーマバージョンによって定義されたカラムを使用するために上書きされます(カラム選択は無効になります)。 | -| トレース ID カラム | トレースの ID です。 | -| スパン ID カラム | スパン ID。 | -| 親スパン ID カラム | 親スパン ID。上位レベルのトレースの場合は通常空です。 | -| サービス名カラム | サービス名。 | -| 操作名カラム | 操作名。 | -| 開始時間カラム | トレーススパンの主要な時間カラム。スパンが開始されたときの時間。 | -| 期間時間カラム | スパンの期間。デフォルトで Grafana はこれをミリ秒の float として期待します。`Duration Unit` ドロップダウンを介して自動的に変換が適用されます。 | -| 期間単位 | 期間に使用される時間の単位。デフォルトはナノ秒です。選択した単位は、Grafana が必要とするミリ秒の float に変換されます。 | -| タグカラム | スパンタグ。OTel ベースのスキーマを使用しない場合、このフィールドは除外してください。特定のマップカラムタイプを必要とします。 | -| サービスタグカラム | サービスタグ。OTel ベースのスキーマを使用しない場合、このフィールドは除外してください。特定のマップカラムタイプを必要とします。 | -| Order By | 一連の [ORDER BY](/sql-reference/statements/select/order-by.md) 表現。 | -| Limit | クエリの末尾に [LIMIT](/sql-reference/statements/select/limit.md) ステートメントを追加します。 `0` に設定すると除外されますが、大規模なトレースデータセットには推奨されません。 | -| フィルタ | `WHERE` 句に適用されるフィルタのリスト。 | -| トレース ID | フィルタリングするトレース ID。トレース ID モードでのみ使用され、トレース ID [データリンク](#data-links) を開く際に使用されます。 | - - - -このクエリタイプは、トレース検索モードではテーブルビューで、トレース ID モードではトレースパネルでデータを表示します。 - -## SQL エディタ {#sql-editor} - -クエリビルダーでは複雑すぎるクエリには、SQL エディタを使用できます。 -これにより、ClickHouse SQL をそのまま記述して実行できるようになり、クエリを完全に制御できます。 - -SQL エディタは、クエリエディタの上部で「SQL エディタ」を選択することで開くことができます。 - -このモードでも [マクロ関数](#macros) を使用することができます。 - -最適なビジュアライゼーションを得るためにクエリタイプの間で切り替えることができます。 -この切り替えはダッシュボードビューでも影響を与え、特に時系列データに顕著です。 - - - -## データリンク {#data-links} - -Grafana [データリンク](https://grafana.com/docs/grafana/latest/panels-visualizations/configure-data-links)を使用して新しいクエリへのリンクを作成できます。 -この機能は、トレースをログにリンクさせるための 
ClickHouse プラグイン内で有効になっています。また、その逆も可能です。これは、[data source's config](./config.md#opentelemetry) でログとトレースの両方に OpenTelemetry が設定されている場合に最も効果的に機能します。 - -
- テーブル内のトレースリンクの例 - -
- -
- ログ内のトレースリンクの例 - -
- -### データリンクの作成方法 {#how-to-make-a-data-link} - -クエリ内で `traceID` というカラムを選択することでデータリンクを作成できます。この名前は大文字小文字を区別せず、"ID" の前にアンダースコアを追加することもサポートしています。例えば:`traceId`、`TraceId`、`TRACE_ID`、`tracE_iD` はすべて有効です。 - -[ログ](#logs) または [トレース](#traces) クエリで OpenTelemetry が有効になっている場合、トレース ID カラムは自動的に追加されます。 - -トレース ID カラムを含めることで、データに "**View Trace**" および "**View Logs**" リンクが付けられます。 - -### リンク機能 {#linking-abilities} - -データリンクがあることで、提供されたトレース ID を使用してトレースやログを開くことができます。 - -"**View Trace**"はトレースを含むスプリットパネルを開き、"**View Logs**"はトレース ID でフィルタリングされたログクエリを開きます。 -ダッシュボードからクリックされたリンクは、探索ビューの新しいタブで開かれます。 - -[ログ](./config.md#logs) と [トレース](./config.md#traces) の両方にデフォルトを設定することが、クエリタイプを横断する場合(ログからトレース、トレースからログ)に必要です。同じクエリタイプのリンクを開く場合は、クエリを単にコピーすればよいため、デフォルトは必要ありません。 - -
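To illustrate the column-naming convention described under データリンクの作成方法 above, here is a minimal sketch of a log query for the SQL editor that picks up the trace-ID data link. It assumes the default OTel log table and columns from the plugin's log configuration (`otel_logs`, `Timestamp`, `Body`, `TraceId`); adjust the names to your own schema.

```sql
SELECT
    Timestamp,
    Body,
    TraceId   -- a column whose name matches "traceID" (case-insensitive) enables the "View Trace" / "View Logs" links
FROM otel_logs
WHERE $__timeFilter(Timestamp)
ORDER BY Timestamp DESC
LIMIT 100
```

Because the trace ID column is present in the result, each row in the logs panel carries the data links without any further configuration.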
- ログクエリ (左パネル) からトレース (右パネル) を表示する例 - -
- - -## マクロ {#macros} - -マクロはクエリに動的 SQL を追加する簡単な方法です。 -クエリが ClickHouse サーバーに送信される前に、プラグインはマクロを展開し、完全な式で置き換えます。 - -SQL エディタとクエリビルダーの両方からのクエリにマクロを使用できます。 - -### マクロの使用 {#using-macros} - -マクロはクエリ内の任意の場所に含めることができ、必要に応じて複数回使用できます。 - -以下は `$__timeFilter` マクロの使用例です: - -入力: -```sql -SELECT log_time, log_message -FROM logs -WHERE $__timeFilter(log_time) -``` - -最終的なクエリ出力: -```sql -SELECT log_time, log_message -FROM logs -WHERE log_time >= toDateTime(1415792726) AND log_time <= toDateTime(1447328726) -``` - -この例では、Grafana ダッシュボードの時間範囲が `log_time` カラムに適用されます。 - -プラグインは、ブレース `{}` を使用した表記法もサポートしています。この表記法は、[パラメーター](/sql-reference/syntax.md#defining-and-using-query-parameters)内で必要なクエリに使用します。 - -### マクロの一覧 {#list-of-macros} - -これはプラグインで利用可能なすべてのマクロのリストです: - -| マクロ | 説明 | 出力例 | -| -------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------- | -| `$__dateFilter(columnName)` | 提供されたカラムに対して時間範囲フィルタに置き換えられ、Grafana パネルの時間範囲を [Date](/sql-reference/data-types/date.md) として使用します。 | `columnName >= toDate('2022-10-21') AND columnName <= toDate('2022-10-23')` | -| `$__timeFilter(columnName)` | 提供されたカラムに対して時間範囲フィルタに置き換えられ、Grafana パネルの時間範囲を [DateTime](/sql-reference/data-types/datetime.md) として使用します。 | `columnName >= toDateTime(1415792726) AND time <= toDateTime(1447328726)` | -| `$__timeFilter_ms(columnName)` | 提供されたカラムに対して時間範囲フィルタに置き換えられ、Grafana パネルの時間範囲を [DateTime64](/sql-reference/data-types/datetime64.md) として使用します。 | `columnName >= fromUnixTimestamp64Milli(1415792726123) AND columnName <= fromUnixTimestamp64Milli(1447328726456)` | -| `$__dateTimeFilter(dateColumn, timeColumn)` | `$__dateFilter()` と `$__timeFilter()` を組み合わせて、別々の Date と DateTime カラムを使用するための簡略記法。エイリアス `$__dt()` | `$__dateFilter(dateColumn) AND $__timeFilter(timeColumn)` | -| `$__fromTime` | Grafana パネル範囲の開始時間を [DateTime](/sql-reference/data-types/datetime.md) にキャストして置き換えられます。 | `toDateTime(1415792726)` | -| `$__fromTime_ms` | パネル範囲の開始時間を [DateTime64](/sql-reference/data-types/datetime64.md) にキャストして置き換えられます。 | `fromUnixTimestamp64Milli(1415792726123)` | -| `$__toTime` | Grafana パネル範囲の終了時間を [DateTime](/sql-reference/data-types/datetime.md) にキャストして置き換えられます。 | `toDateTime(1447328726)` | -| `$__toTime_ms` | パネル範囲の終了時間を [DateTime64](/sql-reference/data-types/datetime64.md) にキャストして置き換えられます。 | `fromUnixTimestamp64Milli(1447328726456)` | -| `$__timeInterval(columnName)` | ウィンドウサイズに基づいて秒単位で間隔を計算する関数に置き換えられます。 | `toStartOfInterval(toDateTime(columnName), INTERVAL 20 second)` | -| `$__timeInterval_ms(columnName)` | ウィンドウサイズに基づいてミリ秒単位で間隔を計算する関数に置き換えられます。 | `toStartOfInterval(toDateTime64(columnName, 3), INTERVAL 20 millisecond)` | -| `$__interval_s` | ダッシュボード間隔を秒単位で置き換えます。 | `20` | -| `$__conditionalAll(condition, $templateVar)` | テンプレート変数がすべての値を選択しない場合は最初のパラメーターに置き換えられます。テンプレート変数がすべての値を選択した場合は `1=1` に置き換えられます。 | `condition` または `1=1` | diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/grafana/query-builder.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/grafana/query-builder.md.hash deleted file mode 100644 index 09b4e6f6326..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/grafana/query-builder.md.hash +++ /dev/null @@ -1 +0,0 @@ -24932308da38121b diff 
--git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/hashboard-and-clickhouse.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/hashboard-and-clickhouse.md deleted file mode 100644 index a33e75b5f0e..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/hashboard-and-clickhouse.md +++ /dev/null @@ -1,60 +0,0 @@ ---- -sidebar_label: 'Hashboard' -sidebar_position: 132 -slug: '/integrations/hashboard' -keywords: -- 'clickhouse' -- 'Hashboard' -- 'connect' -- 'integrate' -- 'ui' -- 'analytics' -description: 'Hashboard is a robust analytics platform that can be easily integrated - with ClickHouse for real-time data analysis.' -title: 'Connecting ClickHouse to Hashboard' ---- - -import ConnectionDetails from '@site/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_gather_your_details_native.md'; -import hashboard_01 from '@site/static/images/integrations/data-visualization/hashboard_01.png'; -import Image from '@theme/IdealImage'; -import CommunityMaintainedBadge from '@theme/badges/CommunityMaintained'; - - -# ClickHouseをHashboardに接続する - - - -[Hashboard](https://hashboard.com) は、組織内の誰もがメトリクスを追跡し、実用的な洞察を発見できるインタラクティブなデータ探索ツールです。Hashboardは、ClickHouseデータベースに対してリアルタイムSQLクエリを発行し、自己サービスのadhocデータ探索に特に役立ちます。 - - - -
- -このガイドでは、HashboardをClickHouseインスタンスに接続する手順を説明します。この情報は、Hashboardの[ClickHouse統合ドキュメント](https://docs.hashboard.com/docs/database-connections/clickhouse)にも掲載されています。 - -## 前提条件 {#pre-requisites} - -- 自身のインフラ上にホストされたClickHouseデータベース、または[ClickHouse Cloud](https://clickhouse.com/)。 -- [Hashboardアカウント](https://hashboard.com/getAccess)およびプロジェクト。 - -## HashboardをClickHouseに接続する手順 {#steps-to-connect-hashboard-to-clickhouse} - -### 1. 接続詳細を収集する {#1-gather-your-connection-details} - - - -### 2. Hashboardに新しいデータベース接続を追加する {#2-add-a-new-database-connection-in-hashboard} - -1. [Hashboardプロジェクト](https://hashboard.com/app)に移動します。 -2. サイドナビゲーションバーのギアアイコンをクリックして設定ページを開きます。 -3. `+ 新しいデータベース接続`をクリックします。 -4. モーダルで「ClickHouse」を選択します。 -5. 収集した情報を基に**接続名**、**ホスト**、**ポート**、**ユーザー名**、**パスワード**、**データベース**フィールドを入力します。 -6. 「テスト」をクリックして接続が正しく設定されていることを確認します。 -7. 「追加」をクリックします。 - -これで、ClickHouseデータベースがHashboardに接続され、[データモデル](https://docs.hashboard.com/docs/data-modeling/add-data-model)、[探索](https://docs.hashboard.com/docs/visualizing-data/explorations)、[メトリクス](https://docs.hashboard.com/docs/metrics)、および[ダッシュボード](https://docs.hashboard.com/docs/dashboards)を構築することができます。これらの機能に関する詳細は、対応するHashboardのドキュメントを参照してください。 - -## 詳細を学ぶ {#learn-more} - -より高度な機能やトラブルシューティングについては、[Hashboardのドキュメント](https://docs.hashboard.com/)を訪れてください。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/hashboard-and-clickhouse.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/hashboard-and-clickhouse.md.hash deleted file mode 100644 index 85cf9149454..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/hashboard-and-clickhouse.md.hash +++ /dev/null @@ -1 +0,0 @@ -fe01793d078a8076 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/index.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/index.md deleted file mode 100644 index 0d1826b75d2..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/index.md +++ /dev/null @@ -1,89 +0,0 @@ ---- -sidebar_label: '概要' -sidebar_position: 1 -keywords: -- 'ClickHouse' -- 'connect' -- 'Luzmo' -- 'Explo' -- 'Tableau' -- 'Grafana' -- 'Metabase' -- 'Mitzu' -- 'superset' -- 'Deepnote' -- 'Draxlr' -- 'RocketBI' -- 'Omni' -- 'bi' -- 'visualization' -- 'tool' -title: 'ClickHouseでデータを可視化する' -slug: '/integrations/data-visualization' -description: 'ClickHouseでデータの可視化について学ぶ' ---- - - - - -# ClickHouseでのデータの視覚化 - -
- -
- -
- -データがClickHouseに入ったので、分析を行う時が来ました。分析には通常、BIツールを使用して視覚化を構築することが含まれます。多くの人気のあるBIおよび視覚化ツールがClickHouseに接続します。一部はClickHouseにアウトオブボックスで接続される一方、他はコネクタをインストールする必要があります。いくつかのツールに関するドキュメントがあります。 - -- [Apache Superset](./superset-and-clickhouse.md) -- [Astrato](./astrato-and-clickhouse.md) -- [Chartbrew](./chartbrew-and-clickhouse.md) -- [Deepnote](./deepnote.md) -- [Draxlr](./draxlr-and-clickhouse.md) -- [Embeddable](./embeddable-and-clickhouse.md) -- [Explo](./explo-and-clickhouse.md) -- [Grafana](./grafana/index.md) -- [Looker](./looker-and-clickhouse.md) -- [Luzmo](./luzmo-and-clickhouse.md) -- [Metabase](./metabase-and-clickhouse.md) -- [Mitzu](./mitzu-and-clickhouse.md) -- [Omni](./omni-and-clickhouse.md) -- [Rill](https://docs.rilldata.com/reference/olap-engines/clickhouse) -- [Rocket BI](./rocketbi-and-clickhouse.md) -- [Tableau](./tableau/tableau-and-clickhouse.md) -- [Zing Data](./zingdata-and-clickhouse.md) - -## ClickHouse Cloudとデータ視覚化ツールの互換性 {#clickhouse-cloud-compatibility-with-data-visualization-tools} - -| ツール | サポート方法 | テスト済み | ドキュメント化 | コメント | -|--------------------------------------------------------------------------------|----------------------------------|------------|----------------|---------------------------------------------------------------------------------------------------------------------------------------------| -| [Apache Superset](./superset-and-clickhouse.md) | ClickHouse公式コネクタ | ✅ | ✅ | | -| [Astrato](./astrato-and-clickhouse.md) | ネイティブコネクタ | ✅ | ✅ | プッシュダウンSQL(直接クエリのみ)を使用してネイティブに動作します。 | -| [AWS QuickSight](./quicksight-and-clickhouse.md) | MySQLインターフェース | ✅ | ✅ | 一部制限付きで動作します。詳細については[ドキュメント](./quicksight-and-clickhouse.md)を参照してください。 | -| [Chartbrew](./chartbrew-and-clickhouse.md) | ClickHouse公式コネクタ | ✅ | ✅ | | -| [Deepnote](./deepnote.md) | ネイティブコネクタ | ✅ | ✅ | | -| [Explo](./explo-and-clickhouse.md) | ネイティブコネクタ | ✅ | ✅ | | -| [Grafana](./grafana/index.md) | ClickHouse公式コネクタ | ✅ | ✅ | | -| [Hashboard](./hashboard-and-clickhouse.md) | ネイティブコネクタ | ✅ | ✅ | | -| [Looker](./looker-and-clickhouse.md) | ネイティブコネクタ | ✅ | ✅ | 一部制限付きで動作します。詳細については[ドキュメント](./looker-and-clickhouse.md)を参照してください。 | -| Looker | MySQLインターフェース | 🚧 | ❌ | | -| [Luzmo](./luzmo-and-clickhouse.md) | ClickHouse公式コネクタ | ✅ | ✅ | | -| [Looker Studio](./looker-studio-and-clickhouse.md) | MySQLインターフェース | ✅ | ✅ | | -| [Metabase](./metabase-and-clickhouse.md) | ClickHouse公式コネクタ | ✅ | ✅ | | -| [Mitzu](./mitzu-and-clickhouse.md) | ネイティブコネクタ | ✅ | ✅ | | -| [Omni](./omni-and-clickhouse.md) | ネイティブコネクタ | ✅ | ✅ | | -| [Power BI Desktop](./powerbi-and-clickhouse.md) | ClickHouse公式コネクタ | ✅ | ✅ | ODBC経由で接続し、直接クエリモードをサポートします。 | -| [Power BI service](/integrations/powerbi#power-bi-service) | ClickHouse公式コネクタ | ✅ | ✅ | [Microsoft Data Gateway](https://learn.microsoft.com/en-us/power-bi/connect-data/service-gateway-custom-connectors)のセットアップが必要です。 | -| [Rill](https://docs.rilldata.com/reference/olap-engines/clickhouse) | ネイティブコネクタ | ✅ | ✅ | | -| [Rocket BI](./rocketbi-and-clickhouse.md) | ネイティブコネクタ | ✅ | ❌ | | -| [Tableau Desktop](./tableau/tableau-and-clickhouse.md) | ClickHouse公式コネクタ | ✅ | ✅ | | -| [Tableau Online](./tableau/tableau-online-and-clickhouse.md) | MySQLインターフェース | ✅ | ✅ | 一部制限付きで動作します。詳細については[ドキュメント](./tableau/tableau-online-and-clickhouse.md)を参照してください。 | -| [Zing Data](./zingdata-and-clickhouse.md) | ネイティブコネクタ | ✅ | ✅ | | diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/index.md.hash 
b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/index.md.hash deleted file mode 100644 index 9c0d57e687b..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/index.md.hash +++ /dev/null @@ -1 +0,0 @@ -dd3759bf7b12ef17 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/looker-and-clickhouse.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/looker-and-clickhouse.md deleted file mode 100644 index 410fa272725..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/looker-and-clickhouse.md +++ /dev/null @@ -1,76 +0,0 @@ ---- -sidebar_label: 'Looker' -slug: '/integrations/looker' -keywords: -- 'clickhouse' -- 'looker' -- 'connect' -- 'integrate' -- 'ui' -description: 'Looker is an enterprise platform for BI, data applications, and embedded - analytics that helps you explore and share insights in real time.' -title: 'Looker' ---- - -import Image from '@theme/IdealImage'; -import ConnectionDetails from '@site/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_gather_your_details_http.mdx'; -import looker_01 from '@site/static/images/integrations/data-visualization/looker_01.png'; -import looker_02 from '@site/static/images/integrations/data-visualization/looker_02.png'; -import looker_03 from '@site/static/images/integrations/data-visualization/looker_03.png'; -import looker_04 from '@site/static/images/integrations/data-visualization/looker_04.png'; -import CommunityMaintainedBadge from '@theme/badges/CommunityMaintained'; - - -# Looker - - - -Lookerは、公式のClickHouseデータソースを介して、ClickHouse Cloudまたはオンプレミスの展開に接続できます。 - -## 1. 接続詳細を収集する {#1-gather-your-connection-details} - - -## 2. ClickHouseデータソースを作成する {#2-create-a-clickhouse-data-source} - -管理者 -> データベース -> 接続に移動し、右上の「接続を追加」ボタンをクリックします。 - - -
- -データソースの名前を選択し、ダイアレクトのドロップダウンから`ClickHouse`を選択します。フォームに資格情報を入力します。 - - -
- -ClickHouse Cloudを使用している場合や、デプロイがSSLを必要とする場合は、追加設定でSSLがオンになっていることを確認してください。 - - -
- -まず接続をテストし、完了したら新しいClickHouseデータソースに接続します。 - - -
- -これで、ClickHouseデータソースをLookerプロジェクトに接続できるようになるはずです。 - -## 3. 既知の制限 {#3-known-limitations} - -1. 次のデータ型はデフォルトで文字列として扱われます: - * Array - JDBCドライバの制限により、シリアル化が期待通りに機能しません - * Decimal* - モデル内で数値に変更可能です - * LowCardinality(...) - モデル内で適切な型に変更可能です - * Enum8, Enum16 - * UUID - * Tuple - * Map - * JSON - * Nested - * FixedString - * Geoタイプ - * MultiPolygon - * Polygon - * Point - * Ring -2. [対称集約機能](https://cloud.google.com/looker/docs/reference/param-explore-symmetric-aggregates)はサポートされていません -3. [フル外部結合](https://cloud.google.com/looker/docs/reference/param-explore-join-type#full_outer)はまだドライバに実装されていません diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/looker-and-clickhouse.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/looker-and-clickhouse.md.hash deleted file mode 100644 index 41022b3b515..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/looker-and-clickhouse.md.hash +++ /dev/null @@ -1 +0,0 @@ -f435ba65a146bdf9 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/looker-studio-and-clickhouse.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/looker-studio-and-clickhouse.md deleted file mode 100644 index a29c2a3dffe..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/looker-studio-and-clickhouse.md +++ /dev/null @@ -1,89 +0,0 @@ ---- -sidebar_label: 'Looker Studio' -slug: '/integrations/lookerstudio' -keywords: -- 'clickhouse' -- 'looker' -- 'studio' -- 'connect' -- 'mysql' -- 'integrate' -- 'ui' -description: 'Looker Studio, formerly Google Data Studio, is an online tool for - converting data into customizable informative reports and dashboards.' -title: 'Looker Studio' ---- - -import Image from '@theme/IdealImage'; -import MySQLCloudSetup from '@site/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_clickhouse_mysql_cloud_setup.mdx'; -import MySQLOnPremiseSetup from '@site/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_clickhouse_mysql_on_premise_setup.mdx'; -import looker_studio_01 from '@site/static/images/integrations/data-visualization/looker_studio_01.png'; -import looker_studio_02 from '@site/static/images/integrations/data-visualization/looker_studio_02.png'; -import looker_studio_03 from '@site/static/images/integrations/data-visualization/looker_studio_03.png'; -import looker_studio_04 from '@site/static/images/integrations/data-visualization/looker_studio_04.png'; -import looker_studio_05 from '@site/static/images/integrations/data-visualization/looker_studio_05.png'; -import looker_studio_06 from '@site/static/images/integrations/data-visualization/looker_studio_06.png'; -import looker_studio_enable_mysql from '@site/static/images/integrations/data-visualization/looker_studio_enable_mysql.png'; -import looker_studio_mysql_cloud from '@site/static/images/integrations/data-visualization/looker_studio_mysql_cloud.png'; -import CommunityMaintainedBadge from '@theme/badges/CommunityMaintained'; - - -# Looker Studio - - - -Looker Studio は、公式の Google MySQL データソースを使用して MySQL インターフェース経由で ClickHouse に接続できます。 - -## ClickHouse Cloud の設定 {#clickhouse-cloud-setup} - - -## オンプレミスの ClickHouse サーバーの設定 {#on-premise-clickhouse-server-setup} - - -## Looker Studio を ClickHouse に接続する {#connecting-looker-studio-to-clickhouse} - -まず、Google アカウントを使用して https://lookerstudio.google.com にログインし、新しいデータソースを作成します。 - - -
- -Google が提供する公式の MySQL コネクタ(名前は **MySQL** のみ)を検索します。 - - -
- -接続の詳細を指定します。デフォルトで MySQL インターフェースのポートは 9004 ですが、サーバーの設定によって異なる場合がありますのでご注意ください。 - - -
- -次に、ClickHouse からデータを取得する方法について2つのオプションがあります。最初に、テーブルブラウザ機能を使用できます: - - -
- -あるいは、カスタムクエリを指定してデータを取得することもできます: - - -
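For example, a custom query along the following lines could be used here. This is only a sketch that assumes the UK Price Paid example dataset is loaded as `uk_price_paid`; substitute your own table and columns:

```sql
SELECT
    town,
    round(avg(price)) AS avg_price   -- average sale price per town
FROM uk_price_paid
GROUP BY town
ORDER BY avg_price DESC
LIMIT 100
```

Because the statement is ultimately executed by ClickHouse itself (reached over its MySQL interface), ClickHouse functions can generally be used, though it is worth verifying any dialect-specific syntax against your setup.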
- -最後に、内部のテーブル構造を確認し、必要に応じてデータ型を調整できるようになります。 - - -
- -これで、データを探索するか、新しいレポートを作成することができます! - -## ClickHouse Cloud で Looker Studio を使用する {#using-looker-studio-with-clickhouse-cloud} - -ClickHouse Cloud を使用する場合、まず MySQL インターフェースを有効にする必要があります。それは接続ダイアログの「MySQL」タブで行えます。 - - -
- -Looker Studio UI で、「SSL を有効にする」オプションを選択します。ClickHouse Cloud の SSL 証明書は [Let's Encrypt](https://letsencrypt.org/certificates/) によって署名されています。このルート証明書を [こちら](https://letsencrypt.org/certs/isrgrootx1.pem) からダウンロードできます。 - - -
- -残りの手順は、前のセクションに記載されているものと同じです。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/looker-studio-and-clickhouse.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/looker-studio-and-clickhouse.md.hash deleted file mode 100644 index ac56314b5a9..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/looker-studio-and-clickhouse.md.hash +++ /dev/null @@ -1 +0,0 @@ -55ff0384381d66e3 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/luzmo-and-clickhouse.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/luzmo-and-clickhouse.md deleted file mode 100644 index 71b0a689860..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/luzmo-and-clickhouse.md +++ /dev/null @@ -1,74 +0,0 @@ ---- -sidebar_label: 'Luzmo' -slug: '/integrations/luzmo' -keywords: -- 'clickhouse' -- 'Luzmo' -- 'connect' -- 'integrate' -- 'ui' -- 'embedded' -description: 'Luzmoは、ネイティブのClickHouse統合を備えた埋め込み型分析プラットフォームであり、ソフトウェアおよびSaaSアプリケーション向けに特別に設計されています。' -title: 'Integrating Luzmo with ClickHouse' -sidebar: 'integrations' ---- - -import ConnectionDetails from '@site/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_gather_your_details_http.mdx'; -import Image from '@theme/IdealImage'; -import luzmo_01 from '@site/static/images/integrations/data-visualization/luzmo_01.png'; -import luzmo_02 from '@site/static/images/integrations/data-visualization/luzmo_02.png'; -import luzmo_03 from '@site/static/images/integrations/data-visualization/luzmo_03.png'; -import CommunityMaintainedBadge from '@theme/badges/CommunityMaintained'; - - -# LuzmoをClickHouseと統合する - - - -## 1. ClickHouse接続の設定 {#1-setup-a-clickhouse-connection} - -ClickHouseに接続するには、**Connectionsページ**に移動し、**New Connection**を選択して、New ConnectionモーダルからClickHouseを選択します。 - - - -**ホスト**、**ユーザー名**、**パスワード**を提供するように求められます: - - - -* **ホスト**: これはあなたのClickHouseデータベースが公開されているホストです。ここではデータを安全に転送するために `https` のみが許可されていることに注意してください。ホストURLの構造は次のようになります: `https://url-to-clickhouse-db:port/database` - デフォルトでは、このプラグインは 'default' データベースおよび443ポートに接続します。 '/'の後にデータベースを指定することで、接続するデータベースを設定できます。 -* **ユーザー名**: あなたのClickHouseクラスタに接続するために使用されるユーザー名。 -* **パスワード**: あなたのClickHouseクラスタに接続するためのパスワード。 - -私たちのAPIを介してClickHouseに接続を作成する方法については、開発者ドキュメントの例を参照してください。[ClickHouseへの接続を作成する](https://developer.luzmo.com/api/createAccount?exampleSection=AccountCreateClickhouseRequestBody)。 - -## 2. データセットを追加する {#2-add-datasets} - -ClickHouseに接続したら、[こちら](https://academy.luzmo.com/article/ldx3iltg)に説明されているようにデータセットを追加できます。ClickHouseで利用可能な1つまたは複数のデータセットを選択し、Luzmoにリンクしてダッシュボードで一緒に使用できるようにします。また、[分析のためのデータを準備する](https://academy.luzmo.com/article/u492qov0)に関するこの記事もチェックしてください。 - -私たちのAPIを使ってデータセットを追加する方法については、[開発者ドキュメントのこの例](https://developer.luzmo.com/api/createDataprovider?exampleSection=DataproviderCreateClickhouseRequestBody)を参照してください。 - -データセットを使用して美しい(埋め込まれた)ダッシュボードを構築したり、クライアントの質問に答えることができるAIデータアナリスト([Luzmo IQ](https://luzmo.com/iq))に力を与えたりすることができます。 - - - -## 使用上の注意 {#usage-notes} - -1. Luzmo ClickHouseコネクタは、HTTP APIインターフェース(通常はポート8123で実行)を使用して接続します。 -2. `Distributed` テーブルエンジンを使用しているテーブルを使用する場合、一部のLuzmoチャートは `distributed_product_mode`が `deny` のときに失敗する可能性があります。しかし、これは他のテーブルにリンクしてそのリンクをチャートで使用する場合にのみ発生する必要があります。その場合、ClickHouseクラスタ内であなたにとって理にかなう他のオプションに `distributed_product_mode`を設定してください。ClickHouse Cloudを使用している場合、この設定は無視しても安全です。 -3. 
例えば、LuzmoアプリケーションのみがあなたのClickHouseインスタンスにアクセスできるようにするためには、[Luzmoの静的IPアドレスの範囲をホワイトリストに追加する](https://academy.luzmo.com/article/u9on8gbm)ことを強くお勧めします。また、技術的な読み取り専用ユーザーを使用することも推奨します。 -4. 現在、ClickHouseコネクタは以下のデータ型をサポートしています: - - | ClickHouseタイプ | Luzmoタイプ | - | --- | --- | - | UInt | numeric | - | Int | numeric | - | Float | numeric | - | Decimal | numeric | - | Date | datetime | - | DateTime | datetime | - | String | hierarchy | - | Enum | hierarchy | - | FixedString | hierarchy | - | UUID | hierarchy | - | Bool | hierarchy | diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/luzmo-and-clickhouse.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/luzmo-and-clickhouse.md.hash deleted file mode 100644 index 190d60ff362..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/luzmo-and-clickhouse.md.hash +++ /dev/null @@ -1 +0,0 @@ -4e22be15ac741c2d diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/metabase-and-clickhouse.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/metabase-and-clickhouse.md deleted file mode 100644 index 33966f518c9..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/metabase-and-clickhouse.md +++ /dev/null @@ -1,109 +0,0 @@ ---- -sidebar_label: 'Metabase' -sidebar_position: 131 -slug: '/integrations/metabase' -keywords: -- 'ClickHouse' -- 'Metabase' -- 'connect' -- 'integrate' -- 'ui' -description: 'Metabaseは、データに関する質問をするための使いやすいオープンソースUIツールです。' -title: 'Connecting Metabase to ClickHouse' ---- - -import Image from '@theme/IdealImage'; -import ConnectionDetails from '@site/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_gather_your_details_http.mdx'; -import metabase_01 from '@site/static/images/integrations/data-visualization/metabase_01.png'; -import metabase_02 from '@site/static/images/integrations/data-visualization/metabase_02.png'; -import metabase_03 from '@site/static/images/integrations/data-visualization/metabase_03.png'; -import metabase_04 from '@site/static/images/integrations/data-visualization/metabase_04.png'; -import metabase_06 from '@site/static/images/integrations/data-visualization/metabase_06.png'; -import metabase_07 from '@site/static/images/integrations/data-visualization/metabase_07.png'; -import metabase_08 from '@site/static/images/integrations/data-visualization/metabase_08.png'; -import CommunityMaintainedBadge from '@theme/badges/CommunityMaintained'; - - -# ClickHouseへのMetabaseの接続 - - - -Metabaseは、データに関する質問を行うための使いやすいオープンソースのUIツールです。MetabaseはJavaアプリケーションであり、単にJARファイルをダウンロードして、`java -jar metabase.jar`を実行することで実行できます。Metabaseは、ダウンロードして`plugins`フォルダに置くJDBCドライバを使用してClickHouseに接続します。 - -## 目標 {#goal} - -このガイドでは、Metabaseを使用してClickHouseデータにいくつかの質問を行い、その回答を可視化します。回答の1つはこのように見えます: - - -
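The SQL behind an answer of this kind is sketched below, assuming the UK Price Paid example dataset is loaded as `uk_price_paid` (step 4 of this guide runs essentially the same query: average price per year, 1995 to 2022):

```sql
SELECT
    toYear(date) AS year,
    round(avg(price)) AS avg_price   -- average sale price per year
FROM uk_price_paid
WHERE date BETWEEN toDate('1995-01-01') AND toDate('2022-12-31')
GROUP BY year
ORDER BY year
```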

- -:::tip データを追加する -作業するためのデータセットがない場合は、例の1つを追加できます。このガイドでは[UK Price Paid](/getting-started/example-datasets/uk-price-paid.md)データセットを使用しているので、それを選択してもよいでしょう。同じ文書カテゴリに他にもいくつかの候補があります。 -::: - -## 1. 接続詳細を集める {#1-gather-your-connection-details} - - -## 2. Metabase用のClickHouseプラグインをダウンロードする {#2--download-the-clickhouse-plugin-for-metabase} - -1. `plugins`フォルダがない場合は、`metabase.jar`を保存しているフォルダのサブフォルダとして作成します。 - -2. プラグインは`clickhouse.metabase-driver.jar`という名前のJARファイルです。JARファイルの最新バージョンをhttps://github.com/clickhouse/metabase-clickhouse-driver/releases/latestからダウンロードします。 - -3. `clickhouse.metabase-driver.jar`を`plugins`フォルダに保存します。 - -4. Metabaseを起動(または再起動)して、ドライバが正常に読み込まれるようにします。 - -5. http://hostname:3000でMetabaseにアクセスします。初回起動時には歓迎画面が表示され、一連の質問を通過する必要があります。データベースを選択するように促された場合は、「**後でデータを追加します**」を選択します: - -## 3. MetabaseをClickHouseに接続する {#3--connect-metabase-to-clickhouse} - -1. 右上隅の歯車アイコンをクリックして**管理設定**を選択し、Metabaseの管理ページにアクセスします。 - -2. **データベースを追加**をクリックします。あるいは、**データベース**タブをクリックして**データベースを追加**ボタンを選択できます。 - -3. ドライバのインストールが成功していれば、**データベースタイプ**のドロップダウンメニューに**ClickHouse**が表示されます: - - - -4. データベースに**表示名**を付けます。これはMetabaseの設定なので、お好きな名前を使用してください。 - -5. ClickHouseデータベースの接続詳細を入力します。ClickHouseサーバーがSSLを使用するように設定されている場合は、安全な接続を有効にします。例えば: - - - -6. **保存**ボタンをクリックすると、Metabaseはデータベース内のテーブルをスキャンします。 - -## 4. SQLクエリを実行する {#4-run-a-sql-query} - -1. 右上隅の**管理設定を終了**ボタンをクリックして、**管理設定**から退出します。 - -2. 右上隅で**+ 新規**メニューをクリックし、質問の作成、SQLクエリの実行、ダッシュボードの構築ができることに気づきます: - - - -3. 例えば、1995年から2022年までの年ごとの平均価格を返す`uk_price_paid`という名前のテーブルで実行されたSQLクエリは以下の通りです: - - - -## 5. 質問をする {#5-ask-a-question} - -1. **+ 新規**をクリックして**質問**を選択します。データベースとテーブルから開始して質問を構築できることに気づきます。例えば、次の質問は`default`データベースにある`uk_price_paid`というテーブルに対して行われています。ここでは、グレーター・マンチェスター郡内の町ごとの平均価格を計算する簡単な質問です: - - - -2. **可視化**ボタンをクリックして、結果を表形式で表示します。 - - - -3. 結果の下にある**可視化**ボタンをクリックして、視覚化を棒グラフに変更します(または他のどのオプションでも可能です): - - - -## 詳しく学ぶ {#learn-more} - -Metabaseやダッシュボードの構築方法についての詳細情報は、Metabaseのドキュメントを訪れることで得られます。 - -## 関連コンテンツ {#related-content} - -- ブログ: [ClickHouseでのデータの可視化 - 第3部 - Metabase](https://clickhouse.com/blog/visualizing-data-with-metabase) diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/metabase-and-clickhouse.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/metabase-and-clickhouse.md.hash deleted file mode 100644 index 1fe3201bdb2..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/metabase-and-clickhouse.md.hash +++ /dev/null @@ -1 +0,0 @@ -9e0a2662c97faf57 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/mitzu-and-clickhouse.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/mitzu-and-clickhouse.md deleted file mode 100644 index c669a762c57..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/mitzu-and-clickhouse.md +++ /dev/null @@ -1,176 +0,0 @@ ---- -sidebar_label: 'Mitzu' -slug: '/integrations/mitzu' -keywords: -- 'clickhouse' -- 'Mitzu' -- 'connect' -- 'integrate' -- 'ui' -description: 'Mitzu is a no-code warehouse-native product analytics application.' 
-title: 'Connecting Mitzu to ClickHouse' ---- - -import ConnectionDetails from '@site/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_gather_your_details_http.mdx'; -import Image from '@theme/IdealImage'; -import mitzu_01 from '@site/static/images/integrations/data-visualization/mitzu_01.png'; -import mitzu_02 from '@site/static/images/integrations/data-visualization/mitzu_02.png'; -import mitzu_03 from '@site/static/images/integrations/data-visualization/mitzu_03.png'; -import mitzu_04 from '@site/static/images/integrations/data-visualization/mitzu_04.png'; -import mitzu_05 from '@site/static/images/integrations/data-visualization/mitzu_05.png'; -import mitzu_06 from '@site/static/images/integrations/data-visualization/mitzu_06.png'; -import mitzu_07 from '@site/static/images/integrations/data-visualization/mitzu_07.png'; -import mitzu_08 from '@site/static/images/integrations/data-visualization/mitzu_08.png'; -import mitzu_09 from '@site/static/images/integrations/data-visualization/mitzu_09.png'; -import mitzu_10 from '@site/static/images/integrations/data-visualization/mitzu_10.png'; -import mitzu_11 from '@site/static/images/integrations/data-visualization/mitzu_11.png'; -import CommunityMaintainedBadge from '@theme/badges/CommunityMaintained'; - - -# Connecting Mitzu to ClickHouse - - - -Mitzuは、ノーコード、ウェアハウスネイティブなプロダクト分析アプリケーションです。Amplitude、Mixpanel、PostHogなどのツールに似て、MitzuはユーザーがSQLやPythonの専門知識なしでプロダクトの使用データを分析できるようにします。 - -しかし、これらのプラットフォームとは異なり、Mitzuは会社のプロダクト使用データを複製しません。代わりに、既存のデータウェアハウスまたはデータレイク上でネイティブSQLクエリを生成します。 - -## Goal {#goal} - -本ガイドでは、以下の内容をカバーします: - -- ウェアハウスネイティブプロダクト分析 -- MitzuをClickHouseに統合する方法 - -:::tip 例のデータセット -Mitzu用のデータセットがない場合は、NYC Taxi Dataを使用できます。このデータセットはClickHouse Cloudに利用可能で、[これらの指示でロードできます](/getting-started/example-datasets/nyc-taxi)。 -::: - -このガイドはMitzuの使用方法の簡単な概要です。より詳細な情報は[Mitzuのドキュメント](https://docs.mitzu.io/)で確認できます。 - -## 1. 接続情報を収集する {#1-gather-your-connection-details} - - - -## 2. Mitzuにサインインまたはサインアップする {#2-sign-in-or-sign-up-to-mitzu} - -最初のステップとして、[https://app.mitzu.io](https://app.mitzu.io)にアクセスしてサインアップしてください。 - - - -## 3. ワークスペースを設定する {#3-configure-your-workspace} - -組織を作成した後、左側のサイドバーにある`ワークスペースを設定する`オンボーディングガイドに従ってください。次に、`Mitzuをデータウェアハウスに接続する`リンクをクリックします。 - - - -## 4. MitzuをClickHouseに接続する {#4-connect-mitzu-to-clickhouse} - -最初に接続タイプとしてClickHouseを選択し、接続情報を設定します。次に、`接続をテストして保存`ボタンをクリックして設定を保存します。 - - - -## 5. イベントテーブルを設定する {#5-configure-event-tables} - -接続が保存されたら、`イベントテーブル`タブを選択し、`テーブルを追加`ボタンをクリックします。モーダル内で、データベースとMitzuに追加したいテーブルを選択します。 - -チェックボックスを使用して少なくとも1つのテーブルを選択し、`テーブルを設定`ボタンをクリックします。これにより、各テーブルのキーとなるカラムを設定できるモーダルウィンドウが開きます。 - - -
- -> ClickHouse設定でプロダクト分析を実行するには、テーブルからいくつかのキーとなるカラムを指定する必要があります。 -> -> これらのカラムは以下の通りです: -> -> - **ユーザーID** - ユーザーの一意の識別子に関するカラム。 -> - **イベント時刻** - イベントのタイムスタンプカラム。 -> - オプション[**イベント名**] - このカラムは、テーブルが複数のイベントタイプを含む場合にイベントをセグメント化します。 - - -
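As a purely illustrative sketch, the table and column names below are hypothetical; Mitzu only needs you to point at whichever existing columns play these roles. An event table that provides all three could look like this:

```sql
CREATE TABLE product_events
(
    user_id    UInt64,                  -- User ID: unique identifier of the user
    event_time DateTime,                -- Event time: timestamp of the event
    event_name LowCardinality(String)   -- Event name: only needed when one table stores several event types
)
ENGINE = MergeTree
ORDER BY (user_id, event_time);
```

Any existing schema works as long as these columns can be identified; the DDL above is only meant to make the role of each column concrete.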
- -全てのテーブルが設定されたら、`イベントカタログを保存して更新`ボタンをクリックすると、Mitzuが上記で定義されたテーブルから全てのイベントとそのプロパティを見つけます。このステップはデータセットのサイズに応じて数分かかる場合があります。 - -## 6. セグメンテーションクエリを実行する {#6-run-segmentation-queries} - -Mitzuでのユーザーセグメンテーションは、Amplitude、Mixpanel、またはPostHogと同じくらい簡単です。 - -Exploreページにはイベントのための左側の選択エリアがあり、上部セクションではタイムホライズンを設定できます。 - -
- -:::tip フィルターとブレイクダウン -フィルタリングは予想通りに行われます:プロパティ(ClickHouseカラム)を選択し、フィルタリングしたい値をドロップダウンから選択します。 -ブレイクダウンには任意のイベントまたはユーザープロパティを選択できます(ユーザープロパティの統合方法については以下を参照)。 -::: - -## 5. ファネルクエリを実行する {#5-run-funnel-queries} - -ファネルには最大9ステップを選択できます。ユーザーがファネルを完了できる時間ウィンドウを選択します。 -SQLコードを1行も書かずに即座にコンバージョン率を把握できます。 - - - -
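-
-こうしたファネル集計を手書きのSQLで行う場合のイメージを示すと、例えばClickHouseの`windowFunnel`関数を使って次のように書けます(`events`テーブルや`signup`・`purchase`というイベント名は仮のもので、Mitzuが実際に生成するクエリとは異なります)。
-
-```sql
--- 仮の例: 24時間以内に signup → purchase と進んだユーザー数を段階別に数える
--- (events テーブルとイベント名は仮のものです)
-SELECT
-    level,
-    count() AS users
-FROM
-(
-    SELECT
-        user_id,
-        windowFunnel(86400)(event_time, event_name = 'signup', event_name = 'purchase') AS level
-    FROM events
-    GROUP BY user_id
-)
-GROUP BY level
-ORDER BY level;
-```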
- -:::tip トレンドを視覚化 -`ファネルトレンド`を選択して、時間を通じたファネルトレンドを視覚化します。 -::: - -## 6. リテンションクエリを実行する {#6-run-retention-queries} - -リテンションレート計算には最大2ステップを選択できます。繰り返しウィンドウのリテンションウィンドウを選択します。 -SQLコードを1行も書かずに即座にコンバージョン率を把握できます。 - - - -
- -:::tip コホートリテンション -`週間コホートリテンション`を選択して、リテンションレートが時間と共にどのように変化するかを視覚化します。 -::: - - -## 7. ジャーニークエリを実行する {#7-run-journey-queries} -ファネルには最大9ステップを選択できます。ユーザーがジャーニーを完了できる時間ウィンドウを選択します。Mitzuのジャーニーチャートは、選択されたイベントを通じてユーザーがたどるすべての経路の視覚マップを提供します。 - - -
- -:::tip ステップを分解する -セグメント`Break down`のプロパティを選択して、同じステップ内のユーザーを区別できます。 -::: - -
- -## 8. 収益クエリを実行する {#8-run-revenue-queries} -収益設定が構成されている場合、Mitzuは支払いイベントに基づいて総MRRとサブスクリプション数を計算できます。 - - - -## 9. SQLネイティブ {#9-sql-native} - -MitzuはSQLネイティブであり、これはExploreページで選択した構成からネイティブSQLコードを生成することを意味します。 - - - -
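-
-生成されるSQLのイメージとして、例えば「直近30日間の日別ユニークユーザー数」というシンプルなセグメンテーションであれば、概ね次のようなクエリに相当します(テーブル名・カラム名は仮のもので、実際に生成されるSQLとは細部が異なります)。
-
-```sql
--- 仮の例: 日別ユニークユーザー数のセグメンテーションに相当するクエリ
--- (events テーブル・page_view というイベント名は仮のものです)
-SELECT
-    toDate(event_time) AS day,
-    uniqExact(user_id) AS unique_users
-FROM events
-WHERE event_name = 'page_view'
-  AND event_time >= now() - INTERVAL 30 DAY
-GROUP BY day
-ORDER BY day;
-```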
- -:::tip BIツールで作業を続ける -MitzuのUIで制限に直面した場合、SQLコードをコピーしてBIツールで作業を続けてください。 -::: - -## Mitzuサポート {#mitzu-support} - -迷った場合は、[support@mitzu.io](email://support@mitzu.io)までお気軽にご連絡ください。 - -または、私たちのSlackコミュニティには[こちら](https://join.slack.com/t/mitzu-io/shared_invite/zt-1h1ykr93a-_VtVu0XshfspFjOg6sczKg)で参加できます。 - -## 詳細を学ぶ {#learn-more} - -Mitzuの詳細情報は[mitzu.io](https://mitzu.io)で見つけられます。 - -私たちのドキュメントページには[docs.mitzu.io](https://docs.mitzu.io)を訪れてください。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/mitzu-and-clickhouse.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/mitzu-and-clickhouse.md.hash deleted file mode 100644 index d71fe26bebc..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/mitzu-and-clickhouse.md.hash +++ /dev/null @@ -1 +0,0 @@ -af47c0b6c5d04e31 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/omni-and-clickhouse.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/omni-and-clickhouse.md deleted file mode 100644 index 3a8dc162c77..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/omni-and-clickhouse.md +++ /dev/null @@ -1,43 +0,0 @@ ---- -sidebar_label: 'Omni' -slug: '/integrations/omni' -keywords: -- 'clickhouse' -- 'Omni' -- 'connect' -- 'integrate' -- 'ui' -description: 'Omniは、BI、データアプリケーション、組み込みアナリティクス向けのエンタープライズプラットフォームであり、リアルタイムで洞察を探索し共有するのに役立ちます。' -title: 'Omni' ---- - -import ConnectionDetails from '@site/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_gather_your_details_http.mdx'; -import Image from '@theme/IdealImage'; -import omni_01 from '@site/static/images/integrations/data-visualization/omni_01.png'; -import omni_02 from '@site/static/images/integrations/data-visualization/omni_02.png'; -import CommunityMaintainedBadge from '@theme/badges/CommunityMaintained'; - - -# Omni - - - -Omniは、公式のClickHouseデータソースを介して、ClickHouse Cloudまたはオンプレミスのデプロイメントに接続できます。 - -## 1. 接続情報を集める {#1-gather-your-connection-details} - - - -## 2. ClickHouseデータソースを作成する {#2-create-a-clickhouse-data-source} - -「Admin」->「Connections」に移動し、右上隅の「Add Connection」ボタンをクリックします。 - - -
- -`ClickHouse`を選択します。フォームに認証情報を入力します。 - - -
- -これで、OmniでClickHouseからデータをクエリおよび視覚化できるようになります。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/omni-and-clickhouse.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/omni-and-clickhouse.md.hash deleted file mode 100644 index 321ff035432..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/omni-and-clickhouse.md.hash +++ /dev/null @@ -1 +0,0 @@ -27748bf6ceeeda0d diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/powerbi-and-clickhouse.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/powerbi-and-clickhouse.md deleted file mode 100644 index b9d91374095..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/powerbi-and-clickhouse.md +++ /dev/null @@ -1,246 +0,0 @@ ---- -sidebar_label: 'Power BI' -slug: '/integrations/powerbi' -keywords: -- 'clickhouse' -- 'Power BI' -- 'connect' -- 'integrate' -- 'ui' -description: 'Microsoft Power BIは、Microsoftによって開発された対話型のデータ可視化ソフトウェア製品で、ビジネスインテリジェンスを主眼に置いています。' -title: 'Power BI' ---- - -import ConnectionDetails from '@site/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_gather_your_details_http.mdx'; -import Image from '@theme/IdealImage'; -import powerbi_odbc_install from '@site/static/images/integrations/data-visualization/powerbi_odbc_install.png'; -import powerbi_odbc_search from '@site/static/images/integrations/data-visualization/powerbi_odbc_search.png'; -import powerbi_odbc_verify from '@site/static/images/integrations/data-visualization/powerbi_odbc_verify.png'; -import powerbi_get_data from '@site/static/images/integrations/data-visualization/powerbi_get_data.png'; -import powerbi_search_clickhouse from '@site/static/images/integrations/data-visualization/powerbi_search_clickhouse.png'; -import powerbi_connect_db from '@site/static/images/integrations/data-visualization/powerbi_connect_db.png'; -import powerbi_connect_user from '@site/static/images/integrations/data-visualization/powerbi_connect_user.png'; -import powerbi_table_navigation from '@site/static/images/integrations/data-visualization/powerbi_table_navigation.png'; -import powerbi_add_dsn from '@site/static/images/integrations/data-visualization/powerbi_add_dsn.png'; -import powerbi_select_unicode from '@site/static/images/integrations/data-visualization/powerbi_select_unicode.png'; -import powerbi_connection_details from '@site/static/images/integrations/data-visualization/powerbi_connection_details.png'; -import powerbi_select_odbc from '@site/static/images/integrations/data-visualization/powerbi_select_odbc.png'; -import powerbi_select_dsn from '@site/static/images/integrations/data-visualization/powerbi_select_dsn.png'; -import powerbi_dsn_credentials from '@site/static/images/integrations/data-visualization/powerbi_dsn_credentials.png'; -import powerbi_16 from '@site/static/images/integrations/data-visualization/powerbi_16.png'; -import ClickHouseSupportedBadge from '@theme/badges/ClickHouseSupported'; - - -# Power BI - - - -Microsoft Power BIは、[ClickHouse Cloud](https://clickhouse.com/cloud)またはセルフマネージドデプロイメントからデータをクエリしたり、メモリに読み込んだりすることができます。 - -データを可視化するために使用できるPower BIにはいくつかのバリエーションがあります。 - -* Power BI Desktop: ダッシュボードやビジュアライゼーションを作成するためのWindowsデスクトップアプリケーション -* Power BI Service: Power BI Desktopで作成したダッシュボードをホストするためのSaaSとしてAzure内で利用可能 - -Power BIでは、デスクトップ版でダッシュボードを作成し、それをPower BI Serviceに公開する必要があります。 - 
-このチュートリアルでは、以下のプロセスについてガイドします。 - -* [ClickHouse ODBCドライバのインストール](#install-the-odbc-driver) -* [Power BI DesktopへのClickHouse Power BIコネクタのインストール](#power-bi-installation) -* [Power BI Desktopでの可視化のためにClickHouseからデータをクエリする](#query-and-visualise-data) -* [Power BI Serviceのためのオンプレミスデータゲートウェイの設定](#power-bi-service) - -## 前提条件 {#prerequisites} - -### Power BIのインストール {#power-bi-installation} - -このチュートリアルでは、WindowsマシンにMicrosoft Power BI Desktopがインストールされていることを前提としています。Power BI Desktopは[こちら](https://www.microsoft.com/en-us/download/details.aspx?id=58494)からダウンロードしてインストールできます。 - -Power BIの最新バージョンへの更新をお勧めします。ClickHouseコネクタはバージョン`2.137.751.0`からデフォルトで利用可能です。 - -### ClickHouse接続情報の収集 {#gather-your-clickhouse-connection-details} - -ClickHouseインスタンスに接続するために、以下の情報が必要です。 - -* ホスト名 - ClickHouse -* ユーザー名 - ユーザーの資格情報 -* パスワード - ユーザーのパスワード -* データベース - 接続したいインスタンスのデータベース名 - -## Power BI Desktop {#power-bi-desktop} - -Power BI Desktopでデータをクエリするために、以下のステップを完了する必要があります。 - -1. ClickHouse ODBCドライバをインストールする -2. ClickHouseコネクタを探す -3. ClickHouseに接続する -4. データをクエリして可視化する - -### ODBCドライバのインストール {#install-the-odbc-driver} - -最新の[ClickHouse ODBCリリース](https://github.com/ClickHouse/clickhouse-odbc/releases)をダウンロードします。 - -提供された`.msi`インストーラーを実行し、ウィザードに従ってください。 - - -
- -:::note -`デバッグシンボル`はオプションであり、必須ではありません。 -::: - -#### ODBCドライバの確認 {#verify-odbc-driver} - -ドライバのインストールが完了したら、次の手順でインストールが成功したかどうかを確認できます。 - -スタートメニューでODBCを検索し、「ODBCデータソース **(64ビット)**」を選択します。 - - -
- -ClickHouseドライバが一覧に表示されていることを確認します。 - - -
- -### ClickHouseコネクタを探す {#find-the-clickhouse-connector} - -:::note -Power BI Desktopのバージョン`2.137.751.0`で利用可能 -::: -Power BI Desktopのスタート画面で「データを取得」をクリックします。 - - -
- -「ClickHouse」を検索します。 - - -
- -### ClickHouseに接続する {#connect-to-clickhouse} - -コネクタを選択し、ClickHouseインスタンスの資格情報を入力します: - -* ホスト(必須) - インスタンスのドメイン/アドレス。接頭辞/接尾辞なしで追加してください。 -* ポート(必須) - インスタンスのポート。 -* データベース - データベース名。 -* オプション - [ClickHouse ODBC GitHubページ](https://github.com/ClickHouse/clickhouse-odbc#configuration)にリストされている任意のODBCオプション。 -* データ接続モード - DirectQuery - - -
- -:::note -ClickHouseに直接クエリを行うためにDirectQueryを選択することをお勧めします。 - -データの量が少ないユースケースがある場合には、インポートモードを選択すると、すべてのデータがPower BIに読み込まれます。 -::: - -* ユーザー名とパスワードを指定します。 - - -
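-
-接続がうまくいかない場合や、ナビゲータにどのテーブルが表示されるはずかを先に確認しておきたい場合は、ClickHouse側(`clickhouse-client`やCloudのSQLコンソールなど)で次のようなクエリを実行しておくと切り分けに役立ちます(あくまで補助的な確認の一例です)。
-
-```sql
--- 指定したデータベースに含まれるテーブルの一覧を確認する
-SHOW TABLES FROM default;
-
--- 接続先のバージョンと現在のユーザーを確認する
-SELECT version(), currentUser();
-```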
- -### データをクエリして可視化する {#query-and-visualise-data} - -最後に、ナビゲータビューにデータベースとテーブルが表示されるはずです。目的のテーブルを選択して「読み込む」をクリックし、ClickHouseからデータをインポートします。 - - -
- -インポートが完了すると、ClickHouseデータは通常通りPower BIでアクセス可能になります。 -
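-
-なお、DirectQueryモードではPower BI側の操作のたびにClickHouseでクエリが実行されます。実際にどのようなクエリが発行されているかを確認したい場合は、例えば次のように`system.query_log`を参照できます(動作確認の一例です)。
-
-```sql
--- 直近に実行が完了したクエリをClickHouse側で確認する
-SELECT
-    event_time,
-    query_duration_ms,
-    query
-FROM system.query_log
-WHERE type = 'QueryFinish'
-ORDER BY event_time DESC
-LIMIT 10;
-```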
- -## Power BI Service {#power-bi-service} - -Microsoft Power BI Serviceを使用するには、[オンプレミスデータゲートウェイ](https://learn.microsoft.com/en-us/power-bi/connect-data/service-gateway-onprem)を作成する必要があります。 - -カスタムコネクタを設定する方法の詳細については、Microsoftのドキュメントを参照してください。[オンプレミスデータゲートウェイでカスタムデータコネクタを使用する方法](https://learn.microsoft.com/en-us/power-bi/connect-data/service-gateway-custom-connectors)をご覧ください。 - -## ODBCドライバ(インポートのみ) {#odbc-driver-import-only} - -DirectQueryを使用するClickHouseコネクタの利用をお勧めします。 - -オンプレミスデータゲートウェイインスタンスに[ODBCドライバ](#install-the-odbc-driver)をインストールし、上記の手順に従って[確認](#verify-odbc-driver)します。 - -### 新しいユーザーDSNを作成する {#create-a-new-user-dsn} - -ドライバのインストールが完了すると、ODBCデータソースを作成できます。スタートメニューでODBCを検索し、「ODBCデータソース (64ビット)」を選択します。 - - -
- -ここに新しいユーザーDSNを追加する必要があります。左側の「追加」ボタンをクリックします。 - - -
- -ODBCドライバのUnicode版を選択します。 - - -
- -接続情報を入力します。 - - -
- -:::note -SSLが有効なデプロイ(例:ClickHouse Cloudまたはセルフマネージドインスタンス)を使用している場合は、`SSLMode`フィールドに`require`を指定する必要があります。 - -- `Host`には常にプロトコル(`http://`または`https://`)を省略する必要があります。 -- `Timeout`は秒で表される整数です。デフォルト値:`30秒`。 -::: - -### Power BIにデータを取得する {#get-data-into-power-bi} - -まだPower BIをインストールしていない場合は、[Power BI Desktopをダウンロードしてインストール](https://www.microsoft.com/en-us/download/details.aspx?id=58494)します。 - -Power BI Desktopのスタート画面で「データを取得」をクリックします。 - - -
- -「その他」->「ODBC」を選択します。 - - -
- -リストから前に作成したデータソースを選択します。 - - -
- -:::note -データソース作成時に資格情報を指定しなかった場合、ユーザー名とパスワードを指定するように求められます。 -::: - - -
- -最後に、ナビゲータビューにデータベースとテーブルが表示されるはずです。目的のテーブルを選択して「読み込む」をクリックし、ClickHouseからデータをインポートします。 - - -
- -インポートが完了すると、ClickHouseデータは通常通りPower BIでアクセス可能になります。 - -## 既知の制限事項 {#known-limitations} - -### UInt64 {#uint64} - -UInt64やそれ以上の符号なし整数型は、自動的にデータセットに読み込まれないため、Int64がPower BIによってサポートされる最大の整数型です。 - -:::note -データを正しくインポートするには、ナビゲータで「読み込む」ボタンを押す前に「データを変換」をクリックしてください。 -::: - -この例では、`pageviews`テーブルにはUInt64カラムがあり、デフォルトでは「バイナリ」として認識されます。 -「データの変換」をクリックするとPower Query Editorが開き、カラムの型を再割り当てすることができ、例えば、テキストとして設定できます。 - - -
- -完了したら、左上隅の「閉じて適用」をクリックし、データの読み込みを続けます。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/powerbi-and-clickhouse.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/powerbi-and-clickhouse.md.hash deleted file mode 100644 index 53d3b7dafd1..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/powerbi-and-clickhouse.md.hash +++ /dev/null @@ -1 +0,0 @@ -a74fbed001f84722 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/quicksight-and-clickhouse.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/quicksight-and-clickhouse.md deleted file mode 100644 index 19416bf4925..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/quicksight-and-clickhouse.md +++ /dev/null @@ -1,168 +0,0 @@ ---- -sidebar_label: 'QuickSight' -slug: '/integrations/quicksight' -keywords: -- 'clickhouse' -- 'aws' -- 'amazon' -- 'QuickSight' -- 'mysql' -- 'connect' -- 'integrate' -- 'ui' -description: 'Amazon QuickSightは、統合されたビジネスインテリジェンス(BI)でデータ駆動型の組織を支援します。' -title: 'QuickSight' ---- - -import MySQLOnPremiseSetup from '@site/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_clickhouse_mysql_on_premise_setup.mdx'; -import Image from '@theme/IdealImage'; -import quicksight_01 from '@site/static/images/integrations/data-visualization/quicksight_01.png'; -import quicksight_02 from '@site/static/images/integrations/data-visualization/quicksight_02.png'; -import quicksight_03 from '@site/static/images/integrations/data-visualization/quicksight_03.png'; -import quicksight_04 from '@site/static/images/integrations/data-visualization/quicksight_04.png'; -import quicksight_05 from '@site/static/images/integrations/data-visualization/quicksight_05.png'; -import quicksight_06 from '@site/static/images/integrations/data-visualization/quicksight_06.png'; -import quicksight_07 from '@site/static/images/integrations/data-visualization/quicksight_07.png'; -import CommunityMaintainedBadge from '@theme/badges/CommunityMaintained'; - - -# QuickSight - - - -QuickSightは、公式のMySQLデータソースとDirect Queryモードを使用して、オンプレミスのClickHouseセットアップ (23.11以上) にMySQLインターフェースで接続できます。 - -## オンプレミスのClickHouseサーバー設定 {#on-premise-clickhouse-server-setup} - -ClickHouseサーバーをMySQLインターフェースで設定する方法については、[公式ドキュメント](/interfaces/mysql) を参照してください。 - -サーバーの `config.xml` にエントリを追加することに加えて、 - -```xml - - 9004 - -``` - -MySQLインターフェースを使用するユーザーに対して、[Double SHA1パスワード暗号化](/operations/settings/settings-users#user-namepassword) を使用することが**必須**です。 - -シェルからDouble SHA1で暗号化されたランダムパスワードを生成するには: - -```shell -PASSWORD=$(base64 < /dev/urandom | head -c16); echo "$PASSWORD"; echo -n "$PASSWORD" | sha1sum | tr -d '-' | xxd -r -p | sha1sum | tr -d '-' -``` - -出力は以下のようになります。 - -```text -LZOQYnqQN4L/T6L0 -fbc958cc745a82188a51f30de69eebfc67c40ee4 -``` - -最初の行は生成されたパスワードで、2行目はClickHouseの設定に使用できるハッシュです。 - -以下は、生成されたハッシュを使用した `mysql_user` の設定例です。 - -`/etc/clickhouse-server/users.d/mysql_user.xml` - -```xml - - - fbc958cc745a82188a51f30de69eebfc67c40ee4 - - ::/0 - - default - default - - -``` - -`password_double_sha1_hex` のエントリを、生成したDouble SHA1ハッシュと置き換えてください。 - -QuickSightは、MySQLユーザーのプロファイルにいくつかの追加設定を要求します。 - -`/etc/clickhouse-server/users.d/mysql_user.xml` - -```xml - - - 1 - 1 - 1 - - -``` - -ただし、デフォルトのプロファイルではなく、MySQLユーザーが使用できる別のプロファイルに割り当てることをお勧めします。 - -最後に、クリックハウスサーバーを希望のIPアドレスでリッスンするように構成します。 -`config.xml` で、すべてのアドレスでリッスンするように以下の行のコメントアウトを外します。 - -```bash -:: 
-``` - -`mysql` バイナリが利用可能であれば、コマンドラインから接続をテストできます。 -上記のサンプルユーザー名 (`mysql_user`) とパスワード (`LZOQYnqQN4L/T6L0`) を使用した場合、コマンドラインは以下のようになります。 - -```bash -mysql --protocol tcp -h localhost -u mysql_user -P 9004 --password=LZOQYnqQN4L/T6L0 -``` - -```response -mysql> show databases; -+--------------------+ -| name | -+--------------------+ -| INFORMATION_SCHEMA | -| default | -| information_schema | -| system | -+--------------------+ -4 rows in set (0.00 sec) -Read 4 rows, 603.00 B in 0.00156 sec., 2564 rows/sec., 377.48 KiB/sec. -``` - -## QuickSightをClickHouseに接続する {#connecting-quicksight-to-clickhouse} - -最初に、 https://quicksight.aws.amazon.com にアクセスし、データセットに移動して「新しいデータセット」をクリックします。 - - -
- -QuickSightにバンドルされている公式のMySQLコネクタを検索します(名称は**MySQL**)。 - - -
- -接続詳細を指定します。MySQLインターフェースポートはデフォルトで9004ですが、サーバー構成によって異なる場合があります。 - - -
- -ClickHouseからデータを取得する方法として、2つの選択肢があります。まずは、リストからテーブルを選択できます。 - - -
- -あるいは、カスタムSQLを指定してデータを取得することもできます。 - - -
- -「データを編集/プレビュー」をクリックすると、テーブルの構造を確認したり、カスタムSQLを調整したりできます。 - - -
- -UIの左下隅で「Direct Query」モードが選択されていることを確認します。 - - -
- -これで、データセットを公開し、新しい視覚化を作成することができます! - -## 知られている制限事項 {#known-limitations} - -- SPICEインポートは期待通りに動作しません。かわりにDirect Queryモードを使用してください。詳細は [#58553](https://github.com/ClickHouse/ClickHouse/issues/58553) を参照してください。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/quicksight-and-clickhouse.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/quicksight-and-clickhouse.md.hash deleted file mode 100644 index 2f8b4c1f799..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/quicksight-and-clickhouse.md.hash +++ /dev/null @@ -1 +0,0 @@ -73edf2d87d7b9b83 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/rocketbi-and-clickhouse.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/rocketbi-and-clickhouse.md deleted file mode 100644 index 6ff414d1665..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/rocketbi-and-clickhouse.md +++ /dev/null @@ -1,168 +0,0 @@ ---- -sidebar_label: 'Rocket BI' -sidebar_position: 131 -slug: '/integrations/rocketbi' -keywords: -- 'clickhouse' -- 'RocketBI' -- 'connect' -- 'integrate' -- 'ui' -description: 'RocketBI is a self-service business intelligence platform that helps - you quickly analyze data, build drag-n-drop visualizations and collaborate with - colleagues right on your web browser.' -title: 'GOAL: BUILD YOUR 1ST DASHBOARD' ---- - -import ConnectionDetails from '@site/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_gather_your_details_http.mdx'; -import Image from '@theme/IdealImage'; -import rocketbi_01 from '@site/static/images/integrations/data-visualization/rocketbi_01.gif'; -import rocketbi_02 from '@site/static/images/integrations/data-visualization/rocketbi_02.gif'; -import rocketbi_03 from '@site/static/images/integrations/data-visualization/rocketbi_03.png'; -import rocketbi_04 from '@site/static/images/integrations/data-visualization/rocketbi_04.png'; -import rocketbi_05 from '@site/static/images/integrations/data-visualization/rocketbi_05.png'; -import rocketbi_06 from '@site/static/images/integrations/data-visualization/rocketbi_06.png'; -import rocketbi_07 from '@site/static/images/integrations/data-visualization/rocketbi_07.png'; -import rocketbi_08 from '@site/static/images/integrations/data-visualization/rocketbi_08.png'; -import rocketbi_09 from '@site/static/images/integrations/data-visualization/rocketbi_09.png'; -import rocketbi_10 from '@site/static/images/integrations/data-visualization/rocketbi_10.png'; -import rocketbi_11 from '@site/static/images/integrations/data-visualization/rocketbi_11.png'; -import rocketbi_12 from '@site/static/images/integrations/data-visualization/rocketbi_12.png'; -import rocketbi_13 from '@site/static/images/integrations/data-visualization/rocketbi_13.png'; -import rocketbi_14 from '@site/static/images/integrations/data-visualization/rocketbi_14.png'; -import rocketbi_15 from '@site/static/images/integrations/data-visualization/rocketbi_15.png'; -import rocketbi_16 from '@site/static/images/integrations/data-visualization/rocketbi_16.png'; -import rocketbi_17 from '@site/static/images/integrations/data-visualization/rocketbi_17.png'; -import rocketbi_18 from '@site/static/images/integrations/data-visualization/rocketbi_18.png'; -import CommunityMaintainedBadge from '@theme/badges/CommunityMaintained'; - - -# ゴール: 最初のダッシュボードを構築する - - - 
-このガイドでは、Rocket.BIを使用してシンプルなダッシュボードをインストールして構築します。 -これがダッシュボードです: - - -
- -[このリンクからダッシュボードをチェックできます。](https://demo.rocket.bi/dashboard/sales-dashboard-7?token=7eecf750-cbde-4c53-8fa8-8b905fec667e) - -## インストール {#install} - -あらかじめ用意されたDockerイメージを使用してRocket.BIを起動します。 - -docker-compose.ymlと設定ファイルを取得します: - -```bash -wget https://raw.githubusercontent.com/datainsider-co/rocket-bi/main/docker/docker-compose.yml -wget https://raw.githubusercontent.com/datainsider-co/rocket-bi/main/docker/.clickhouse.env -``` -.clickhouse.envを編集し、ClickHouseサーバー情報を追加します。 - -次のコマンドを実行してRocket.BIを起動します: ``` docker-compose up -d . ``` - -ブラウザを開き、```localhost:5050```にアクセスし、このアカウントでログインします: ```hello@gmail.com/123456``` - -ソースからビルドしたり、詳細な設定を行ったりする場合は、こちらを確認できます: [Rocket.BI Readme](https://github.com/datainsider-co/rocket-bi/blob/main/README.md) - -## ダッシュボードの構築を始めましょう {#lets-build-the-dashboard} - -ダッシュボードでは、レポートを見つけ、**+新規**をクリックして視覚化を開始します。 - -**無限のダッシュボード**を構築し、ダッシュボードに**無限のチャート**を描くことができます。 - - -
- -Youtubeでの高解像度チュートリアルを参照してください: [https://www.youtube.com/watch?v=TMkdMHHfvqY](https://www.youtube.com/watch?v=TMkdMHHfvqY) - -### チャートコントロールを構築する {#build-the-chart-controls} - -#### メトリックコントロールを作成する {#create-a-metrics-control} -タブフィルターで使用したいメトリックフィールドを選択します。集計設定を確認することを忘れないでください。 - - -
- -フィルターの名前を変更し、ダッシュボードにコントロールを保存します。 - - - -#### 日付タイプのコントロールを作成する {#create-a-date-type-control} -メイン日付カラムとして日付フィールドを選択します: - - -
- -異なるルックアップ範囲を持つ重複バリアントを追加します。例えば、年、月次、日次の日付や曜日。 - - -
- -フィルターの名前を変更し、ダッシュボードにコントロールを保存します。 - - - -### さあ、チャートを構築しましょう {#now-let-build-the-charts} - -#### 円グラフ: 地域別売上メトリックス {#pie-chart-sales-metrics-by-regions} -新しいチャートを追加して、円グラフを選択します。 - - -
- -まず、データセットから"Region"カラムをドラッグ&ドロップしてレジェンドフィールドに追加します。 - - -
- -次に、チャートコントロールタブに移動します。 - - -
- -メトリックコントロールを値フィールドにドラッグ&ドロップします。 - - -
- -(メトリックコントロールをソートとして使用することもできます) - -チャート設定に移動してさらなるカスタマイズを行います。 - - -
- -例えば、データラベルをパーセンテージに変更します。 - - -
- -チャートを保存してダッシュボードに追加します。 - - - -#### 時系列チャートで日付コントロールを使用する {#use-date-control-in-a-time-series-chart} -スタックカラムチャートを使用します。 - - -
- -チャートコントロールで、Y軸にメトリックコントロールを、X軸に日付範囲を使用します。 - - -
- -地域カラムをブレイクダウンに追加します。 - - -
- -KPIとして数字チャートを追加し、ダッシュボードをグレードアップします。 - - -
- -これで、あなたはRocket.BIで最初のダッシュボードを成功裏に構築しました。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/rocketbi-and-clickhouse.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/rocketbi-and-clickhouse.md.hash deleted file mode 100644 index 49724651fb6..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/rocketbi-and-clickhouse.md.hash +++ /dev/null @@ -1 +0,0 @@ -69dabb993bc3c5d0 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/splunk-and-clickhouse.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/splunk-and-clickhouse.md deleted file mode 100644 index 4c867fdea7f..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/splunk-and-clickhouse.md +++ /dev/null @@ -1,198 +0,0 @@ ---- -sidebar_label: 'Splunk' -sidebar_position: 198 -slug: '/integrations/splunk' -keywords: -- 'Splunk' -- 'integration' -- 'data visualization' -description: 'Connect Splunk dashboards to ClickHouse' -title: 'Connecting Splunk to ClickHouse' ---- - -import Image from '@theme/IdealImage'; -import splunk_1 from '@site/static/images/integrations/splunk/splunk-1.png'; -import splunk_2 from '@site/static/images/integrations/splunk/splunk-2.png'; -import splunk_3 from '@site/static/images/integrations/splunk/splunk-3.png'; -import splunk_4 from '@site/static/images/integrations/splunk/splunk-4.png'; -import splunk_5 from '@site/static/images/integrations/splunk/splunk-5.png'; -import splunk_6 from '@site/static/images/integrations/splunk/splunk-6.png'; -import splunk_7 from '@site/static/images/integrations/splunk/splunk-7.png'; -import splunk_8 from '@site/static/images/integrations/splunk/splunk-8.png'; -import splunk_9 from '@site/static/images/integrations/splunk/splunk-9.png'; -import splunk_10 from '@site/static/images/integrations/splunk/splunk-10.png'; -import ClickHouseSupportedBadge from '@theme/badges/ClickHouseSupported'; - - -# SplunkをClickHouseに接続する - - - -Splunkは、セキュリティとオブザーバビリティのための人気のある技術です。また、強力な検索とダッシュボードエンジンでもあります。さまざまなユースケースに対応する数百のSplunkアプリがあります。 - -特にClickHouseの場合、非常に高性能なClickHouse JDBCドライバを使用して、ClickHouse内のテーブルを直接クエリできるシンプルな統合を提供する[Splunk DB Connect App](https://splunkbase.splunk.com/app/2686)を活用しています。 - -この統合の理想的なユースケースは、NetFlow、AvroまたはProtobufバイナリデータ、DNS、VPCフローログ、その他のOTELログといった大規模なデータソースにClickHouseを使用している時です。データはSplunkのインデックス層には取り込まれず、[Metabase](https://www.metabase.com/)や[Superset](https://superset.apache.org/)などの他のビジュアライゼーション統合と同様に、ClickHouseから直接クエリされます。 - -## 目的​ {#goal} - -このガイドでは、ClickHouse JDBCドライバを使用してClickHouseをSplunkに接続します。ローカルのSplunk Enterpriseをインストールしますが、データはインデックスしません。代わりに、DB Connectクエリエンジンを介して検索機能を使用しています。 - -このガイドを使えば、次のようなClickHouseに接続したダッシュボードを作成できるようになります: - - - -:::note -このガイドでは[ニューヨーク市のタクシーデータセット](/getting-started/example-datasets/nyc-taxi)を使用しています。他にも[私たちのドキュメント](http://localhost:3000/docs/getting-started/example-datasets)から使用できるデータセットがたくさんあります。 -::: - -## 前提条件 {#prerequisites} - -始める前に、以下が必要です: -- 検索ヘッド機能を使用するためのSplunk Enterprise -- OSまたはコンテナにインストールされた[Java Runtime Environment (JRE)](https://docs.splunk.com/Documentation/DBX/3.16.0/DeployDBX/Prerequisites)要件 -- [Splunk DB Connect](https://splunkbase.splunk.com/app/2686) -- Splunk Enterprise OSインスタンスへの管理者またはSSHアクセス -- ClickHouse接続詳細(ClickHouse Cloudを使用している場合は[こちら](/integrations/metabase#1-gather-your-connection-details)を参照) - -## Splunk EnterpriseにDB Connectをインストールして構成する 
{#install-and-configure-db-connect-on-splunk-enterprise} - -まず、Splunk EnterpriseインスタンスにJava Runtime Environmentをインストールする必要があります。Dockerを使用している場合は、`microdnf install java-11-openjdk`コマンドを使用できます。 - -`java_home`パスをメモしてください:`java -XshowSettings:properties -version`。 - -DB Connect AppがSplunk Enterpriseにインストールされていることを確認してください。Splunk Web UIのアプリセクションで見つけることができます: -- Splunk Webにログインし、Apps > Find More Appsに移動 -- 検索ボックスを使用してDB Connectを検索 -- Splunk DB Connectの横にある緑の「インストール」ボタンをクリック -- 「Splunkを再起動」をクリック - -DB Connect Appのインストールに問題がある場合は、追加の手順について[こちらのリンク](https://splunkbase.splunk.com/app/2686)を参照してください。 - -DB Connect Appがインストールされていることを確認したら、DB Connect AppのConfiguration -> Settingsにjava_homeパスを追加し、保存してリセットをクリックします。 - - - -## ClickHouse用にJDBCを構成する {#configure-jdbc-for-clickhouse} - -[ClickHouse JDBCドライバ](https://github.com/ClickHouse/clickhouse-java)をDB Connect Driversフォルダにダウンロードします。例えば: - -```bash -$SPLUNK_HOME/etc/apps/splunk_app_db_connect/drivers -``` - -次に、接続タイプ構成を編集し、ClickHouse JDBCドライバクラスの詳細を`$SPLUNK_HOME/etc/apps/splunk_app_db_connect/default/db_connection_types.conf`に追加します。 - -次のスタンザをファイルに追加します: - -```text -[ClickHouse] -displayName = ClickHouse -serviceClass = com.splunk.dbx2.DefaultDBX2JDBC -jdbcUrlFormat = jdbc:ch://<host>:<port>/<database> -jdbcUrlSSLFormat = jdbc:ch://<host>:<port>/<database>?ssl=true -jdbcDriverClass = com.clickhouse.jdbc.ClickHouseDriver -ui_default_catalog = $database$ -``` - -`$SPLUNK_HOME/bin/splunk restart`を使用してSplunkを再起動します。 - -再度DB Connect Appに移動し、Configuration > Settings > Driversに移動します。ClickHouseの横に緑のチェックマークが表示されるはずです: - - - -## Splunk検索をClickHouseに接続する {#connect-splunk-search-to-clickhouse} - -DB Connect App Configuration -> Databases -> Identitiesに移動し、ClickHouse用のアイデンティティを作成します。 - -Configuration -> Databases -> ConnectionsからClickHouseへの新しい接続を作成し、「新しい接続」を選択します。 - - -
- -ClickHouseホストの詳細を追加し、「SSLを有効にする」がチェックされていることを確認します: - - -接続を保存した後、ClickHouseにSplunkを接続できたことになります! - -:::note -エラーが発生した場合は、SplunkインスタンスのIPアドレスをClickHouse Cloud IPアクセスリストに追加したことを確認してください。詳細については[ドキュメント](/cloud/security/setting-ip-filters)を参照してください。 -::: - -## SQLクエリを実行する {#run-a-sql-query} - -すべてが正常に機能するか確認するために、SQLクエリを実行します。 - -DB Connect AppのDataLabセクションから接続詳細をSQLエクスプローラーで選択します。このデモでは`trips`テーブルを使用しています: - - - -`trips`テーブルのすべてのレコードの数を返すSQLクエリを実行します: - - - -クエリが成功すれば、結果が表示されるはずです。 - -## ダッシュボードを作成する {#create-a-dashboard} - -SQLと強力なSplunk Processing Language (SPL)を組み合わせたダッシュボードを作成しましょう。 - -続行する前に、最初に[SPL保護を無効にする](https://docs.splunk.com/Documentation/Splunk/9.2.1/Security/SPLsafeguards?ref=hk#Deactivate_SPL_safeguards)必要があります。 - -最も頻繁にピックアップされる上位10の地域を示す以下のクエリを実行します: - -```sql -dbxquery query="SELECT pickup_ntaname, count(*) AS count -FROM default.trips GROUP BY pickup_ntaname -ORDER BY count DESC LIMIT 10;" connection="chc" -``` - -ビジュアライゼーションタブを選択して作成されたカラムチャートを表示します: - - - -「名前を付けて保存」>「ダッシュボードに保存」をクリックしてダッシュボードを作成します。 - -乗客数に基づいて平均料金を示す別のクエリを追加します。 - -```sql -dbxquery query="SELECT passenger_count,avg(total_amount) -FROM default.trips GROUP BY passenger_count;" connection="chc" -``` - -今回はバーチャートのビジュアライゼーションを作成し、以前のダッシュボードに保存します。 - - - -最後に、乗客数と移動距離の相関関係を示すもう1つのクエリを追加します: - -```sql -dbxquery query="SELECT passenger_count, toYear(pickup_datetime) AS year, -round(trip_distance) AS distance, count(*) FROM default.trips -GROUP BY passenger_count, year, distance -ORDER BY year, count(*) DESC; " connection="chc" -``` - -最終的なダッシュボードは次のようになります: - - - -## 時系列データ {#time-series-data} - -Splunkには、ダッシュボードが時系列データの視覚化とプレゼンテーションに使用できる数百の組み込み関数があります。この例では、SQL + SPLを組み合わせて、Splunkの時系列データで機能するクエリを作成します。 - -```sql -dbxquery query="SELECT time, orig_h, duration -FROM "demo"."conn" WHERE time >= now() - interval 1 HOURS" connection="chc" -| eval time = strptime(time, "%Y-%m-%d %H:%M:%S.%3Q") -| eval _time=time -| timechart avg(duration) as duration by orig_h -| eval duration=round(duration/60) -| sort - duration -``` - -## 詳細を学ぶ {#learn-more} - -Splunk DB Connectとダッシュボードを作成する方法について詳しく知りたい場合は、[Splunkドキュメント](https://docs.splunk.com/Documentation)を訪問してください。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/splunk-and-clickhouse.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/splunk-and-clickhouse.md.hash deleted file mode 100644 index 322f552113b..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/splunk-and-clickhouse.md.hash +++ /dev/null @@ -1 +0,0 @@ -e3caa028ebab3bac diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/superset-and-clickhouse.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/superset-and-clickhouse.md deleted file mode 100644 index 2e77aee6640..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/superset-and-clickhouse.md +++ /dev/null @@ -1,132 +0,0 @@ ---- -sidebar_label: 'Superset' -sidebar_position: 198 -slug: '/integrations/superset' -keywords: -- 'clickhouse' -- 'superset' -- 'connect' -- 'integrate' -- 'ui' -description: 'Apache Superset is an open-source data exploration and visualization - platform.' 
-title: 'Connect Superset to ClickHouse' ---- - -import ConnectionDetails from '@site/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_gather_your_details_http.mdx'; -import Image from '@theme/IdealImage'; -import superset_01 from '@site/static/images/integrations/data-visualization/superset_01.png'; -import superset_02 from '@site/static/images/integrations/data-visualization/superset_02.png'; -import superset_03 from '@site/static/images/integrations/data-visualization/superset_03.png'; -import superset_04 from '@site/static/images/integrations/data-visualization/superset_04.png'; -import superset_05 from '@site/static/images/integrations/data-visualization/superset_05.png'; -import superset_06 from '@site/static/images/integrations/data-visualization/superset_06.png'; -import superset_08 from '@site/static/images/integrations/data-visualization/superset_08.png'; -import superset_09 from '@site/static/images/integrations/data-visualization/superset_09.png'; -import superset_10 from '@site/static/images/integrations/data-visualization/superset_10.png'; -import superset_11 from '@site/static/images/integrations/data-visualization/superset_11.png'; -import superset_12 from '@site/static/images/integrations/data-visualization/superset_12.png'; -import CommunityMaintainedBadge from '@theme/badges/CommunityMaintained'; - - -# SupersetをClickHouseに接続する - - - -Apache Supersetは、Pythonで書かれたオープンソースのデータ探索および可視化プラットフォームです。Supersetは、ClickHouseによって提供されるPythonドライバーを使用してClickHouseに接続します。どのように機能するか見てみましょう... - -## 目標 {#goal} - -このガイドでは、ClickHouseデータベースからのデータを使ってSupersetでダッシュボードを作成します。ダッシュボードは次のようになります。 - - -
- -:::tip データを追加する -作業するデータセットがない場合は、いずれかの例を追加できます。このガイドでは[UK Price Paid](/getting-started/example-datasets/uk-price-paid.md)データセットを使用しているので、それを選択することもできます。同じドキュメントカテゴリー内には他にもいくつかの例があります。 -::: - -## 1. 接続情報を集める {#1-gather-your-connection-details} - - -## 2. ドライバーをインストールする {#2-install-the-driver} - -1. SupersetはClickHouseに接続するために`clickhouse-connect`ドライバーを使用します。`clickhouse-connect`の詳細はhttps://pypi.org/project/clickhouse-connect/で確認でき、次のコマンドでインストールできます: - - ```console - pip install clickhouse-connect - ``` - -2. Supersetを起動(または再起動)します。 - -## 3. SupersetをClickHouseに接続する {#3-connect-superset-to-clickhouse} - -1. Superset内で、上部メニューから**Data**を選択し、ドロップダウンメニューから**Databases**を選択します。**+ Database**ボタンをクリックして新しいデータベースを追加します: - - -
- -2. 最初のステップでは、データベースのタイプとして**ClickHouse Connect**を選択します: - - -
- -3. 二番目のステップでは: - - SSLをオンまたはオフに設定します。 - - 以前に収集した接続情報を入力します。 - - **DISPLAY NAME**を指定します:これは任意の名前で構いません。他の複数のClickHouseデータベースに接続する場合は、より説明的な名前を付けてください。 - - -
- -4. **CONNECT**ボタンをクリックし、次に**FINISH**ボタンをクリックしてセットアップウィザードを完了すると、データベースのリストにデータベースが表示されるはずです。 - -## 4. データセットを追加する {#4-add-a-dataset} - -1. SupersetでClickHouseデータと対話するには、**_dataset_**を定義する必要があります。Supersetの上部メニューから**Data**を選択し、ドロップダウンメニューから**Datasets**を選択します。 - -2. データセットを追加するボタンをクリックします。データソースとして新しいデータベースを選択すると、データベースに定義されているテーブルが表示されます: - - -
- -3. ダイアログウィンドウの底部にある**ADD**ボタンをクリックすると、テーブルがデータセットのリストに表示されます。これでダッシュボードを作成し、ClickHouseデータを分析する準備が整いました! - -## 5. Supersetでのチャートとダッシュボードを作成する {#5--creating-charts-and-a-dashboard-in-superset} - -Supersetに慣れている方は、次のセクションをすぐに理解できるでしょう。Supersetが初めての方は...世界の他の多くのクールな可視化ツールのように、始めるのにそれほど時間はかかりませんが、詳細やニュアンスは使用しながら学んでいくことになります。 - -1. ダッシュボードから始めます。Supersetの上部メニューから**Dashboards**を選択します。右上のボタンをクリックして新しいダッシュボードを追加します。次のダッシュボードは**UK property prices**と名付けられています: - - -
- -2. 新しいチャートを作成するには、上部メニューから**Charts**を選択し、新しいチャートを追加するボタンをクリックします。多くのオプションが表示されます。次の例は、**CHOOSE A DATASET**ドロップダウンから**uk_price_paid**データセットを使用している**Pie Chart**です: - - -
- -3. Supersetの円グラフには**Dimension**と**Metric**が必要で、残りの設定はオプションです。次元とメトリックには独自のフィールドを選択できます。この例では、ClickHouseフィールド`district`を次元、`AVG(price)`をメトリックとして使用しています。 - - - -
- -5. 円グラフよりもドーナツチャートを好む場合は、**CUSTOMIZE**の下でそれや他のオプションを設定できます: - - -
- -6. **SAVE**ボタンをクリックしてチャートを保存し、次に**ADD TO DASHBOARD**ドロップダウンから**UK property prices**を選択し、**SAVE & GO TO DASHBOARD**を選択してチャートを保存しダッシュボードに追加します: - - -
- -7. 以上です。ClickHouseのデータに基づいてSupersetでダッシュボードを構築することで、高速なデータ分析の新しい世界が広がります! - - -
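-
-参考までに、手順3の円グラフ(次元に`district`、メトリックに`AVG(price)`)に概ね相当するSQLは次のようなイメージです(実際にはSupersetが設定内容からクエリを自動生成します)。
-
-```sql
--- 手順3の円グラフに相当する集計のイメージ(uk_price_paidデータセットを使用)
-SELECT
-    district,
-    avg(price) AS avg_price
-FROM uk_price_paid
-GROUP BY district
-ORDER BY avg_price DESC;
-```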
- -## 関連コンテンツ {#related-content} - -- ブログ: [ClickHouseでのデータの可視化 - パート2 - Superset](https://clickhouse.com/blog/visualizing-data-with-superset) diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/superset-and-clickhouse.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/superset-and-clickhouse.md.hash deleted file mode 100644 index 70092763b95..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/superset-and-clickhouse.md.hash +++ /dev/null @@ -1 +0,0 @@ -e49f8de08272ea75 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/tableau/tableau-analysis-tips.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/tableau/tableau-analysis-tips.md deleted file mode 100644 index 86cd273d2b6..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/tableau/tableau-analysis-tips.md +++ /dev/null @@ -1,70 +0,0 @@ ---- -sidebar_label: 'Analysis Tips' -sidebar_position: 4 -slug: '/integrations/tableau/analysis-tips' -keywords: -- 'clickhouse' -- 'tableau' -- 'online' -- 'mysql' -- 'connect' -- 'integrate' -- 'ui' -description: 'ClickHouse 公式コネクタを使用する際のTableau解析のヒント。' -title: 'Analysis tips' ---- - - - - -# 分析のヒント -## MEDIAN() および PERCENTILE() 関数 {#median-and-percentile-functions} -- ライブモードでは、MEDIAN() および PERCENTILE() 関数(コネクタ v0.1.3 リリース以降)は [ClickHouse quantile()() 関数](/sql-reference/aggregate-functions/reference/quantile/) を使用し、計算速度が大幅に向上しますが、サンプリングを利用します。正確な計算結果が必要な場合は、`MEDIAN_EXACT()` および `PERCENTILE_EXACT()` 関数を使用してください([quantileExact()()](/sql-reference/aggregate-functions/reference/quantileexact/) に基づく)。 -- エクストラクトモードでは、MEDIAN_EXACT() および PERCENTILE_EXACT() を使用できません。なぜなら、MEDIAN() および PERCENTILE() は常に正確(そして遅い)だからです。 -## ライブモードでの計算フィールドのための追加関数 {#additional-functions-for-calculated-fields-in-live-mode} -ClickHouse にはデータ分析に使用できる関数が多数あり、Tableau がサポートしているものよりも遥かに多くあります。ユーザーの便宜のために、計算フィールドを作成する際にライブモードで使用可能な新しい関数を追加しました。残念ながら、Tableau インターフェイスにはこれらの関数の説明を追加することができないため、ここに説明を追加します。 -- **[`-If` 集約コンビネータ](/sql-reference/aggregate-functions/combinators/#-if)** *(v0.2.3 に追加)* - 集約計算内に行レベルのフィルターを持つことができます。`SUM_IF(), AVG_IF(), COUNT_IF(), MIN_IF() & MAX_IF()` 関数が追加されました。 -- **`BAR([my_int], [min_val_int], [max_val_int], [bar_string_length_int])`** *(v0.2.1 に追加)* — 退屈な棒グラフは忘れましょう!代わりに `BAR()` 関数を使用してください(ClickHouse の [`bar()`](/sql-reference/functions/other-functions#bar) と同等)。例えば、この計算フィールドは文字列として美しい棒を返します: - ```text - BAR([my_int], [min_val_int], [max_val_int], [bar_string_length_int]) + " " + FORMAT_READABLE_QUANTITY([my_int]) - ``` - ```text - == BAR() == - ██████████████████▊ 327.06 million - █████ 88.02 million - ███████████████ 259.37 million - ``` -- **`COUNTD_UNIQ([my_field])`** *(v0.2.0 に追加)* — 引数の異なる値の近似数を計算します。[uniq()](/sql-reference/aggregate-functions/reference/uniq/) と同等。`COUNTD()` よりもはるかに高速です。 -- **`DATE_BIN('day', 10, [my_datetime_or_date])`** *(v0.2.1 に追加)* — ClickHouse の [`toStartOfInterval()`](/sql-reference/functions/date-time-functions#tostartofinterval) と同等。与えられたインターバルに基づいて日付または日付と時間を切り捨てます。例えば: - ```text - == my_datetime_or_date == | == DATE_BIN('day', 10, [my_datetime_or_date]) == - 28.07.2004 06:54:50 | 21.07.2004 00:00:00 - 17.07.2004 14:01:56 | 11.07.2004 00:00:00 - 14.07.2004 07:43:00 | 11.07.2004 00:00:00 - ``` -- **`FORMAT_READABLE_QUANTITY([my_integer])`** *(v0.2.1 に追加)* — 
接尾辞(千、百万、十億など)を伴った丸めた数値を文字列として返します。これは人間が大きな数を読むのに便利です。[`formatReadableQuantity()`](/sql-reference/functions/other-functions#formatreadablequantity) と同等。 -- **`FORMAT_READABLE_TIMEDELTA([my_integer_timedelta_sec], [optional_max_unit])`** *(v0.2.1 に追加)* — 秒単位の時間差を受け取ります。文字列として (年、月、日、時間、分、秒) の時間差を返します。`optional_max_unit` は表示する最大単位です。受け入れ可能な値:`seconds`, `minutes`, `hours`, `days`, `months`, `years`。[`formatReadableTimeDelta()`](/sql-reference/functions/other-functions/#formatreadabletimedelta) と同等。 -- **`GET_SETTING([my_setting_name])`** *(v0.2.1 に追加)* — カスタム設定の現在の値を返します。[`getSetting()`](/sql-reference/functions/other-functions#getsetting) と同等。 -- **`HEX([my_string])`** *(v0.2.1 に追加)* — 引数の16進数表現を含む文字列を返します。[`hex()`](/sql-reference/functions/encoding-functions/#hex) と同等。 -- **`KURTOSIS([my_number])`** — 数列のサンプル尖度を計算します。[`kurtSamp()`](/sql-reference/aggregate-functions/reference/kurtsamp) と同等。 -- **`KURTOSISP([my_number])`** — 数列の尖度を計算します。[`kurtPop()`](/sql-reference/aggregate-functions/reference/kurtpop) と同等。 -- **`MEDIAN_EXACT([my_number])`** *(v0.1.3 に追加)* — 数値データのシーケンスの中央値を正確に計算します。[`quantileExact(0.5)(...)`](/sql-reference/aggregate-functions/reference/quantileexact/#quantileexact) と同等。 -- **`MOD([my_number_1], [my_number_2])`** — 割り算の余りを計算します。引数が浮動小数点数である場合、小数部分を切り捨てて整数に変換されます。[`modulo()`](/sql-reference/functions/arithmetic-functions/#modulo) と同等。 -- **`PERCENTILE_EXACT([my_number], [level_float])`** *(v0.1.3 に追加)* — 数値データシーケンスのパーセンタイルを正確に計算します。推奨されるレベル範囲は [0.01, 0.99] です。[`quantileExact()()`](/sql-reference/aggregate-functions/reference/quantileexact/#quantileexact) と同等。 -- **`PROPER([my_string])`** *(v0.2.5 に追加)* - 各単語の最初の文字を大文字にし、それ以外の文字は小文字に変換します。スペースや句読点などの非アルファベット文字も区切りとして作用します。例えば: - ```text - PROPER("PRODUCT name") => "Product Name" - ``` - ```text - PROPER("darcy-mae") => "Darcy-Mae" - ``` -- **`RAND()`** *(v0.2.1 に追加)* — 整数 (UInt32) の数を返します。例えば `3446222955`。[`rand()`](/sql-reference/functions/random-functions/#rand) と同等。 -- **`RANDOM()`** *(v0.2.1 に追加)* — 非公式の [`RANDOM()`](https://kb.tableau.com/articles/issue/random-function-produces-inconsistent-results) Tableau 関数で、0と1の間の浮動小数点数を返します。 -- **`RAND_CONSTANT([optional_field])`** *(v0.2.1 に追加)* — ランダム値の定数カラムを生成します。 `{RAND()}` 固定 LOD に似ていますが、より高速です。[`randConstant()`](/sql-reference/functions/random-functions/#randconstant) と同等。 -- **`REAL([my_number])`** — フィールドを浮動小数点数 (Float64) にキャストします。詳細は [`here`](/sql-reference/data-types/decimal/#operations-and-result-type)。 -- **`SHA256([my_string])`** *(v0.2.1 に追加)* — 文字列からSHA-256ハッシュを計算し、得られたバイトセットを文字列 (FixedString) として返します。`HEX()` 関数と一緒に使うのが便利です。例えば、`HEX(SHA256([my_string]))`。[`SHA256()`](/sql-reference/functions/hash-functions#sha1-sha224-sha256-sha512-sha512_256) と同等。 -- **`SKEWNESS([my_number])`** — 数列のサンプル歪度を計算します。[`skewSamp()`](/sql-reference/aggregate-functions/reference/skewsamp) と同等。 -- **`SKEWNESSP([my_number])`** — 数列の歪度を計算します。[`skewPop()`](/sql-reference/aggregate-functions/reference/skewpop) と同等。 -- **`TO_TYPE_NAME([field])`** *(v0.2.1 に追加)* — 渡された引数の ClickHouse 型名を含む文字列を返します。[`toTypeName()`](/sql-reference/functions/other-functions#totypename) と同等。 -- **`TRUNC([my_float])`** — `FLOOR([my_float])` 関数と同じです。[`trunc()`](/sql-reference/functions/rounding-functions#truncate) と同等。 -- **`UNHEX([my_string])`** *(v0.2.1 に追加)* — `HEX()` の逆操作を行います。[`unhex()`](/sql-reference/functions/encoding-functions#unhex) と同等。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/tableau/tableau-analysis-tips.md.hash 
b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/tableau/tableau-analysis-tips.md.hash deleted file mode 100644 index df9fce7b97b..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/tableau/tableau-analysis-tips.md.hash +++ /dev/null @@ -1 +0,0 @@ -8bc34a6f4e4cc4a1 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/tableau/tableau-and-clickhouse.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/tableau/tableau-and-clickhouse.md deleted file mode 100644 index ce4a2487f8f..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/tableau/tableau-and-clickhouse.md +++ /dev/null @@ -1,183 +0,0 @@ ---- -sidebar_label: 'Tableau Desktop' -sidebar_position: 1 -slug: '/integrations/tableau' -keywords: -- 'clickhouse' -- 'tableau' -- 'connect' -- 'integrate' -- 'ui' -description: 'TableauはClickHouseデータベースとテーブルをデータソースとして使用できます。' -title: 'Connecting Tableau to ClickHouse' ---- - -import TOCInline from '@theme/TOCInline'; -import ConnectionDetails from '@site/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_gather_your_details_http.mdx'; -import Image from '@theme/IdealImage'; -import tableau_connecttoserver from '@site/static/images/integrations/data-visualization/tableau_connecttoserver.png'; -import tableau_connector_details from '@site/static/images/integrations/data-visualization/tableau_connector_details.png'; -import tableau_connector_dialog from '@site/static/images/integrations/data-visualization/tableau_connector_dialog.png'; -import tableau_newworkbook from '@site/static/images/integrations/data-visualization/tableau_newworkbook.png'; -import tableau_tpcdschema from '@site/static/images/integrations/data-visualization/tableau_tpcdschema.png'; -import tableau_workbook1 from '@site/static/images/integrations/data-visualization/tableau_workbook1.png'; -import tableau_workbook2 from '@site/static/images/integrations/data-visualization/tableau_workbook2.png'; -import tableau_workbook3 from '@site/static/images/integrations/data-visualization/tableau_workbook3.png'; -import tableau_workbook4 from '@site/static/images/integrations/data-visualization/tableau_workbook4.png'; -import tableau_workbook5 from '@site/static/images/integrations/data-visualization/tableau_workbook5.png'; -import tableau_workbook6 from '@site/static/images/integrations/data-visualization/tableau_workbook6.png'; -import tableau_workbook7 from '@site/static/images/integrations/data-visualization/tableau_workbook7.png'; - - -# TableauをClickHouseに接続する - -ClickHouseは公式のTableauコネクタを提供しており、[Tableau Exchange](https://exchange.tableau.com/products/1064)に掲載されています。このコネクタはClickHouseの高度な[JDBCドライバ](/integrations/language-clients/java/jdbc)を基にしています。 - -このコネクタを使用することで、TableauはClickHouseのデータベースやテーブルをデータソースとして統合します。この機能を有効にするには、以下のセットアップガイドに従ってください。 - - - -## 使用前に必要なセットアップ {#setup-required-prior-usage} - -1. 接続の詳細を集める - - -2. Tableau desktopをダウンロードしてインストールします。 -3. `clickhouse-tableau-connector-jdbc`の指示に従って、ClickHouse JDBCドライバの互換性のあるバージョンをダウンロードします。 - -:::note -**clickhouse-jdbc-x.x.x-shaded-all.jar** JARファイルをダウンロードしてください。現在、バージョン`0.8.X`の使用を推奨しています。 -::: - -4. JDBCドライバを以下のフォルダに保存します(OSに応じて、フォルダが存在しない場合は新しく作成できます): - - macOS: `~/Library/Tableau/Drivers` - - Windows: `C:\Program Files\Tableau\Drivers` -5. TableauでClickHouseデータソースを構成し、データビジュアライゼーションの作成を開始します! 
- -## TableauでClickHouseデータソースを構成する {#configure-a-clickhouse-data-source-in-tableau} - -`clickhouse-jdbc`ドライバがインストールされ、設定されたので、ClickHouseの**TPCD**データベースに接続するデータソースをTableauで定義する方法を見てみましょう。 - -1. Tableauを起動します。(既に起動している場合は再起動します。) - -2. 左側のメニューから、**サーバーに接続**セクションの**その他**をクリックします。利用可能なコネクタのリストから**ClickHouse by ClickHouse**を検索します: - - -
- -:::note -コネクタのリストに**ClickHouse by ClickHouse**が表示されないですか?それは古いTableau Desktopバージョンに関連している可能性があります。それを解決するには、Tableau Desktopアプリケーションをアップグレードするか、[コネクタを手動でインストールする](#install-the-connector-manually)ことを検討してください。 -::: - -3. **ClickHouse by ClickHouse**をクリックすると、以下のダイアログがポップアップします: - - -
- -4. **インストールしてTableauを再起動**をクリックします。アプリケーションを再起動します。 -5. 再起動後、コネクタは完全な名称で表示されます:`ClickHouse JDBC by ClickHouse, Inc.`。これをクリックすると、以下のダイアログがポップアップします: - - -
- -6. 接続の詳細を入力します: - - | 設定 | 値 | - |-----------|--------------------------------------------------------| - | サーバー | **あなたのClickHouseホスト(プレフィックスやサフィックスなし)** | - | ポート | **8443** | - | データベース | **default** | - | ユーザー名 | **default** | - | パスワード | *\***** | - -:::note -ClickHouseクラウドを使用する場合、セキュア接続のためにSSLチェックボックスを有効にする必要があります。 -::: -
- -:::note -私たちのClickHouseデータベースは**TPCD**という名前ですが、上記のダイアログでは**データベース**を**default**に設定し、次のステップで**スキーマ**に**TPCD**を選択してください。(これはコネクタのバグによるものと思われるため、この動作は変更される可能性がありますが、現時点ではデータベースには**default**を使用する必要があります。) -::: - -7. **サインイン**ボタンをクリックすると、新しいTableauワークブックが表示されます: - - -
- -8. **スキーマ**のドロップダウンから**TPCD**を選択すると、**TPCD**内のテーブルのリストが表示されます: - - -
- -これでTableauでビジュアライゼーションを作成する準備が整いました! - -## Tableauでビジュアライゼーションを構築する {#building-visualizations-in-tableau} - -TableauでClickHouseデータソースが設定されたので、データを視覚化してみましょう... - -1. **CUSTOMER**テーブルをワークブックにドラッグします。カラムが表示されますが、データテーブルは空です: - - -
- -2. **今すぐ更新**ボタンをクリックすると、**CUSTOMER**から100行がテーブルに入ります。 - -3. **ORDERS**テーブルをワークブックにドラッグし、両方のテーブル間のリレーションシップフィールドを**Custkey**として設定します: - - -
- -4. **ORDERS**および**LINEITEM**テーブルがデータソースとして関連付けられたので、この関係を利用してデータに関する質問に答えることができます。ワークブックの下部で**シート1**タブを選択します。 - - -
- -5. 特定のアイテムが毎年いくつ注文されたかを知りたいとします。**ORDERS**から**OrderDate**を**Columns**セクション(横のフィールド)にドラッグし、**LINEITEM**から**Quantity**を**Rows**にドラッグします。Tableauは以下の折れ線グラフを生成します: - - -
- -あまり魅力的な折れ線グラフではありませんが、このデータセットはスクリプトによって生成され、クエリパフォーマンスをテストするために構築されたため、TPCDデータのシミュレートされた注文にはあまり変動がないことに注意してください。 - -6. 各四半期の平均注文額(ドル単位)を知りたいとします。また、配送モード(航空、郵便、船、トラックなど)についても: - - - **新しいワークシート**タブをクリックして新しいシートを作成します。 - - **ORDERS**から**OrderDate**を**Columns**にドラッグし、**Year**から**Quarter**に変更します。 - - **LINEITEM**から**Shipmode**を**Rows**にドラッグします。 - -以下のようになります: - - -
- -7. **Abc**の値は、テーブルにメトリックをドラッグするまでのスペースを埋めているだけです。**ORDERS**から**Totalprice**をテーブルにドラッグします。デフォルトの計算が**Totalprices**の**SUM**であることに注意してください: - - -
- -8. **SUM**をクリックし、**Measure**を**Average**に変更します。同じドロップダウンメニューから**Format**を選択し、**Numbers**を**通貨(標準)**に変更します: - - -
- -素晴らしい!TableauをClickHouseに接続し、ClickHouseデータの分析と視覚化に向けた新たな可能性を開拓しました。 - -## コネクタを手動でインストールする {#install-the-connector-manually} - -デフォルトでコネクタが含まれていない古いTableau Desktopバージョンを使用している場合は、次の手順に従って手動でインストールできます: - -1. [Tableau Exchange](https://exchange.tableau.com/products/1064)から最新のtacoファイルをダウンロードします。 -2. tacoファイルを以下の場所に置きます: - * macOS: `~/Documents/My Tableau Repository/Connectors` - * Windows: `C:\Users\[Windows User]\Documents\My Tableau Repository\Connectors` -3. Tableau Desktopを再起動します。設定が成功した場合は、`新しいデータソース`セクションにコネクタが表示されます。 - -## 接続と分析のヒント {#connection-and-analysis-tips} - -Tableau-ClickHouse統合の最適化に関するさらなるガイダンスについては、[接続のヒント](/integrations/tableau/connection-tips)および[分析のヒント](/integrations/tableau/analysis-tips)をご覧ください。 - -## テスト {#tests} -コネクタは[TDVTフレームワーク](https://tableau.github.io/connector-plugin-sdk/docs/tdvt)でテストされており、現在97%のカバレッジ比率を維持しています。 - -## まとめ {#summary} -一般的なODBC/JDBC ClickHouseドライバを使用してTableauをClickHouseに接続できます。ただし、このコネクタは接続設定プロセスを簡素化します。コネクタに関して問題がある場合は、お気軽にGitHubでお問い合わせください。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/tableau/tableau-and-clickhouse.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/tableau/tableau-and-clickhouse.md.hash deleted file mode 100644 index 5900bd04a5c..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/tableau/tableau-and-clickhouse.md.hash +++ /dev/null @@ -1 +0,0 @@ -a222758bba9a9896 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/tableau/tableau-connection-tips.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/tableau/tableau-connection-tips.md deleted file mode 100644 index fb17ef2e764..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/tableau/tableau-connection-tips.md +++ /dev/null @@ -1,56 +0,0 @@ ---- -sidebar_label: 'Connection Tips' -sidebar_position: 3 -slug: '/integrations/tableau/connection-tips' -keywords: -- 'clickhouse' -- 'tableau' -- 'online' -- 'mysql' -- 'connect' -- 'integrate' -- 'ui' -description: 'Tableau connection tips when using ClickHouse official connector.' 
-title: 'Connection tips' ---- - -import Image from '@theme/IdealImage'; - - - -# 接続のヒント -## 初期 SQL タブ {#initial-sql-tab} -*Set Session ID* チェックボックスが詳細タブで有効になっている場合(デフォルト)、セッションレベルの [設定](/operations/settings/settings/) を以下のように設定できます。 -```text -SET my_setting=value; -``` -## 詳細タブ {#advanced-tab} - -99% のケースでは詳細タブを使用する必要はありませんが、残りの 1% のために以下の設定を使用できます: -- **カスタム接続パラメータ**。デフォルトでは `socket_timeout` が既に指定されています。このパラメータは、一部の抽出が非常に長い時間更新される場合に変更する必要があるかもしれません。このパラメータの値はミリ秒単位で指定されます。他のパラメータについては [こちら](https://github.com/ClickHouse/clickhouse-jdbc/blob/master/clickhouse-client/src/main/java/com/clickhouse/client/config/ClickHouseClientOption.java) で確認し、カンマで区切ってこのフィールドに追加してください。 -- **JDBC ドライバ custom_http_params**。このフィールドでは、ClickHouse 接続文字列にいくつかのパラメータを追加することができます。[`custom_http_params` パラメータに値を渡す](https://github.com/ClickHouse/clickhouse-jdbc#configuration)ことで実現します。例えば、*Set Session ID* チェックボックスが有効になっている場合、`session_id` はこのように指定されます。 -- **JDBC ドライバ `typeMappings`**。このフィールドでは、[ClickHouse のデータ型マッピングを JDBC ドライバで使用する Java データ型のリストとして渡すことができます](https://github.com/ClickHouse/clickhouse-jdbc#configuration)。このパラメータのおかげで、コネクタは大きな整数を文字列として自動的に表示しますが、このマッピングセットを渡すことで変更できます *(理由はわかりません)*。 - ```text - UInt256=java.lang.Double,Int256=java.lang.Double - ``` - マッピングの詳細については、該当するセクションを参照してください。 - -- **JDBC ドライバ URL パラメータ**。残りの [ドライバパラメータ](https://github.com/ClickHouse/clickhouse-jdbc#configuration)、例えば `jdbcCompliance` をこのフィールドに渡すことができます。注意してください。パラメータの値は URL エンコード形式で渡す必要があり、`custom_http_params` または `typeMappings` をこのフィールドと詳細タブの前のフィールドに渡す場合、詳細タブの両方の先行フィールドの値が優先されます。 -- **Set Session ID** チェックボックス。これは初期 SQL タブでセッションレベルの設定を行うために必要で、タイムスタンプと擬似乱数を持つ `session_id` を `"tableau-jdbc-connector-*{timestamp}*-*{number}*"` という形式で生成します。 -## UInt64、Int128、(U)Int256 データ型のサポートを制限 {#limited-support-for-uint64-int128-uint256-data-types} -デフォルトでは、ドライバは *UInt64、Int128、(U)Int256* 型のフィールドを文字列として表示しますが、**表示するだけで変換はしません**。これは、次の計算フィールドを記述しようとするとエラーが発生することを意味します。 -```text -LEFT([myUInt256], 2) // エラー! -``` -大きな整数フィールドを文字列として扱うには、フィールドを STR() 関数で明示的にラップする必要があります。 - -```text -LEFT(STR([myUInt256]), 2) // 正常に動作します! -``` - -しかし、そのようなフィールドは大抵、ユニークな値の数を見つけるために使用されます *(Watch ID や Visit ID などの ID、Yandex.Metrica における)* または視覚化の詳細を指定する *Dimension* として使用され、正常に機能します。 - -```text -COUNTD([myUInt256]) // こちらも正常に動作します! 
-``` -UInt64 フィールドを持つテーブルのデータプレビュー(データの表示)を使用する際、エラーは今は表示されません。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/tableau/tableau-connection-tips.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/tableau/tableau-connection-tips.md.hash deleted file mode 100644 index a42e50c769c..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/tableau/tableau-connection-tips.md.hash +++ /dev/null @@ -1 +0,0 @@ -604e4002cc5640cc diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/tableau/tableau-online-and-clickhouse.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/tableau/tableau-online-and-clickhouse.md deleted file mode 100644 index 1157ab0f7e2..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/tableau/tableau-online-and-clickhouse.md +++ /dev/null @@ -1,108 +0,0 @@ ---- -sidebar_label: 'Tableau Online' -sidebar_position: 2 -slug: '/integrations/tableau-online' -keywords: -- 'clickhouse' -- 'tableau' -- 'online' -- 'mysql' -- 'connect' -- 'integrate' -- 'ui' -description: 'Tableau Online streamlines the power of data to make people faster - and more confident decision makers from anywhere.' -title: 'Tableau Online' ---- - -import MySQLCloudSetup from '@site/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_clickhouse_mysql_cloud_setup.mdx'; -import MySQLOnPremiseSetup from '@site/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_clickhouse_mysql_on_premise_setup.mdx'; -import Image from '@theme/IdealImage'; -import tableau_online_01 from '@site/static/images/integrations/data-visualization/tableau_online_01.png'; -import tableau_online_02 from '@site/static/images/integrations/data-visualization/tableau_online_02.png'; -import tableau_online_03 from '@site/static/images/integrations/data-visualization/tableau_online_03.png'; -import tableau_online_04 from '@site/static/images/integrations/data-visualization/tableau_online_04.png'; -import tableau_desktop_01 from '@site/static/images/integrations/data-visualization/tableau_desktop_01.png'; -import tableau_desktop_02 from '@site/static/images/integrations/data-visualization/tableau_desktop_02.png'; -import tableau_desktop_03 from '@site/static/images/integrations/data-visualization/tableau_desktop_03.png'; -import tableau_desktop_04 from '@site/static/images/integrations/data-visualization/tableau_desktop_04.png'; -import tableau_desktop_05 from '@site/static/images/integrations/data-visualization/tableau_desktop_05.png'; - - -# Tableau Online - -Tableau Onlineは、公式のMySQLデータソースを使用して、ClickHouse CloudまたはオンプレミスのClickHouseセットアップにMySQLインターフェイス経由で接続できます。 - -## ClickHouse Cloudの設定 {#clickhouse-cloud-setup} - - -## オンプレミスClickHouseサーバーの設定 {#on-premise-clickhouse-server-setup} - - -## Tableau OnlineをClickHouseに接続する(SSLなしのオンプレミス) {#connecting-tableau-online-to-clickhouse-on-premise-without-ssl} - -Tableau Cloudサイトにログインし、新しい公開データソースを追加します。 - - -
- -利用可能なコネクタのリストから"MySQL"を選択します。 - - -
- -ClickHouseの設定中に収集した接続詳細を指定します。 - - -
- -Tableau Onlineはデータベースを調査し、利用可能なテーブルのリストを提供します。必要なテーブルを右側のキャンバスにドラッグします。また、「Update Now」をクリックしてデータをプレビューしたり、調査したフィールドタイプや名前を微調整することもできます。 - - -
- -その後は、右上の「Publish As」をクリックするだけで、新しく作成されたデータセットを通常通りTableau Onlineで使用できるようになります。 - -注: Tableau OnlineをTableau Desktopと組み合わせて使用し、ClickHouseデータセットを共有したい場合は、Tableau DesktopでもデフォルトのMySQLコネクタを使用し、データソースドロップダウンからMySQLを選択したときに表示されるセットアップガイドに従ってください。M1 Macを使用している場合は、[このトラブルシューティングスレッド](https://community.tableau.com/s/question/0D58b0000Ar6OhvCQE/unable-to-install-mysql-driver-for-m1-mac)をチェックして、ドライバーのインストール回避策を確認してください。 - -## Tableau OnlineをClickHouseに接続する(SSLを使用したCloudまたはオンプレミスの設定) {#connecting-tableau-online-to-clickhouse-cloud-or-on-premise-setup-with-ssl} - -Tableau OnlineのMySQL接続設定ウィザードからSSL証明書を提供することはできないため、唯一の方法はTableau Desktopを使用して接続を設定し、それをTableau Onlineにエクスポートすることです。しかし、このプロセスはかなり簡単です。 - -WindowsまたはMacマシンでTableau Desktopを実行し、「Connect」->「To a Server」->「MySQL」を選択します。最初にマシンにMySQLドライバーをインストールする必要がある場合があります。データソースドロップダウンからMySQLを選択すると表示されるセットアップガイドに従ってこれを行うことができます。M1 Macを使用している場合は、[このトラブルシューティングスレッド](https://community.tableau.com/s/question/0D58b0000Ar6OhvCQE/unable-to-install-mysql-driver-for-m1-mac)をチェックしてドライバーのインストール回避策を確認してください。 - - -
- -:::note -MySQL接続設定UIで「SSL」オプションが有効になっていることを確認してください。 -ClickHouse CloudのSSL証明書は、[Let's Encrypt](https://letsencrypt.org/certificates/)により署名されています。 -このルート証明書は[こちら](https://letsencrypt.org/certs/isrgrootx1.pem)からダウンロードできます。 -::: - -ClickHouse CloudインスタンスのMySQLユーザー資格情報とダウンロードしたルート証明書へのパスを提供します。 - - -
- -希望のテーブルを通常通り選択し(Tableau Onlineと同様に)、 -「Server」->「Publish Data Source」-> Tableau Cloudを選択します。 - - -
- -重要: 「Authentication」オプションで「Embedded password」を選択する必要があります。 - - -
- -さらに、「Update workbook to use the published data source」オプションを選択します。 - - -
- -最後に、「Publish」をクリックすると、埋め込まれた資格情報を持つデータソースが自動的にTableau Onlineで開かれます。 - -## 知られている制限事項 (ClickHouse 23.11) {#known-limitations-clickhouse-2311} - -すべての知られている制限事項はClickHouse `23.11`で修正されました。他に互換性のない問題が発生した場合は、[お問い合わせ](https://clickhouse.com/company/contact)いただくか、[新しいイシュー](https://github.com/ClickHouse/ClickHouse/issues)を作成してください。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/tableau/tableau-online-and-clickhouse.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/tableau/tableau-online-and-clickhouse.md.hash deleted file mode 100644 index 26a048aafee..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/tableau/tableau-online-and-clickhouse.md.hash +++ /dev/null @@ -1 +0,0 @@ -c12a24ad0233f97b diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/zingdata-and-clickhouse.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/zingdata-and-clickhouse.md deleted file mode 100644 index 4e3fdd3cc31..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/zingdata-and-clickhouse.md +++ /dev/null @@ -1,100 +0,0 @@ ---- -sidebar_label: 'Zing Data' -sidebar_position: 206 -slug: '/integrations/zingdata' -keywords: -- 'clickhouse' -- 'Zing Data' -- 'connect' -- 'integrate' -- 'ui' -description: 'Zing Dataは、iOS、Android、およびWeb用に作成された、ClickHouse向けのシンプルなソーシャルビジネスインテリジェンスです。' -title: 'Zing DataをClickHouseに接続する' ---- - -import ConnectionDetails from '@site/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_gather_your_details_http.mdx'; -import Image from '@theme/IdealImage'; -import zing_01 from '@site/static/images/integrations/data-visualization/zing_01.png'; -import zing_02 from '@site/static/images/integrations/data-visualization/zing_02.png'; -import zing_03 from '@site/static/images/integrations/data-visualization/zing_03.png'; -import zing_04 from '@site/static/images/integrations/data-visualization/zing_04.png'; -import zing_05 from '@site/static/images/integrations/data-visualization/zing_05.png'; -import zing_06 from '@site/static/images/integrations/data-visualization/zing_06.png'; -import zing_07 from '@site/static/images/integrations/data-visualization/zing_07.png'; -import zing_08 from '@site/static/images/integrations/data-visualization/zing_08.png'; -import zing_09 from '@site/static/images/integrations/data-visualization/zing_09.png'; -import CommunityMaintainedBadge from '@theme/badges/CommunityMaintained'; - - -# Connect Zing Data to ClickHouse - - - -Zing Data は、データ探索と視覚化のプラットフォームです。Zing Data は、ClickHouse が提供する JS ドライバーを使用して ClickHouse に接続します。 - -## How to connect {#how-to-connect} -1. 接続詳細を収集します。 - - -2. Zing Data をダウンロードまたは訪問します。 - - * モバイルで Zing Data を使用して Clickhouse を利用するには、[Google Play ストア](https://play.google.com/store/apps/details?id=com.getzingdata.android) または [Apple App Store](https://apps.apple.com/us/app/zing-data-collaborative-bi/id1563294091) から Zing Data アプリをダウンロードしてください。 - - * ウェブで Zing Data を使用して Clickhouse を利用するには、[Zing ウェブコンソール](https://console.getzingdata.com/) にアクセスしてアカウントを作成します。 - -3. データソースを追加します。 - - * Zing Data で ClickHouse データに対話するには、**_データソース_** を定義する必要があります。Zing Data のモバイルアプリメニューで **Sources** を選択し、次に **Add a Datasource** をクリックします。 - - * ウェブでデータソースを追加するには、上部メニューの **Data Sources** をクリックし、**New Datasource** をクリックしてドロップダウンメニューから **Clickhouse** を選択します。 - - -
- -4. Fill in the connection details and click **Check Connection**. - - -
- -5. If the connection succeeds, Zing proceeds to table selection. Select the tables you need and click **Save**. If Zing cannot connect to the data source, a message asks you to verify your credentials and retry. If verifying your credentials and retrying does not resolve the issue, please reach out to Zing support here. - - -
- -6. Once the ClickHouse data source is added, it becomes available to all members of your Zing organization under the **Data Sources** / **Sources** tab. - -## Creating Charts and Dashboards in Zing Data {#creating-charts-and-dashboards-in-zing-data} - -1. After the ClickHouse data source is added, click **Zing App** on the web, or tap the data source on mobile, to start creating charts. - -2. Click a table in the list of tables to create a chart. - - -
- -3. Use the visual query builder to select the desired fields, aggregations, and so on, then click **Run Question**. - - -
- -4. If you are comfortable with SQL, you can also write custom SQL to run queries and create charts (see the sketch after this list). - - - - -5. A sample chart looks like the following. The question can be saved using the three-dot menu. You can add comments to a chart, tag team members, create real-time alerts, and change the chart type. - - -
- -6. Dashboards can be created using the "+" icon under **Dashboards** on the home screen. Existing questions can be dragged onto a dashboard to display them. - - -
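For step 4 above, the following is a minimal sketch of a custom SQL query you might paste into Zing's SQL editor. The `trips` table and its `pickup_datetime` / `fare_amount` columns are hypothetical placeholders — substitute any table that exists in your ClickHouse database.

```sql
-- Hypothetical example: daily trip count and average fare, suitable for a line or bar chart
SELECT
    toDate(pickup_datetime) AS day,
    count() AS total_trips,
    avg(fare_amount) AS avg_fare
FROM trips
GROUP BY day
ORDER BY day;
```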
- -## Related Content {#related-content} - -- Blog: [ClickHouse を用いたデータの視覚化 - Zing Data](https://getzingdata.com/blog/zing-adds-support-for-clickhouse-as-a-data-source/) -- [Documentation](https://docs.getzingdata.com/docs/) -- [Quick Start](https://getzingdata.com/quickstart/) -- Guide to [Create Dashboards](https://getzingdata.com/blog/new-feature-create-multi-question-dashboards/) diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/zingdata-and-clickhouse.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/zingdata-and-clickhouse.md.hash deleted file mode 100644 index bc5ba1517ca..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/zingdata-and-clickhouse.md.hash +++ /dev/null @@ -1 +0,0 @@ -52e86daca719e9f6 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/index.mdx b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/index.mdx deleted file mode 100644 index 54240a0bd84..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/index.mdx +++ /dev/null @@ -1,395 +0,0 @@ ---- -'slug': '/integrations' -'title': 'Integrations' -'keywords': -- 'integrations' -- 'integrate' -- 'integrate with' -'description': 'ClickHouseとの統合' -'hide_table_of_contents': true ---- - -import acceldatapng from '@site/static/images/integrations/logos/acceldata_logo.png'; -import Amazonmsksvg from '@site/static/images/integrations/logos/amazon_msk.svg'; -import Astratosvg from '@site/static/images/integrations/logos/astrato_logo.svg'; -import apachestreamparkpng from '@site/static/images/integrations/logos/apache-streampark.png'; -import Azureeventhubssvg from '@site/static/images/integrations/logos/azure_event_hubs.svg'; -import blinkopspng from '@site/static/images/integrations/logos/blinkops_logo.png'; -import Chdbsvg from '@site/static/images/integrations/logos/chdb.svg'; -import Clickhousesvg from '@site/static/images/integrations/logos/clickhouse.svg'; -import Clickhousemonitoringdashboardsvg from '@site/static/images/integrations/logos/clickhouse-monitoring-dashboard.svg'; -import Cloudcanalsvg from '@site/static/images/integrations/logos/clougence.svg'; -import Cloudquerysvg from '@site/static/images/integrations/logos/cloudquery_logo.svg'; -import Confluentsvg from '@site/static/images/integrations/logos/confluent.svg'; -import Csharpsvg from '@site/static/images/integrations/logos/csharp.svg'; -import Cubejssvg from '@site/static/images/integrations/logos/cubejs.svg'; -import Datagripsvg from '@site/static/images/integrations/logos/data_grip.svg'; -import Datalenssvg from '@site/static/images/integrations/logos/datalens.svg'; -import Dbeaversvg from '@site/static/images/integrations/logos/dbeaver_logo.svg'; -import Dbtsvg from '@site/static/images/integrations/logos/dbt.svg'; -import Deepnotesvg from '@site/static/images/integrations/logos/deepnote.svg'; -import Dlthubsvg from '@site/static/images/integrations/logos/dlthub_logo.svg'; -import Draxlrsvg from '@site/static/images/integrations/logos/draxlr.svg'; -import Emqxsvg from '@site/static/images/integrations/logos/emqx.svg'; -import Explosvg from '@site/static/images/integrations/logos/explo.svg'; -import Fivetransvg from '@site/static/images/integrations/logos/fivetran.svg'; -import Gcssvg from '@site/static/images/integrations/logos/gcs.svg'; -import Golangsvg from '@site/static/images/integrations/logos/golang.svg'; -import Grafanasvg from 
'@site/static/images/integrations/logos/grafana.svg'; -import Hdfssvg from '@site/static/images/integrations/logos/hadoop.svg'; -import Hivesvg from '@site/static/images/integrations/logos/hive.svg'; -import Javasvg from '@site/static/images/integrations/logos/java.svg'; -import Jitsusvg from '@site/static/images/integrations/logos/jitsu.svg'; -import Kafkasvg from '@site/static/images/integrations/logos/kafka.svg'; -import Kinesissvg from '@site/static/images/integrations/logos/amazon_kinesis_logo.svg'; -import Kestrasvg from '@site/static/images/integrations/logos/kestra.svg'; -import Lookersvg from '@site/static/images/integrations/logos/looker.svg'; -import Lookerstudiosvg from '@site/static/images/integrations/logos/looker_studio.svg'; -import Mongodbsvg from '@site/static/images/integrations/logos/mongodb.svg'; -import Mysqlsvg from '@site/static/images/integrations/logos/mysql.svg'; -import Natssvg from '@site/static/images/integrations/logos/nats.svg'; -import Nodesvg from '@site/static/images/integrations/logos/node_js.svg'; -import Omnisvg from '@site/static/images/integrations/logos/omni.svg'; -import Observablesvg from '@site/static/images/integrations/logos/observable.svg'; -import opsramppng from '@site/static/images/integrations/logos/ops_ramp_logo.png'; -import Popsinksvg from '@site/static/images/integrations/logos/popsink.svg'; -import Postgresqlsvg from '@site/static/images/integrations/logos/postgresql.svg'; -import Prequelsvg from '@site/static/images/integrations/logos/prequel.svg'; -import Pythonsvg from '@site/static/images/integrations/logos/notext-python.svg'; -import Qrynsvg from '@site/static/images/integrations/logos/qryn.svg'; -import Quesmasvg from '@site/static/images/integrations/logos/quesma.svg'; -import Quicksightsvg from '@site/static/images/integrations/logos/quicksight.svg'; -import Rabbitmqsvg from '@site/static/images/integrations/logos/rabbitmq.svg'; -import Redissvg from '@site/static/images/integrations/logos/redis.svg'; -import restackpng from '@site/static/images/integrations/logos/restack_logo.png'; -import Retoolsvg from '@site/static/images/integrations/logos/retool.svg'; -import Rillsvg from '@site/static/images/integrations/logos/rill.svg'; -import Risingwavesvg from '@site/static/images/integrations/logos/risingwave.svg'; -import RocketbiSVG from '@site/static/images/integrations/logos/rocketbi-logo.svg'; -import Rocksdbsvg from '@site/static/images/integrations/logos/rocksdb.svg'; -import Rudderstacksvg from '@site/static/images/integrations/logos/rudderstack.svg'; -import S3svg from '@site/static/images/integrations/logos/amazon_s3_logo.svg'; -import sematextpng from '@site/static/images/integrations/logos/sematext_logo.png'; -import skywalkingjpeg from '@site/static/images/integrations/logos/skywalking_logo.jpeg'; -import snappyflowpng from '@site/static/images/integrations/logos/snappy_flow_logo.png'; -import Sparksvg from '@site/static/images/integrations/logos/apache_spark_logo.svg'; -import sodapng from '@site/static/images/integrations/logos/soda_logo.png'; -import Sqlitesvg from '@site/static/images/integrations/logos/sqlite.svg'; -import Supersetsvg from '@site/static/images/integrations/logos/superset.svg'; -import Tablumsvg from '@site/static/images/integrations/logos/tablum.svg'; -import teleport from '@site/static/images/integrations/logos/teleport_logo.png'; -import Trickstercachesvg from '@site/static/images/integrations/logos/trickster-logo.svg'; -import Upstashsvg from 
'@site/static/images/integrations/logos/upstash.svg'; -import Yepcodesvg from '@site/static/images/integrations/logos/yepcode.svg'; -import Warpstreamsvg from '@site/static/images/integrations/logos/warpstream.svg'; -import Bytewaxsvg from '@site/static/images/integrations/logos/bytewax.svg'; -import glue_logo from '@site/static/images/integrations/logos/glue_logo.png'; -import azure_synapse_logo from '@site/static/images/integrations/logos/azure-synapse.png'; -import azure_data_factory_logo from '@site/static/images/integrations/logos/azure-data-factory.png'; -import logo_cpp from '@site/static/images/integrations/logos/logo_cpp.png'; -import cassandra from '@site/static/images/integrations/logos/cassandra.png'; -import deltalake from '@site/static/images/integrations/logos/deltalake.png'; -import hudi from '@site/static/images/integrations/logos/hudi.png'; -import iceberg from '@site/static/images/integrations/logos/iceberg.png'; -import metabase from '@site/static/images/integrations/logos/logo_metabase.png'; -import minio from '@site/static/images/integrations/logos/minio.png'; -import odbc from '@site/static/images/integrations/logos/odbc.png'; -import logo_otel from '@site/static/images/integrations/logos/logo_otel.png'; -import powerbi from '@site/static/images/integrations/logos/powerbi.png'; -import redpanda from '@site/static/images/integrations/logos/logo_redpanda.png'; -import rust from '@site/static/images/integrations/logos/logo_rust.png'; -import tableau from '@site/static/images/integrations/logos/logo_tableau.png'; -import airbyte_logo from '@site/static/images/integrations/logos/airbyte-logo.png'; -import acceldata_logo from '@site/static/images/integrations/logos/acceldata_logo.png'; -import atlas_logo from '@site/static/images/integrations/logos/atlas-logo.png'; -import automq_logo from '@site/static/images/integrations/logos/automq.png'; -import blinkops_logo from '@site/static/images/integrations/logos/blinkops_logo.png'; -import calyptia_logo from '@site/static/images/integrations/logos/logo_calyptia.png'; -import dataddo_logo from '@site/static/images/integrations/logos/logo_dataddo.png'; -import dbvisualizer_logo from '@site/static/images/integrations/logos/logo_dbvisualizer.png'; -import decodable_logo from '@site/static/images/integrations/logos/logo_decodable.png'; -import explo_logo from '@site/static/images/integrations/logos/explo.png'; -import gigasheet_logo from '@site/static/images/integrations/logos/gigasheet.png'; -import glassflow_logo from '@site/static/images/integrations/logos/glassflow.png'; -import goldsky_logo from '@site/static/images/integrations/logos/goldsky.png'; -import growthbook_logo from '@site/static/images/integrations/logos/logo_growthbook.png'; -import hex_logo from '@site/static/images/integrations/logos/logo-hex.png'; -import hightouch_logo from '@site/static/images/integrations/logos/logo_hightouch.png'; -import holistics_logo from '@site/static/images/integrations/logos/logo_holistics.png'; -import housewatch_logo from '@site/static/images/integrations/logos/housewatch.png'; -import mindsdb_logo from '@site/static/images/integrations/logos/logo_mindsdb.png'; -import mitzu_logo from '@site/static/images/integrations/logos/logo_mitzu.png'; -import mode_logo from '@site/static/images/integrations/logos/logo_mode.png'; -import redash_logo from '@site/static/images/integrations/logos/logo_redash.png'; -import restack_logo from '@site/static/images/integrations/logos/restack_logo.png'; -import sematext_logo from 
'@site/static/images/integrations/logos/sematext_logo.png'; -import streamingfast_logo from '@site/static/images/integrations/logos/streamingfast.png'; -import supabase_logo from '@site/static/images/integrations/logos/logo_supabase.png'; -import teleport_logo from '@site/static/images/integrations/logos/teleport_logo.png'; -import tooljet_logo from '@site/static/images/integrations/logos/tooljet.png'; -import vector_logo from '@site/static/images/integrations/logos/vector.png'; -import zing_logo from '@site/static/images/integrations/logos/zing-logo.png'; -import mage_logo from '@site/static/images/integrations/logos/mage.jpg'; -import metaplane_logo from '@site/static/images/integrations/logos/logo_metaplane.png'; -import openBlocks_logo from '@site/static/images/integrations/logos/logo_openBlocks.png'; -import ramp_logo from '@site/static/images/integrations/logos/ops_ramp_logo.png'; -import runreveal_logo from '@site/static/images/integrations/logos/runreveal.png'; -import sisense_logo from '@site/static/images/integrations/logos/logo_sisense.png'; -import signoz_logo from '@site/static/images/integrations/logos/signoz-logo.png'; -import snappy_flow_logo from '@site/static/images/integrations/logos/snappy_flow_logo.png'; -import soda_logo from '@site/static/images/integrations/logos/soda_logo.png'; -import splunk_logo from '@site/static/images/integrations/logos/splunk_logo.png'; -import streamkap_logo from '@site/static/images/integrations/logos/streamkap-logo.png'; -import airflow_logo from '@site/static/images/integrations/logos/logo_airflow.png'; -import beam_logo from '@site/static/images/integrations/logos/logo_beam.png'; -import inlong_logo from '@site/static/images/integrations/logos/logo_inlong.png'; -import nifi_logo from '@site/static/images/integrations/logos/logo_nifi.png'; -import seatunnel_logo from '@site/static/images/integrations/logos/logo_seatunnel.png'; -import skywalking_logo from '@site/static/images/integrations/logos/skywalking_logo.jpeg'; -import streampark_logo from '@site/static/images/integrations/logos/apache-streampark.png'; -import bytebase_logo from '@site/static/images/integrations/logos/logo_bytebase.png'; -import clickhouse_cl_logo from '@site/static/images/integrations/logos/clickhouse-cl.png'; -import dataflow_logo from '@site/static/images/integrations/logos/dataflow_logo.png'; -import dbnet_logo from '@site/static/images/integrations/logos/dbnet_logo.png'; -import datalens_logo from '@site/static/images/integrations/logos/datalens.png'; -import dataease_logo from '@site/static/images/integrations/logos/dataease.png'; -import datahub_logo from '@site/static/images/integrations/logos/logo_datahub.png'; -import deepflow_logo from '@site/static/images/integrations/logos/logo_deepflow.png'; -import easypanel_logo from '@site/static/images/integrations/logos/logo-easypanel.png'; -import flink_logo from '@site/static/images/integrations/logos/logo_flink.png'; -import goose_logo from '@site/static/images/integrations/logos/goose_logo.png'; -import ibis_logo from '@site/static/images/integrations/logos/logo_ibis.png'; -import jaeger_logo from '@site/static/images/integrations/logos/logo_jaeger.png'; -import jupyter_logo from '@site/static/images/integrations/logos/jupyter.png'; -import adaptive_logo from '@site/static/images/integrations/logos/adaptive_logo.png'; -import mprove_logo from '@site/static/images/integrations/logos/logo_mprove.png'; -import php_logo from '@site/static/images/integrations/logos/logo_php.png'; -import pinax_logo from 
'@site/static/images/integrations/logos/pinax-logo.png'; -import pulse_logo from '@site/static/images/integrations/logos/pulse.png'; -import qstudio_logo from '@site/static/images/integrations/logos/qstudio.png'; -import qryn_logo from '@site/static/images/integrations/logos/logo_qryn.png'; -import rsyslog_logo from '@site/static/images/integrations/logos/rsyslog.png'; -import ruby_logo from '@site/static/images/integrations/logos/logo_ruby.png'; -import r_logo from '@site/static/images/integrations/logos/logo_r.png'; -import scala_logo from '@site/static/images/integrations/logos/logo_scala.png'; -import schemaspy_logo from '@site/static/images/integrations/logos/schemaspy_logo.png'; -import visual_studio_logo from '@site/static/images/integrations/logos/logo_vs.png'; -import vulcansql_logo from '@site/static/images/integrations/logos/logo-vulcansql.png'; -import great_expectations_logo from '@site/static/images/integrations/logos/great-expectations.webp'; -import Hashboardsvg from '@site/static/images/integrations/logos/hashboard.svg'; -import luzmo_logo from '@site/static/images/integrations/logos/luzmo.png'; -import vs_logo from '@site/static/images/integrations/logos/logo_vs.png'; -import Moosesvg from '@site/static/images/integrations/logos/moose_logo.svg'; -import chartbrew_logo from '@site/static/images/integrations/logos/logo_chartbrew.png'; -import marimo_logo from '@site/static/images/integrations/logos/logo_marimo.png'; -import Image from '@theme/IdealImage'; - -ClickHouse の統合は、そのサポートレベルに応じて整理されています: - -- **コア統合:** ClickHouse によって構築または維持され、ClickHouse からサポートを受けており、ClickHouse GitHub 組織に存在します。 -- **パートナー統合:** 第三者のソフトウェアベンダーによって構築または維持され、サポートされています。 -- **コミュニティ統合:** コミュニティメンバーによって構築または維持され、サポートされています。公共の GitHub リポジトリやコミュニティ Slack チャンネルを除き、直接のサポートはありません。 - -各統合はさらに **言語クライアント**、**データ取り込み**、**データ可視化**、および **SQL クライアント** カテゴリに分類されます。 - -:::note -現在、以下の ClickHouse 統合のリストを積極的に編纂しているため、網羅的ではありません。関連する ClickHouse 統合をリストに自由に -[貢献](https://github.com/ClickHouse/clickhouse-docs#contributing)してください。 -::: -## コア統合 - -

- -|名前|ロゴ|カテゴリー|説明|リソース| -|------|----|----------------|------------------|-------------| -|Amazon Kinesis| |データ取り込み|Amazon Kinesis との統合。|[ドキュメント](/integrations/clickpipes/kinesis/)| -|Amazon MSK| |データ取り込み|Amazon Managed Streaming for Apache Kafka (MSK) との統合。|[ドキュメント](/integrations/kafka/cloud/amazon-msk/)| -|Amazon S3||データ取り込み|ClickHouse の組み込みの S3 関数を使用して、S3 データをインポート、エクスポート、および変換します。|[ドキュメント](/integrations/data-ingestion/s3/index.md)| -|Amazon Glue||データ取り込み|JDBC 経由で ClickHouse をクエリします。|[ドキュメント](/integrations/glue)| -|Apache Spark||データ取り込み|Spark ClickHouse Connector は、Spark DataSource V2 に基づいた高性能コネクタです。|[GitHub](https://github.com/housepower/spark-clickhouse-connector),
[ドキュメント](/integrations/data-ingestion/apache-spark/index.md)| -|Azure Event Hubs||データ取り込み|Apache Kafka のネイティブプロトコルをサポートするデータストリーミングプラットフォーム。|[ウェブサイト](https://azure.microsoft.com/en-gb/products/event-hubs)| -|Azure Synapse||データ取り込み|ビッグデータとデータウェアハウジング向けのクラウドベースの分析サービス。|[ドキュメント](/integrations/azure-synapse)| -|Azure Data Factory||データ取り込み|大規模なデータワークフローを作成、スケジュール、およびオーケストレーションできるクラウドベースのデータ統合サービス。|[ドキュメント](/integrations/azure-data-factory)| -|C++||言語クライアント|ClickHouse 用の C++ クライアント。|[GitHub](https://github.com/ClickHouse/clickhouse-cpp)| -|Cassandra||データ取り込み|ClickHouse が [Cassandra](https://cassandra.apache.org/) を辞書ソースとして使用できるようにします。|[ドキュメント](/sql-reference/dictionaries/index.md#cassandra)| -|CHDB||AI/ML|組み込みの OLAP SQL エンジン。|[GitHub](https://github.com/chdb-io/chdb#/),
[ドキュメント](https://doc.chdb.io/)| -|ClickHouse Client||SQL クライアント|ClickHouse Client は ClickHouse のネイティブコマンドラインクライアントです。|[ドキュメント](/interfaces/cli.md)| -|Confluent||データ取り込み|Confluent プラットフォーム上の Apache Kafka との統合。|[ドキュメント](/integrations/kafka/cloud/confluent/custom-connector)| -|dbt||データ統合|dbt (データビルドツール) を使用して、単純に SELECT ステートメントを書くことで ClickHouse のデータを変換します。dbt は ELT の "T" を担います。|[ドキュメント](/integrations/data-ingestion/etl-tools/dbt/index.md)| -|DeltaLake||データ取り込み|既存の [Delta Lake](https://github.com/delta-io/delta) テーブルに対する読み取り専用の統合を提供します。|[ドキュメント](/engines/table-engines/integrations/deltalake)| -|EmbeddedRocksDB||データ統合|ClickHouse を [rocksdb](http://rocksdb.org/) と統合できるようにします。|[ドキュメント](/engines/table-engines/integrations/embedded-rocksdb)| -|Fivetran||データ取り込み|[ClickHouse Cloud](https://clickhouse.com/cloud) への [Fivetran データ移動プラットフォーム](https://www.fivetran.com/) の出力先。|[ドキュメント](/integrations/data-ingestion/etl-tools/fivetran/index.md)| -|Google Cloud Storage||データ取り込み|ClickHouse の組み込みの `S3` 関数を使用して、GCS データをインポート、エクスポート、および変換します。|[ドキュメント](/integrations/data-ingestion/s3/index.md)| -|Golang||言語クライアント|Go クライアントは、ClickHouse への接続のためのパフォーマンスが高く、オーバーヘッドの少ないネイティブインターフェースを使用します。|[ドキュメント](/integrations/language-clients/go/index.md)| -|HDFS||データ取り込み|ClickHouse を介して [HDFS](https://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-hdfs/HdfsDesign.html) 上のデータを管理できるようにして、[Apache Hadoop](https://en.wikipedia.org/wiki/Apache_Hadoop) エコシステムとの統合を提供します。|[ドキュメント](/engines/table-engines/integrations/hdfs)| -|Hive||データ取り込み|Hive エンジンを使用すると、HDFS Hive テーブル上で `SELECT` クエリを実行できます。|[ドキュメント](/engines/table-engines/integrations/hive)| -|Hudi||データ取り込み|既存の Apache [Hudi](https://hudi.apache.org/) テーブルに対する読み取り専用の統合を提供します。|[ドキュメント](/engines/table-engines/integrations/hudi)| -|Iceberg||データ取り込み|既存の Apache [Iceberg](https://iceberg.apache.org/) テーブルに対する読み取り専用の統合を提供します。|[ドキュメント](/engines/table-engines/integrations/iceberg)| -|Java, JDBC||言語クライアント|Java クライアントと JDBC ドライバー。|[ドキュメント](/integrations/language-clients/java/index.md)| -|Kafka||データ取り込み|オープンソースの分散イベントストリーミングプラットフォームである Apache Kafka との統合。|[ドキュメント](/integrations/kafka)| -|Looker Studio||データ可視化|Looker Studio は、データを情報豊かで読みやすく、共有しやすく、完全にカスタマイズ可能なダッシュボードやレポートに変換する無料のツールです。|[ドキュメント](/integrations/lookerstudio)| -|Looker||データ可視化|Looker は、ビジネスインテリジェンス、データアプリケーション、および組み込み分析のためのエンタープライズプラットフォームで、リアルタイムで洞察を探索し共有するのに役立ちます。|[ドキュメント](/integrations/looker)| -|Metabase||データ可視化|Metabase は、データについて質問するための使いやすいオープンソースの UI ツールです。|[ドキュメント](/integrations/metabase)| -|MinIO||データ取り込み|MinIO は、GNU Affero General Public License v3.0 のもとでリリースされた高性能オブジェクトストレージです。Amazon S3 クラウドストレージサービスと API 互換があります。|[ドキュメント](/integrations/minio)| -|MongoDB||データ取り込み|MongoDB エンジンは、リモートの MongoDB コレクションからデータを読み取ることができる読み取り専用のテーブルエンジンです。|[ドキュメント](/engines/table-engines/integrations/mongodb)| -|MySQL||データ取り込み|MySQL エンジンは、リモートの MySQL サーバーに保存されたデータに対して `SELECT` と `INSERT` のクエリを実行できます。|[ドキュメント](/engines/table-engines/integrations/mysql)| -|NATS||データ取り込み|ClickHouse を [NATS](https://nats.io/) と統合できるようにします。|[ドキュメント](/engines/table-engines/integrations/nats)| -|Node.JS||言語クライアント|ClickHouse に接続するための公式 JS クライアント。|[ドキュメント](/integrations/language-clients/js.md)| -|ODBC||データ統合|ClickHouse が外部データベースに [ODBC](https://en.wikipedia.org/wiki/Open_Database_Connectivity) テーブルエンジンを介して接続できるようにします。|[ドキュメント](/engines/table-engines/integrations/odbc)| -|OpenTelemetry||データ取り込み|ログ、メトリック、トレース OpenTelemetry データを ClickHouse に送信できるようにするエクスポーター。|[GitHub](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/exporter/clickhouseexporter)| 
-|PostgreSQL||データ取り込み|PostgreSQL データベースから ClickHouse Cloud へのスナップショットやリアルタイム CDC データレプリケーション。|[ドキュメント](/integrations/postgresql)| -|PowerBI||データ可視化|Microsoft Power BI は、ビジネスインテリジェンスに主に焦点を当てたインタラクティブなデータ可視化ソフトウェア製品です。|[ドキュメント](/integrations/powerbi)| -|Python||言語クライアント|Python から ClickHouse への接続用の Python パッケージのスイート。|[ドキュメント](/integrations/language-clients/python/index.md)| -|QuickSight||データ可視化|Amazon QuickSight は、データ駆動型組織に統一されたビジネスインテリジェンス (BI) を提供します。|[ドキュメント](/integrations/quicksight)| -|RabbitMQ||データ取り込み|ClickHouse が [RabbitMQ](https://www.rabbitmq.com/) に接続できるようにします。|[ドキュメント](/engines/table-engines/integrations/rabbitmq)| -|Redis||データ取り込み|ClickHouse が [Redis](https://redis.io/) を辞書ソースとして使用できるようにします。|[ドキュメント](/sql-reference/dictionaries/index.md#redis)| -|Redpanda||データ取り込み|Redpanda は開発者のためのストリーミングデータプラットフォームです。Apache Kafka との API 互換性があり、10 倍速く、はるかに使いやすく、費用対効果が高いです。|[ブログ](https://redpanda.com/blog/real-time-olap-database-clickhouse-redpanda)| -|Rust||言語クライアント|ClickHouse 用の型付きクライアント。|[ドキュメント](/integrations/language-clients/rust.md)| -|SQLite||データ取り込み|SQLite にデータをインポートおよびエクスポートでき、ClickHouse から直接 SQLite テーブルへのクエリをサポートします。|[ドキュメント](/engines/table-engines/integrations/sqlite)| -|Superset||データ可視化|Apache Superset を使用して ClickHouse データを探索し可視化します。|[ドキュメント](/integrations/data-visualization/superset-and-clickhouse.md)| -|Tableau||データ可視化|ビジネスインテリジェンスに焦点を当てたインタラクティブなデータ可視化ソフトウェア。|[ドキュメント](/integrations/tableau)| -|Tableau Online||データ可視化|Tableau Online は、データの力を活用し、どこからでも迅速かつ自信を持って意思決定を行えるようにします。|[ドキュメント](/integrations/tableau-online)| - -
-
-## Partner integrations - -
- -|名前|ロゴ|カテゴリ|説明|リソース| -|------|----|----------------|------------------|-------------| -|Airbyte||データインジェクション|Airbyteを使用して、ClickHouseにデータをロードおよび同期するための140以上のコネクタを使用したELTデータパイプラインを作成します。|[Documentation](/integrations/data-ingestion/etl-tools/airbyte-and-clickhouse.md)| -|AccelData||データ管理|ADOCはユーザーが視覚化されたデータの信頼性と整合性を監視し、リアルタイムデータ処理と分析を容易にします。|[Documentation](https://docs.acceldata.io/documentation/clickhouse) | -|Atlas||スキーマ管理|ClickHouseのスキーマをコードとして管理します。|[Documentation](https://atlasgo.io/guides/clickhouse?utm_source=clickhouse&utm_term=docs)| -|Astrato||データ視覚化|Astratoは、すべてのユーザーがITなしでダッシュボード、レポート、およびデータアプリを構築できるようにすることで、企業およびデータビジネスに真のセルフサービスBIを提供します。|[Documentation](/integrations/astrato)| -|AutoMQ||データインジェクション|S3およびEBSへの耐久性を分離するクラウドネイティブなKafkaおよびRocketMQの代替品。|[Website](https://www.automq.com/)| -|BlinkOps||セキュリティ自動化|データとユーザー権限を管理するための自動化を作成します。|[Documentation](https://docs.blinkops.com/docs/integrations/clickhouse)| -|Bytewax||データインジェクション|データをClickHouseに変換およびインジェクトするためのオープンソースのPythonストリームプロセッサ。|[Documentation](https://bytewax.io/blog/building-a-click-house-sink-for-bytewax)| -|Calyptia (Fluent Bit)||データインジェクション|CNCF卒業のオープンソースプロジェクトで、ログ、メトリクス、およびトレースの収集、処理、配信を行います。|[Blog](https://clickhouse.com/blog/kubernetes-logs-to-clickhouse-fluent-bit)| -|Chartbrew||データ視覚化|Chartbrewは、ユーザーがリアルタイムでデータを監視およびダッシュボードを作成できるデータ視覚化プラットフォームです。|[Documentation](/integrations/chartbrew-and-clickhouse),
[Website](https://chartbrew.com/integrations/clickhouse),
[Blog](https://chartbrew.com/blog/visualizing-clickhouse-data-with-chartbrew-a-step-by-step-guide/)| -|CloudCanal||データ統合|データ同期および移行ツールです。|[Website](https://www.cloudcanalx.com/us/)| -|CloudQuery||データインジェクション|オープンソースの高性能ELTフレームワークです。|[Documentation](https://www.cloudquery.io/docs/plugins/destinations/clickhouse/overview)| -|Cube.js||データ視覚化|Cubeはデータアプリを構築するためのセマンティックレイヤーです。|[Website](https://cube.dev/for/clickhouse-dashboard)| -|DBeaver||SQLクライアント|無料のマルチプラットフォームデータベース管理ツールです。JDBCドライバーを介してClickHouseに接続します。|[Documentation](/integrations/sql-clients/dbeaver.md)| -|DataGrip||SQLクライアント|DataGripはClickHouseに特化した強力なデータベースIDEです。|[Documentation](/integrations/sql-clients/datagrip.md)| -|Dataddo||データ統合|データ統合プラットフォームです。|[Website](https://www.dataddo.com/storage/clickhouse)| -|DbVisualizer||SQLクライアント|DbVisualizerはClickHouseへの拡張サポートを持つデータベースツールです。|[Documentation](/integrations/sql-clients/dbvisualizer.md)| -|Decodable||データインジェクション|Apache Flink上に構築された強力なストリーム処理。|[Website](https://www.decodable.co/connectors/clickhouse)| -|Deepnote||データ視覚化|Deepnoteはチームが洞察を発見および共有するために構築された共同作業のJupyter互換データノートブックです。|[Documentation](/integrations/data-visualization/deepnote.md)| -|DLT||データ統合|データ読み込みを簡単にするオープンソースのPythonライブラリです。|[Documentation](/integrations/data-ingestion/etl-tools/dlt-and-clickhouse)| -|Draxlr||データ視覚化|Draxlrはデータ視覚化および分析を伴うビジネスインテリジェンスツールです。|[Documentation](/integrations/data-visualization/draxlr-and-clickhouse.md)| -|EMQX||データインジェクション|EMQXは高性能リアルタイムメッセージ処理エンジンを備えたオープンソースのMQTTブローカーで、IoTデバイスのためのイベントストリーミングを大規模に実現します。|[Documentation](/integrations/emqx)| -|Explo| | データ視覚化 | Exploは、どのプラットフォームでも使用できる顧客向けの分析ツールです。 | [Documentation](/integrations/explo) | -|Gigasheet| | データ視覚化 | クラウドビッグデータ分析スプレッドシートが、ビジネスユーザーがClickHouseデータを即座に分析および探索できます。 | [Website](https://gigasheet.com/enterprise) | -|GlassFlow| | データインジェクション | ClickHouseのためのPythonによるリアルタイムイベントストリーミングとデータ変換。 | [Documentation](https://docs.glassflow.dev/integrations/managed-connectors/sinks/clickhouse) | -|Goldsky| | データ統合 | サブグラフを通じた高パフォーマンスWeb3データインデックスおよびリアルタイムデータレプリケーションパイプライン。 | [Documentation](https://docs.goldsky.com/introduction) | -|Grafana||データ視覚化|Grafanaを使用すると、ダッシュボードを通じてすべてのデータを作成、探索、共有できます。|[Documentation](/integrations/data-visualization/grafana/index.md)| -|Great Expectations| | データ管理 | オープンソースのデータ管理ツールで、有料のクラウドオファーがあります。 | [Website](https://greatexpectations.io/) | -|GrowthBook| | データ視覚化 | ウェアハウスネイティブの実験プラットフォーム(機能フラグ付けおよびA/Bテスト)。 | [Documentation](https://docs.growthbook.io/warehouses/clickhouse) | -|HEX| | データ視覚化 | Hexは、ノートブック、データアプリ、SQL、Python、ノーコード、Rなど様々な機能を持つ現代的なコラボレーションプラットフォームです。 | [Documentation](https://learn.hex.tech/docs/connect-to-data/data-connections/overview) | -|Hashboard||データ視覚化|[Hashboard](https://hashboard.com)は、セルフサービスデータ探索およびメトリック追跡を可能にするビジネスインテリジェンスプラットフォームです。|[Documentation](https://docs.hashboard.com/docs/database-connections/clickhouse)| -|HighTouch||データ統合|倉庫から140以上の宛先にデータを直接同期します。|[Website](https://hightouch.com/docs/sources/clickhouse)| -|Holistics||データ視覚化|ClickHouseデータベース用のビジネスインテリジェンス。|[Website](https://www.holistics.io/integrations/clickhouse/)| -|HouseWatch||データ管理|ClickHouseクラスタの監視と管理のためのオープンソースツールです。|[GitHub](https://github.com/PostHog/HouseWatch)| -|IBM Instana| |データ管理|InstanaはClickHouseサーバープロセスを自動検出し、監視できます。|[Documentation](https://www.ibm.com/docs/en/instana-observability/current?topic=technologies-monitoring-clickhouse)| -|Jitsu||データ分析|オープンソースのイベント収集プラットフォームです。|[Documentation](https://docs.jitsu.com/destinations/warehouse/clickhouse)| 
-|LangChain|🦜️🔗|SDK|LangChainは言語モデルを活用したアプリケーションを開発するフレームワークです。|[Documentation](https://python.langchain.com/docs/integrations/vectorstores/clickhouse/)| -|Luzmo||データ視覚化|LuzmoはソフトウェアおよびSaaSアプリケーション向けに完璧に設計されたClickHouse統合を持つ埋め込み分析プラットフォームです。|[Documentation](/integrations/data-visualization/luzmo-and-clickhouse.md)| -|Mage||データインジェクション|データを変換および統合するためのオープンソースデータパイプラインツールです。|[Documentation](https://docs.mage.ai/integrations/databases/ClickHouse)| -|Metaplane||データ管理|すべてのデータチーム向けのデータ可視化。|[Website](https://www.metaplane.dev/integrations)| -|MindsDB||AI/ML|企業データからAIをカスタマイズするためのプラットフォーム。|[Website](https://mindsdb.com/clickhouse-machine-learning )| -|Mitzu||データ視覚化|Mitzuは、データをコピーすることなくファネル、リテンション、ユーザーセグメンテーションの洞察を見つけることができるノーコードのウェアハウスネイティブ製品分析アプリケーションです。|[Documentation](/integrations/mitzu)| -|Mode Analytics||データ視覚化|データチームを中心に構築されたビジネスインテリジェンス。|[Website](https://mode.com/)| -|Moose||データ統合|MooseはClickhouse上にプロダクションアプリケーションを構築するためのオープンソースの開発者フレームワークです。|[Website](https://www.fiveonefour.com/moose)| -|Omni||データ視覚化|あなたの言語で話すビジネスインテリジェンス。自分の方法でデータを探索、視覚化、およびモデリングします。スプレッドシートからSQLまで—1つのプラットフォームで実現。| [Website](https://omni.co/)| -|Openblocks||SQLクライアント|OpenblocksはUIを構築するためのローコードプラットフォームです。|[Documentation](https://blog.openblocks.dev/blog/openblocks-x-clickhouse)| -|OpsRamp (HP)| |データ管理|ClickHouseの可観測性メトリクスを提供します。|[Documentation](https://docs.opsramp.com/integrations/database-no-sql/automonitor-clickhouse-monitoring/)| -|Popsink||データ統合|ClickHouseにリアルタイムの変更データキャプチャ(CDC)パイプラインを構築します。|[Documentation](https://docs.popsink.com/connectors/target/clickhouse/)| -|Prequel||データ共有|あなたのClickHouseインスタンスをPrequelに接続して、ユーザーとパートナーとのデータを共有したり同期したりします。|[Documentation](https://docs.prequel.co/docs/sources-clickhouse-generic)| -|Quesma||データ統合|KibanaとOpenSearch DashboardsをClickHouseのデータで使用します。|[Website](https://quesma.com/quesma-for-elk)| -|Redash||データ視覚化|データソースに接続し、データをクエリし、ダッシュボードを作成して可視化し、共有します。|[Website](https://redash.io/help/data-sources/querying/supported-data-sources)| -|Restack Data Hub||データガバナンス|ユーザーはRestack Data Hubを使用して、より包括的なデータガバナンスおよび可観測性フレームワークを実現できます。|[Documentation](https://www.restack.io/docs/datahub-knowledge-datahub-clickhouse-integration)| -|Restack OpenMetadata||データ品質|Restack OpenMetadataは、メタデータ抽出、クエリ使用状況追跡、データプロファイリング、およびデータ品質チェックをサポートします。|[Documentation](https://www.restack.io/docs/openmetadata-knowledge-openmetadata-clickhouse-integration)| -|Retool||ノーコード|ドラッグ&ドロップインターフェースでアプリケーションを作成します。|[Documentation](/integrations/retool)| -|Rill||データ視覚化|RillはOLAPエンジンを使用してデータをスライス&ダイスするために特化したオペレーションBIツールです。|[Documentation](https://docs.rilldata.com/reference/olap-engines/clickhouse)| -|RisingWave||データインジェクション| SQLストリーム処理をPostgresのような体験で実現。Apache Flinkより10倍速く、コスト効率も良いです。|[Documentation](https://docs.risingwave.com/docs/current/sink-to-clickhouse/)| -|RudderStack||データインジェクション|RudderStackは、顧客データを必要なツールやチームに収集して送信するのを簡単にします。|[Documentation](https://www.rudderstack.com/docs/destinations/warehouse-destinations/clickhouse/)| -|RunReveal||データインジェクション|SaaSアプリケーションから監査ログをClickHouseに取り込み、正規化します。定期的なクエリからアラートと検出を作成します。|[Website](https://runreveal.com)| -|Sematext||データ管理|ClickHouseデータベースの可観測性監視。|[Documentation](https://sematext.com/docs/integration/clickhouse/)| -|SiSense||データ視覚化|どのアプリケーションやワークフローにも分析を埋め込む。|[Website](https://www.sisense.com/data-connectors/)| -|SigNoz||データ視覚化|オープンソースの可観測性プラットフォーム。|[Documentation](https://www.signoz.io/docs/architecture/)| -|Snappy Flow||データ管理|プラグインを介してClickHouseデータベースメトリクスを収集します。|[Documentation](https://docs.snappyflow.io/docs/Integrations/clickhouse/instance)| 
-|Soda||データ品質|Soda統合は、組織がデータをデータベースに読み込む前にデータ品質チェックを実行することで、データ品質問題を検出、解決、予防するのを簡単にします。|[Website](https://www.soda.io/integrations/clickhouse)| -|Splunk||データ統合|ClickHouse Cloud AuditログをSplunkにインポートするためのSplunkモジュラー入力。|[Website](https://splunkbase.splunk.com/app/7709),
[Documentation](/integrations/tools/data-integration/splunk/index.md)| -|StreamingFast||データインジェクション|ブロックチェーン非依存、並列化されたストリーミングファーストデータエンジンです。|[Website](https://www.streamingfast.io/)| -|Streamkap||データインジェクション|高スループットのリアルタイムCDC(変更データキャプチャ)ストリーミングをClickHouseに設定します。|[Documentation](https://docs.streamkap.com/docs/clickhouse)| -|Supabase||データインジェクション|オープンソースのFirebaseの代替品です。|[GitHub](https://github.com/supabase/wrappers/tree/main/wrappers/src/fdw/clickhouse_fdw),[Blog](https://clickhouse.com/blog/migrating-data-between-clickhouse-postgres)| -|Teleport||安全な接続|Teleportデータベースサービスは、ClickHouseのHTTPおよびネイティブ(TCP)インターフェースに対して利用可能なx509証明書を使用してClickHouseに認証します。|[Documentation](https://goteleport.com/docs/enroll-resources/database-access/enroll-self-hosted-databases/clickhouse-self-hosted/)| -|TABLUM.IO||SQLクライアント|TABLUM.IOは、さまざまなソースからデータを取り込み、不整合を正規化およびクリーンし、SQLを介してアクセスできるようにします。|[Documentation](/integrations/sql-clients/tablum.md)| -|Tooljet||データ視覚化|ToolJetは、カスタム内部ツールを構築および展開するためのオープンソースのローコードフレームワークです。|[Documentation](https://docs.tooljet.com/docs/data-sources/clickhouse/)| -|Upstash||データインジェクション|サーバーレスKafkaおよび他のソリューションを提供するデータプラットフォームです。|[Website](https://upstash.com/)| -|Vector||データインジェクション|ClickHouseとの互換性を内蔵した軽量で超高速な可観測性パイプラインを構築するためのツールです。|[Documentation](/integrations/vector/)| -|WarpStream||データインジェクション|オブジェクトストレージの上に直接構築されたKafka互換のデータストリーミングプラットフォーム。|[Website](https://www.warpstream.com/)| -|YepCode||データ統合|YepCodeは、ソースコードを愛する統合および自動化ツールです。|[Documentation](https://yepcode.io/docs/integrations/clickhouse/)| -|Zing Data||データ視覚化|ClickHouseのためのシンプルなソーシャルビジネスインテリジェンスで、iOS、Android、ウェブ向けに作られたものです。|[Documentation](https://docs.getzingdata.com/docs/)| - -
- -
-## Community integrations - -
- -|名前|ロゴ|カテゴリ|説明|リソース| -|------|----|----------------|------------------|-------------| -|Apache Airflow||データインジェクション|データエンジニアリングパイプライン用のオープンソースワークフローマネジメントプラットフォーム。|[Github](https://github.com/bryzgaloff/airflow-clickhouse-plugin)| -|Apache Beam||データインジェクション|オープンソースの統一モデルで、データ処理ワークフローを定義および実行するための言語特有のSDKのセットです。Google Dataflowと互換性があります。|[Documentation](/integrations/apache-beam),
[Examples](https://github.com/ClickHouse/clickhouse-beam-connector/)| -|Apache InLong||データインジェクション|膨大なデータのためのワンストップ統合フレームワーク。|[Documentation](https://inlong.apache.org/docs/data_node/load_node/clickhouse)| -|Apache NiFi||データインジェクション|ソフトウェアシステム間のデータフローを自動化します。|[Documentation](/integrations/nifi)| -|Apache SeaTunnel||データインジェクション|SeaTunnelは非常に使いやすい超高性能分散データ統合プラットフォームです。|[Website](https://seatunnel.apache.org/docs/2.3.0/connector-v2/sink/Clickhouse)| -|Apache SkyWalking||データ管理|オープンソースのAPMシステムで、Cloud Nativeアーキテクチャにおける分散システムの監視、トレーシング、診断機能を提供します。|[Blog](https://skywalking.apache.org/blog/2024-03-12-monitoring-clickhouse-through-skywalking/)| -|Apache StreamPark||データインジェクション|ストリーム処理アプリケーション開発フレームワークおよびストリーム処理運用プラットフォーム。|[Website](https://streampark.apache.org/docs/intro)| -|Bytebase||データ管理|オープンソースのデータベースDevOpsツールで、アプリケーション開発ライフサイクル全体にわたってデータベースを管理するためのGitLabです。|[Documentation](https://www.bytebase.com/docs/introduction/supported-databases)| -|C#||言語クライアント|ClickHouse.Clientは、ClickHouseの機能豊富なADO.NETクライアント実装です。|[Documentation](https://github.com/DarkWanderer/ClickHouse.Client/wiki/Quick-start)| -|CHProxy| |データ管理|ChproxyはClickHouseデータベース用のHTTPプロキシおよびロードバランサーです。|[GitHub](https://github.com/ContentSquare/chproxy)| -|Chat-DBT| |AI統合|Chat GPTを使用してClickHouseクエリを作成します。|[GitHub](https://github.com/plmercereau/chat-dbt)| -|ClickHouse Monitoring Dashboard||ダッシュボード|ClickHouseの簡単な監視ダッシュボードです。|[Github](https://github.com/duyet/clickhouse-monitoring)| -|Common Lisp||言語クライアント|Common Lisp ClickHouseクライアントライブラリ。|[GitHub](https://github.com/juliojimenez/clickhouse-cl)| -| Dataflow||データインジェクション|Google Dataflowは、Apache Beamを使用してバッチおよびストリーミングデータパイプラインを実行するサーバーレスサービスです。|[Documentation](/integrations/google-dataflow/dataflow)| -|DBNet||ソフトウェアIDE|バックエンドにGoを使用し、フロントエンドにブラウザを利用したWebベースのSQL IDE。|[Github](https://github.com/dbnet-io/dbnet)| -|DataLens||データ視覚化|オープンソースのデータ分析および視覚化ツール。|[Website](https://datalens.tech/),
[Documentation](https://datalens.tech/docs/en/)| -|Dataease||データ視覚化|ユーザーがデータを分析し、ビジネストレンドを洞察するのを助けるオープンソースのデータ視覚化分析ツール。|[Website](https://dataease.io/)| -|Datahub||データ管理|オープンソースのデータカタログで、データ発見、データ可観測性、および連邦ガバナンスを可能にします。|[Documentation](https://datahubproject.io/docs/generated/ingestion/sources/clickhouse/)| -|Dbmate| |データ管理|データベース移行ツールで、複数の開発者やサーバー間でデータベーススキーマを同期します。|[GitHub](https://github.com/amacneil/dbmate#clickhouse)| -|DeepFlow||データインジェクション|eBPFを使用したアプリケーション可観測性。|[Website](https://deepflow.io)| -|Easypanel||デプロイメントメソッド|最新のサーバー制御パネルです。これを使用して、ClickHouseを自分のサーバーにデプロイできます。|[Website](https://easypanel.io),
[Documentation](/integrations/tools/data-integration/easypanel/index.md)| -|Explo||データ視覚化|Exploは、企業がリアルタイムの分析ダッシュボードを構築するのを支援します。|[Website](https://www.explo.co/integrations/clickhouse)| -|Flink||データインジェクション|ClickHouseデータベース向けのFlinkシンクで、Async Http Clientを活用しています。|[GitHub](https://github.com/itinycheng/flink-connector-clickhouse)| -|Goose||データ移行|SQLマイグレーションやGo関数をサポートするデータベース移行ツールです。|[GitHub](https://github.com/pressly/goose),
[Documentation](https://pressly.github.io/goose/)| -|Ibis||言語クライアント|Python分析の柔軟性を現代的なSQLのスケールとパフォーマンスの上で実現します。|[Website](https://ibis-project.org/backends/ClickHouse/)| -|Jaeger||データインジェクション|トレースをClickHouseに保存するためのJaeger gRPCストレージプラグインの実装。|[GitHub](https://github.com/jaegertracing/jaeger-clickhouse)| -|JupySQL||SQLクライアント|Jupyterノートブック用のネイティブSQLクライアントです。|[Documentation](/integrations/jupysql)| -|Kestra||データオーケストレーション|オープンソースのデータオーケストレーションおよびスケジューリングプラットフォーム。|[Website](https://kestra.io/plugins/plugin-jdbc-clickhouse/)| -|Logchain||セキュリティ|データセキュリティおよび特権アクセス管理。|[Website](https://github.com/adaptive-scale/logchain)| -|Meltano||データインジェクション|Meltanoはオープンソースのフルスタックデータ統合プラットフォームです。|[Documentation](https://hub.meltano.com/extractors/tap-clickhouse)| -|Mprove||データ視覚化|バージョン管理を備えたセルフサービスビジネスインテリジェンス。|[Website](https://mprove.io/)| -|Netobserv||データ管理|ネットワーク可観測性のためのOpenShiftおよびKubernetesオペレーターです。|[Blog](https://cloud.redhat.com/blog/deploying-network-observability-without-loki-an-example-with-clickhouse)| -|Observable||データ視覚化|Observableは、データを共同で探索、分析、可視化、およびコミュニケーションを行えるプラットフォームです。|[Website](https://observablehq.com/@stas-sl/clickhouse-playground)| -|PHP||言語クライアント|この拡張機能はYiiフレームワーク2.0に対するClickHouse統合を提供します。|[GitHub](https://github.com/smi2/phpClickHouse)| -|Pgwarehouse||データインジェクション|PostgresテーブルをClickHouseに迅速に複製するための簡単なツールです。|[GitHub](https://github.com/scottpersinger/pgwarehouse)| -|Pinax||ブロックチェーン分析|ブロックチェーン用のインデックス作成、分析、検索ツール。|[Blog](https://blog.pinax.network/substreams/simplify-real-time-blockchain-analytics-with-clickhouse/)| -|Pulse||データ管理|内部データUIのためのデベロッパープラットフォーム。|[Website](https://www.timestored.com/pulse/)| -|QStudio||GUI|ClickHouseデータベースとインタラクションするためのシンプルなGUIです。|[Website](https://www.timestored.com/qstudio/database/clickhouse)| -|Qryn||データインジェクション、管理、視覚化 | qrynはClickHouseの上に構築されたポリグロット可観測性スタックで、Loki、Prometheus、Tempo、Openteレメトリ、および他の多くの形式や標準APIに透明に互換性があります。|[Documentation](https://qryn.dev), [Github](https://github.com/metrico), [Website](https://qryn.cloud)| -|RSyslog||データインジェクション|このモジュールはClickHouseへのロギングのためのネイティブサポートを提供します。|[Documentation](https://www.rsyslog.com/doc/master/configuration/modules/omclickhouse.html)| -|Rocket.BI||データ視覚化|RocketBIは、リアルタイムでデータを分析し、ドラッグ&ドロップの視覚化を構築し、ウェブブラウザで同僚と共同作業を行うセルフサービスのビジネスインテリジェンスプラットフォームです。|[GitHub](https://github.com/datainsider-co/rocket-bi),
[Documentation](/integrations/data-visualization/rocketbi-and-clickhouse.md)| -|Ruby||言語クライアント|ClickHouse用の現代的なRubyデータベースドライバ。|[GitHub](https://github.com/shlima/click_house)| -|R||言語クライアント|RパッケージはClickHouseデータベースのためのDBIインターフェイスです。|[GitHub](https://github.com/IMSMWU/RClickHouse)| -|SQLPad||SQLクライアント|SQLPadはSQLクエリを書くためのウェブアプリです。|[Documentation](https://getsqlpad.com/en/connections/#clickhouse)| -|Scala||言語クライアント|ClickHouse Scala Clientで、Akka Httpを使用します。|[GitHub](https://github.com/crobox/clickhouse-scala-client)| -|SchemaSpy||データ視覚化|SchemaSpyはClickHouseスキーマの視覚化をサポートします。|[GitHub](https://github.com/schemaspy/schemaspy)| -|TricksterCache||データ視覚化| オープンソースのHTTPリバースプロキシキャッシュおよびタイムシリーズダッシュボードアクセラレーター。 |[Website](https://trickstercache.org/)| -|Visual Studio Client||言語クライアント|Visual Studioの軽量クライアントです。|[Marketplace](https://marketplace.visualstudio.com/items?itemName=fanruten.clickhouse-light)| -|VulcanSQL||データAPIフレームワーク|データアプリケーション用のデータAPIフレームワークで、データ担当者がデータAPIを迅速に作成および共有できるようにします。|[Website](https://vulcansql.com/),
[Documentation](https://vulcansql.com/docs/connect/clickhouse)| -|marimo||SQLクライアント|SQL機能を備えたオープンソースのリアクティブノートブックです。データ視覚化、共有可能なアプリ、または実行可能なスクリプトを作成します。|[Website](https://marimo.io/),
[Documentation](https://docs.marimo.io/guides/working_with_data/sql/?h=sql#clickhouse-support)| -
diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/index.mdx.hash b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/index.mdx.hash deleted file mode 100644 index 2e10bd1773e..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/index.mdx.hash +++ /dev/null @@ -1 +0,0 @@ -ff3f564c05d7f489 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/_category_.yml b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/_category_.yml deleted file mode 100644 index 2f438d39377..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/_category_.yml +++ /dev/null @@ -1,8 +0,0 @@ -position: 101 -label: 'Language clients' -collapsible: true -collapsed: true -link: - type: generated-index - title: Language clients - slug: /integrations/language-clients diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/go/index.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/go/index.md deleted file mode 100644 index 4939446c7ec..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/go/index.md +++ /dev/null @@ -1,2556 +0,0 @@ ---- -sidebar_label: 'Go' -sidebar_position: 1 -keywords: -- 'clickhouse' -- 'go' -- 'client' -- 'golang' -slug: '/integrations/go' -description: 'The Go clients for ClickHouse allows users to connect to ClickHouse - using either the Go standard database/sql interface or an optimized native interface.' -title: 'ClickHouse Go' ---- - -import ConnectionDetails from '@site/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_gather_your_details_native.md'; - - - -# ClickHouse Go -## 簡単な例 {#a-simple-example} - -簡単な例を使ってGoを試してみましょう。これによりClickHouseに接続し、システムデータベースから選択します。始めるには、接続情報が必要です。 -### 接続情報 {#connection-details} - - -### モジュールの初期化 {#initialize-a-module} - -```bash -mkdir clickhouse-golang-example -cd clickhouse-golang-example -go mod init clickhouse-golang-example -``` -### サンプルコードのコピー {#copy-in-some-sample-code} - -このコードを `clickhouse-golang-example` ディレクトリに `main.go` としてコピーしてください。 - -```go title=main.go -package main - -import ( - "context" - "crypto/tls" - "fmt" - "log" - - "github.com/ClickHouse/clickhouse-go/v2" - "github.com/ClickHouse/clickhouse-go/v2/lib/driver" -) - -func main() { - conn, err := connect() - if err != nil { - panic(err) - } - - ctx := context.Background() - rows, err := conn.Query(ctx, "SELECT name, toString(uuid) as uuid_str FROM system.tables LIMIT 5") - if err != nil { - log.Fatal(err) - } - - for rows.Next() { - var name, uuid string - if err := rows.Scan(&name, &uuid); err != nil { - log.Fatal(err) - } - log.Printf("name: %s, uuid: %s", name, uuid) - } - -} - -func connect() (driver.Conn, error) { - var ( - ctx = context.Background() - conn, err = clickhouse.Open(&clickhouse.Options{ - Addr: []string{":9440"}, - Auth: clickhouse.Auth{ - Database: "default", - Username: "default", - Password: "", - }, - ClientInfo: clickhouse.ClientInfo{ - Products: []struct { - Name string - Version string - }{ - {Name: "an-example-go-client", Version: "0.1"}, - }, - }, - Debugf: func(format string, v ...interface{}) { - fmt.Printf(format, v) - }, - TLS: &tls.Config{ - InsecureSkipVerify: true, - }, - }) - ) - - if err != nil { - return nil, err - } - - if err := conn.Ping(ctx); err != nil { - if exception, ok := err.(*clickhouse.Exception); ok { - fmt.Printf("Exception [%d] %s \n%s\n", exception.Code, 
exception.Message, exception.StackTrace) - } - return nil, err - } - return conn, nil -} -``` -### go mod tidyを実行 {#run-go-mod-tidy} - -```bash -go mod tidy -``` -### 接続情報を設定する {#set-your-connection-details} -以前に接続情報を調べました。 `main.go` の `connect()` 関数に設定します: - -```go -func connect() (driver.Conn, error) { - var ( - ctx = context.Background() - conn, err = clickhouse.Open(&clickhouse.Options{ - #highlight-next-line - Addr: []string{":9440"}, - Auth: clickhouse.Auth{ - #highlight-start - Database: "default", - Username: "default", - Password: "", - #highlight-end - }, -``` -### サンプルを実行 {#run-the-example} -```bash -go run . -``` -```response -2023/03/06 14:18:33 name: COLUMNS, uuid: 00000000-0000-0000-0000-000000000000 -2023/03/06 14:18:33 name: SCHEMATA, uuid: 00000000-0000-0000-0000-000000000000 -2023/03/06 14:18:33 name: TABLES, uuid: 00000000-0000-0000-0000-000000000000 -2023/03/06 14:18:33 name: VIEWS, uuid: 00000000-0000-0000-0000-000000000000 -2023/03/06 14:18:33 name: hourly_data, uuid: a4e36bd4-1e82-45b3-be77-74a0fe65c52b -``` -### 詳細を学ぶ {#learn-more} -このカテゴリの残りのドキュメントは、ClickHouse Go クライアントの詳細をカバーしています。 -## ClickHouse Go クライアント {#clickhouse-go-client} - -ClickHouseは、2つの公式Goクライアントをサポートしています。これらのクライアントは相補的であり、意図的に異なるユースケースをサポートしています。 - -* [clickhouse-go](https://github.com/ClickHouse/clickhouse-go) - Goの標準データベース/sqlインターフェースまたはネイティブインターフェースのいずれかをサポートする高レベル言語クライアント。 -* [ch-go](https://github.com/ClickHouse/ch-go) - 低レベルクライアント。ネイティブインターフェースのみ。 - -clickhouse-goは高レベルのインターフェースを提供し、ユーザーが行指向のセマンティクスを使用してデータをクエリしたり挿入したりできるようにし、データ型に関して寛容なバッチを提供します - 精度の損失がない限り、値は変換されます。一方、ch-goは、タイプの厳密さとより複雑な使用の代償に、低CPUおよびメモリオーバーヘッドで迅速なデータブロックストリーミングを提供する最適化された列指向インターフェースを提供します。 - -バージョン2.3から、Clickhouse-goはエンコーディング、デコーディング、および圧縮などの低レベル機能のためにch-goを利用します。clickhouse-goはまた、Goの `database/sql` インターフェース標準もサポートしています。両方のクライアントは、最適なパフォーマンスを提供するためにエンコーディングにネイティブフォーマットを使用し、ネイティブClickHouseプロトコルを介して通信できます。clickhouse-goはまた、ユーザーがトラフィックをプロキシまたは負荷分散する必要がある場合のために、HTTPをその輸送メカニズムとしてサポートしています。 - -クライアントライブラリを選択する際、ユーザーはそれぞれの利点と欠点を認識する必要があります - クライアントライブラリの選択を参照してください。 - -| | ネイティブフォーマット | ネイティブプロトコル | HTTPプロトコル | 行指向API | 列指向API | 型の柔軟性 | 圧縮 | クエリプレースホルダー | -|:-------------:|:-------------:|:---------------:|:-------------:|:------------------:|:---------------------:|:----------------:|:-----------:|:------------------:| -| clickhouse-go | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | -| ch-go | ✅ | ✅ | | | ✅ | | ✅ | | -## クライアントの選択 {#choosing-a-client} - -クライアントライブラリを選択することは、使用パターンと最適なパフォーマンスの必要性によって異なります。毎秒数百万の挿入が必要な挿入重視のユースケースでは、低レベルクライアントの[ch-go](https://github.com/ClickHouse/ch-go)の使用をお勧めします。このクライアントは、ClickHouseのネイティブフォーマットが要求する行指向形式から列にデータを変換する際の関連するオーバーヘッドを回避します。さらに、使用を簡素化するために、`interface{}` (`any`) タイプのリフレクションや使用を回避します。 - -集計や低スループットの挿入ワークロードに焦点を当てたクエリ処理では、[clickhouse-go](https://github.com/ClickHouse/clickhouse-go)が馴染みのある `database/sql` インターフェースとより簡単な行セマンティクスを提供します。ユーザーはまた、輸送プロトコルとしてHTTPを選択的に使用し、構造体との間で行をマールシャリングするためのヘルパー関数を利用することができます。 -## clickhouse-goクライアント {#the-clickhouse-go-client} - -clickhouse-goクライアントは、ClickHouseと通信するための2つのAPIインターフェースを提供します: - -* ClickHouseクライアント特有のAPI -* `database/sql`標準 - Golangによって提供されるSQLデータベースの一般的なインターフェース。 - -`database/sql`は、データストアを抽象化する開発者にデータベース非依存のインターフェースを提供しますが、一部のタイプとクエリセマンティクスを強制し、パフォーマンスに影響を及ぼすことがあります。このため、[パフォーマンスが重要](https://github.com/clickHouse/clickHouse-go#benchmark)な場合は、クライアント特有のAPIを使用するべきです。ただし、複数のデータベースをサポートするツールにClickHouseを統合したいユーザーは、標準インターフェースの使用を好むかもしれません。 - -両方のインターフェースは、[ネイティブフォーマット](/native-protocol/basics.md)および通信のためのネイティブプロトコルを使用してデータをエンコードします。さらに、標準インターフェースはHTTPを介した通信をサポートしています。 - 
-| | ネイティブフォーマット | ネイティブプロトコル | HTTPプロトコル | バルク書き込みサポート | 構造体マールシャリング | 圧縮 | クエリプレースホルダー | -|:------------------:|:-------------:|:---------------:|:-------------:|:------------------:|:-----------------:|:-----------:|:------------------:| -| ClickHouse API | ✅ | ✅ | | ✅ | ✅ | ✅ | ✅ | -| `database/sql` API | ✅ | ✅ | ✅ | ✅ | | ✅ | ✅ | -## インストール {#installation} - -ドライバのv1は非推奨であり、機能更新や新しいClickHouseタイプのサポートには到達しません。ユーザーは、より優れたパフォーマンスを提供するv2に移行する必要があります。 - -クライアントの2.xバージョンをインストールするには、go.modファイルにパッケージを追加します: - -`require github.com/ClickHouse/clickhouse-go/v2 main` - -または、リポジトリをクローンします: - -```bash -git clone --branch v2 https://github.com/clickhouse/clickhouse-go.git $GOPATH/src/github -``` - -別のバージョンをインストールするには、パスまたはブランチ名を適宜変更します。 - -```bash -mkdir my-clickhouse-app && cd my-clickhouse-app - -cat > go.mod <<-END - module my-clickhouse-app - - go 1.18 - - require github.com/ClickHouse/clickhouse-go/v2 main -END - -cat > main.go <<-END - package main - - import ( - "fmt" - "github.com/ClickHouse/clickhouse-go/v2" - ) - - func main() { - conn, _ := clickhouse.Open(&clickhouse.Options{Addr: []string{"127.0.0.1:9000"}}) - v, _ := conn.ServerVersion() - fmt.Println(v.String()) - } -END - -go mod tidy -go run main.go - -``` -### バージョン管理と互換性 {#versioning--compatibility} - -クライアントはClickHouseとは独立してリリースされます。2.xは現在開発中のメジャーバージョンを表します。2.xのすべてのバージョンは互換性があります。 -#### ClickHouseとの互換性 {#clickhouse-compatibility} - -クライアントは以下をサポートします: - -- 現在サポートされているすべてのClickHouseバージョンは、[こちら](https://github.com/ClickHouse/ClickHouse/blob/master/SECURITY.md)に記録されています。ClickHouseバージョンがもはやサポートされない場合、それらはクライアントリリースに対しても積極的にテストされることはありません。 -- クライアントのリリース日から2年以内のすべてのClickHouseバージョン。ただし、LTSバージョンのみが積極的にテストされています。 -#### Golangとの互換性 {#golang-compatibility} - -| クライアントバージョン | Golangバージョン | -|:--------------:|:---------------:| -| => 2.0 <= 2.2 | 1.17, 1.18 | -| >= 2.3 | 1.18 | -## ClickHouseクライアントAPI {#clickhouse-client-api} - -ClickHouseクライアントAPIのすべてのコード例は[こちら](https://github.com/ClickHouse/clickhouse-go/tree/main/examples)で見つけることができます。 -### 接続 {#connecting} - -以下の例は、サーバーバージョンを返し、ClickHouseに接続することを示しています - ClickHouseが保護されておらず、デフォルトユーザーでアクセス可能であると仮定しています。 - -デフォルトのネイティブポートを使用して接続します。 - -```go -conn, err := clickhouse.Open(&clickhouse.Options{ - Addr: []string{fmt.Sprintf("%s:%d", env.Host, env.Port)}, - Auth: clickhouse.Auth{ - Database: env.Database, - Username: env.Username, - Password: env.Password, - }, -}) -if err != nil { - return err -} -v, err := conn.ServerVersion() -fmt.Println(v) -``` - -[完全な例](https://github.com/ClickHouse/clickhouse-go/blob/main/examples/clickhouse_api/connect.go) - -**その後のすべての例では、明示的に示されない限り、ClickHouse `conn` 変数が作成されて利用可能であると仮定します。** -#### 接続設定 {#connection-settings} - -接続を開くとき、Options構造体を使用してクライアントの動作を制御できます。以下の設定が利用可能です: - -* `Protocol` - ネイティブまたはHTTP。HTTPは現在、[database/sql API](#databasesql-api)のみでサポートされています。 -* `TLS` - TLSオプション。非nil値はTLSを有効にします。[TLSの使用](#using-tls)を参照してください。 -* `Addr` - ポートを含むアドレスのスライス。 -* `Auth` - 認証の詳細。[認証](#authentication)を参照してください。 -* `DialContext` - 接続を確立する方法を決定するカスタムダイヤル関数。 -* `Debug` - デバッグを有効にするためのtrue/false。 -* `Debugf` - デバッグ出力を消費する関数を提供します。`debug`をtrueに設定する必要があります。 -* `Settings` - ClickHouse設定のマップ。これらはすべてのClickHouseクエリに適用されます。[コンテキストの使用](#using-context)を使用すると、クエリごとに設定を設定できます。 -* `Compression` - ブロックの圧縮を有効にします。[圧縮](#compression)を参照してください。 -* `DialTimeout` - 接続を確立する最大時間。デフォルトは `1s` です。 -* `MaxOpenConns` - 同時に使用する最大接続数。アイドルプールにはより多くまたは少ない接続がある可能性がありますが、この数の接続のみを使用できます。デフォルトは `MaxIdleConns+5` です。 -* `MaxIdleConns` - プール内で維持する接続の数。可能な場合は接続が再利用されます。デフォルトは `5` です。 -* `ConnMaxLifetime` - 
接続を利用可能にする最大ライフタイム。デフォルトは1時間です。この時間の後、接続は破棄され、新しい接続がプールに追加されます。 -* `ConnOpenStrategy` - ノードアドレスのリストをどのように消費して接続を開くかを決定します。[複数ノードへの接続](#connecting-to-multiple-nodes)を参照してください。 -* `BlockBufferSize` - 一度にバッファにデコードする最大ブロック数。大きな値はメモリの代償に並列性を増やします。ブロックサイズはクエリに依存するため、接続でこれを設定できますが、返すデータに基づいてクエリごとに上書きすることをお勧めします。デフォルトは `2` です。 - -```go -conn, err := clickhouse.Open(&clickhouse.Options{ - Addr: []string{fmt.Sprintf("%s:%d", env.Host, env.Port)}, - Auth: clickhouse.Auth{ - Database: env.Database, - Username: env.Username, - Password: env.Password, - }, - DialContext: func(ctx context.Context, addr string) (net.Conn, error) { - dialCount++ - var d net.Dialer - return d.DialContext(ctx, "tcp", addr) - }, - Debug: true, - Debugf: func(format string, v ...interface{}) { - fmt.Printf(format, v) - }, - Settings: clickhouse.Settings{ - "max_execution_time": 60, - }, - Compression: &clickhouse.Compression{ - Method: clickhouse.CompressionLZ4, - }, - DialTimeout: time.Duration(10) * time.Second, - MaxOpenConns: 5, - MaxIdleConns: 5, - ConnMaxLifetime: time.Duration(10) * time.Minute, - ConnOpenStrategy: clickhouse.ConnOpenInOrder, - BlockBufferSize: 10, -}) -if err != nil { - return err -} -``` -[完全な例](https://github.com/ClickHouse/clickhouse-go/blob/main/examples/clickhouse_api/connect_settings.go) -#### 接続プール {#connection-pooling} - -クライアントは接続プールを維持し、必要に応じてこれらをクエリを跨いで再利用します。最も多く `MaxOpenConns` は同時に使用され、プールの最大サイズは `MaxIdleConns` によって制御されます。クライアントは各クエリ実行のためにプールから接続を取得し、再利用のためにプールに戻します。接続はバッチの生涯の間使用され、 `Send()` で解放されます。 - -ユーザーが `MaxOpenConns=1` を設定しない限り、プール内の同じ接続が後続のクエリに使用される保証はありません。これはあまり必要ありませんが、ユーザーが一時テーブルを使用している場合には必要です。 - -また、デフォルトで `ConnMaxLifetime` は1時間です。これは、ノードがクラスタから離れた場合にClickHouseへの負荷が不均一になるケースを引き起こす可能性があります。ノードが利用できなくなると接続は他のノードに均等に振り分けられます。これらの接続は保持され、デフォルトで1時間の間はリフレッシュされません。問題のあるノードがクラスタに戻っても同様です。負荷の高いワークロードの場合はこの値を下げることを検討してください。 -### TLSの使用 {#using-tls} - -低レベルでは、すべてのクライアント接続メソッド(`DSN/OpenDB/Open`)は、[Goのtlsパッケージ](https://pkg.go.dev/crypto/tls)を使用して安全な接続を確立します。Options構造体が非nilの `tls.Config` ポインタを含む場合、クライアントはTLSを使用することを認識します。 - -```go -env, err := GetNativeTestEnvironment() -if err != nil { - return err -} -cwd, err := os.Getwd() -if err != nil { - return err -} -t := &tls.Config{} -caCert, err := ioutil.ReadFile(path.Join(cwd, "../../tests/resources/CAroot.crt")) -if err != nil { - return err -} -caCertPool := x509.NewCertPool() -successful := caCertPool.AppendCertsFromPEM(caCert) -if !successful { - return err -} -t.RootCAs = caCertPool -conn, err := clickhouse.Open(&clickhouse.Options{ - Addr: []string{fmt.Sprintf("%s:%d", env.Host, env.SslPort)}, - Auth: clickhouse.Auth{ - Database: env.Database, - Username: env.Username, - Password: env.Password, - }, - TLS: t, -}) -if err != nil { - return err -} -v, err := conn.ServerVersion() -if err != nil { - return err -} -fmt.Println(v.String()) -``` - -[完全な例](https://github.com/ClickHouse/clickhouse-go/blob/main/examples/clickhouse_api/ssl.go) - -この最小限の `TLS.Config` は通常、ClickHouseサーバーのセキュアなネイティブポート(通常9440)に接続するのに十分です。ClickHouseサーバーに有効な証明書(期限切れ、誤ったホスト名、一般的に認識されたルート認証機関によって署名されていない)がない場合、 `InsecureSkipVerify` をtrueに設定することができますが、これは強く推奨されません。 - -```go -conn, err := clickhouse.Open(&clickhouse.Options{ - Addr: []string{fmt.Sprintf("%s:%d", env.Host, env.SslPort)}, - Auth: clickhouse.Auth{ - Database: env.Database, - Username: env.Username, - Password: env.Password, - }, - TLS: &tls.Config{ - InsecureSkipVerify: true, - }, -}) -if err != nil { - return err -} -v, err := conn.ServerVersion() -``` 
-[完全な例](https://github.com/ClickHouse/clickhouse-go/blob/main/examples/clickhouse_api/ssl_no_verify.go) - -追加のTLSパラメータが必要な場合、アプリケーションコードは `tls.Config` 構造体の必要なフィールドを設定するべきです。これには、特定の暗号スイートの強制、特定のTLSバージョンの強制(1.2または1.3など)、内部CA証明書チェーンの追加、ClickHouseサーバーによって要求された場合のクライアント証明書(および秘密鍵)の追加、そしてより専門的なセキュリティセットアップに付随するその他のオプションが含まれます。 -### 認証 {#authentication} - -接続情報にAuth構造体を指定してユーザー名とパスワードを指定します。 - -```go -conn, err := clickhouse.Open(&clickhouse.Options{ - Addr: []string{fmt.Sprintf("%s:%d", env.Host, env.Port)}, - Auth: clickhouse.Auth{ - Database: env.Database, - Username: env.Username, - Password: env.Password, - }, -}) -if err != nil { - return err -} - -v, err := conn.ServerVersion() -``` -[完全な例](https://github.com/ClickHouse/clickhouse-go/blob/main/examples/clickhouse_api/auth.go) -### 複数ノードへの接続 {#connecting-to-multiple-nodes} - -複数のアドレスを `Addr` 構造体を通じて指定できます。 - -```go -conn, err := clickhouse.Open(&clickhouse.Options{ - Addr: []string{"127.0.0.1:9001", "127.0.0.1:9002", fmt.Sprintf("%s:%d", env.Host, env.Port)}, - Auth: clickhouse.Auth{ - Database: env.Database, - Username: env.Username, - Password: env.Password, - }, -}) -if err != nil { - return err -} -v, err := conn.ServerVersion() -if err != nil { - return err -} -fmt.Println(v.String()) -``` - -[完全な例](https://github.com/ClickHouse/clickhouse-go/blob/1c0d81d0b1388dbb9e09209e535667df212f4ae4/examples/clickhouse_api/multi_host.go#L26-L45) - - -2つの接続戦略が利用可能です: - -* `ConnOpenInOrder` (デフォルト) - アドレスは順番に消費されます。後のアドレスは、リストに含まれる早いアドレスへの接続に失敗した場合にのみ使用されます。これは実質的にフェイルオーバー戦略です。 -* `ConnOpenRoundRobin` - ラウンドロビン戦略を使用してアドレス間の負荷をバランスさせます。 - -これはオプション `ConnOpenStrategy` を通じて制御できます。 - -```go -conn, err := clickhouse.Open(&clickhouse.Options{ - Addr: []string{"127.0.0.1:9001", "127.0.0.1:9002", fmt.Sprintf("%s:%d", env.Host, env.Port)}, - ConnOpenStrategy: clickhouse.ConnOpenRoundRobin, - Auth: clickhouse.Auth{ - Database: env.Database, - Username: env.Username, - Password: env.Password, - }, -}) -if err != nil { - return err -} -v, err := conn.ServerVersion() -if err != nil { - return err -} -``` - -[完全な例](https://github.com/ClickHouse/clickhouse-go/blob/1c0d81d0b1388dbb9e09209e535667df212f4ae4/examples/clickhouse_api/multi_host.go#L50-L67) -### 実行 {#execution} - -任意のステートメントを `Exec` メソッドを通じて実行できます。これはDDLおよび簡単なステートメントに有用です。大きな挿入やクエリ反復には使用しないでください。 - -```go -conn.Exec(context.Background(), `DROP TABLE IF EXISTS example`) -err = conn.Exec(context.Background(), ` - CREATE TABLE IF NOT EXISTS example ( - Col1 UInt8, - Col2 String - ) engine=Memory -`) -if err != nil { - return err -} -conn.Exec(context.Background(), "INSERT INTO example VALUES (1, 'test-1')") -``` - -[完全な例](https://github.com/ClickHouse/clickhouse-go/blob/main/examples/clickhouse_api/exec.go) - - -クエリにContextを渡す能力に注意してください。これは特定のクエリレベルの設定を渡すのに使用できます - [コンテキストの使用](#using-context)を参照してください。 -### バッチ挿入 {#batch-insert} - -大量の行を挿入するには、クライアントはバッチセマンティクスを提供しています。これは、行を追加できるバッチの準備が必要です。これは最終的に `Send()` メソッドを通じて送信されます。バッチはSendが実行されるまでメモリに保持されます。 - -```go -conn, err := GetNativeConnection(nil, nil, nil) -if err != nil { - return err -} -ctx := context.Background() -defer func() { - conn.Exec(ctx, "DROP TABLE example") -}() -conn.Exec(context.Background(), "DROP TABLE IF EXISTS example") -err = conn.Exec(ctx, ` - CREATE TABLE IF NOT EXISTS example ( - Col1 UInt8 - , Col2 String - , Col3 FixedString(3) - , Col4 UUID - , Col5 Map(String, UInt8) - , Col6 Array(String) - , Col7 Tuple(String, UInt8, Array(Map(String, String))) - , Col8 DateTime - ) Engine = Memory -`) -if err != nil { - return err -} - - 
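-// PrepareBatch binds a client-side batch to this INSERT statement; the rows appended below are buffered in memory and are only sent to the server when Send() is called.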
-batch, err := conn.PrepareBatch(ctx, "INSERT INTO example")
-if err != nil {
-    return err
-}
-for i := 0; i < 1000; i++ {
-    err := batch.Append(
-        uint8(42),
-        "ClickHouse",
-        "Inc",
-        uuid.New(),
-        map[string]uint8{"key": 1}, // Map(String, UInt8)
-        []string{"Q", "W", "E", "R", "T", "Y"}, // Array(String)
-        []interface{}{ // Tuple(String, UInt8, Array(Map(String, String)))
-            "String Value", uint8(5), []map[string]string{
-                {"key": "value"},
-                {"key": "value"},
-                {"key": "value"},
-            },
-        },
-        time.Now(),
-    )
-    if err != nil {
-        return err
-    }
-}
-return batch.Send()
-```
-
-[完全な例](https://github.com/ClickHouse/clickhouse-go/blob/main/examples/clickhouse_api/batch.go)
-
-ClickHouseに対する挿入の推奨事項は[ここ](/guides/inserting-data#best-practices-for-inserts)にも適用されます。バッチはゴルーチン間で共有しないでください - 各ゴルーチンごとに別々のバッチを構築してください。
-
-上記の例から、行を追加する際には変数の型がカラムの型と一致する必要があることに注意してください。マッピングは通常明白ですが、このインターフェースは柔軟性を提供し、精度の損失がない限り型は変換されます。たとえば、次のことが示されています。
-
-```go
-batch, err := conn.PrepareBatch(ctx, "INSERT INTO example")
-if err != nil {
-    return err
-}
-for i := 0; i < 1000; i++ {
-    err := batch.Append(
-        "2006-01-02 15:04:05.999",
-    )
-    if err != nil {
-        return err
-    }
-}
-return batch.Send()
-```
-
-[完全な例](https://github.com/ClickHouse/clickhouse-go/blob/main/examples/clickhouse_api/type_convert.go)
-
-
-各カラム型に対するサポートされたgo型の完全な要約については、[型変換](#type-conversions)を参照してください。
-### 行のクエリ {#querying-rows}
-
-
-ユーザーは `QueryRow` メソッドを使用して単一の行をクエリするか、 `Query` を介して結果セットを反復するためのカーソルを取得できます。前者はデータがシリアライズされる先を受け入れますが、後者は各行で `Scan` を呼び出す必要があります。
-
-```go
-row := conn.QueryRow(context.Background(), "SELECT * FROM example")
-var (
-    col1             uint8
-    col2, col3, col4 string
-    col5             map[string]uint8
-    col6             []string
-    col7             []interface{}
-    col8             time.Time
-)
-if err := row.Scan(&col1, &col2, &col3, &col4, &col5, &col6, &col7, &col8); err != nil {
-    return err
-}
-fmt.Printf("row: col1=%d, col2=%s, col3=%s, col4=%s, col5=%v, col6=%v, col7=%v, col8=%v\n", col1, col2, col3, col4, col5, col6, col7, col8)
-```
-
-[完全な例](https://github.com/ClickHouse/clickhouse-go/blob/main/examples/clickhouse_api/query_row.go)
-
-```go
-rows, err := conn.Query(ctx, "SELECT Col1, Col2, Col3 FROM example WHERE Col1 >= 2")
-if err != nil {
-    return err
-}
-for rows.Next() {
-    var (
-        col1 uint8
-        col2 string
-        col3 time.Time
-    )
-    if err := rows.Scan(&col1, &col2, &col3); err != nil {
-        return err
-    }
-    fmt.Printf("row: col1=%d, col2=%s, col3=%s\n", col1, col2, col3)
-}
-rows.Close()
-return rows.Err()
-```
-
-[完全な例](https://github.com/ClickHouse/clickhouse-go/blob/main/examples/clickhouse_api/query_rows.go)
-
-どちらの場合でも、シリアライズしたい各カラムの値を格納する変数のポインタを渡す必要があることに注意してください。これらは、デフォルトで、 `SELECT` ステートメントで指定された順序で渡す必要があります - デフォルトでは、 `SELECT *` の場合、カラム宣言の順序が使用されます。
-
-挿入と同様に、Scanメソッドはターゲット変数が適切な型である必要があります。これは再度柔軟であることを目指しており、精度の損失がない限り型は変換されます。たとえば、上記の例ではUUIDカラムが文字列変数に読み取られています。各カラム型に対するサポートされたgo型の完全なリストについては、[型変換](#type-conversions)を参照してください。
-
-最後に、 `Query` および `QueryRow` メソッドに `Context` を渡す能力に注意してください。これはクエリレベルの設定に使用できます - 詳細は[コンテキストの使用](#using-context)を参照してください。
-### 非同期挿入 {#async-insert}
-
-非同期挿入はAsyncメソッドを介してサポートされています。これにより、クライアントがサーバーに挿入を完了するまで待機するか、データを受信した時点で応答するかを指定できます。これは実質的にパラメータ [wait_for_async_insert](/operations/settings/settings#wait_for_async_insert) を制御します。
-
-```go
-conn, err := GetNativeConnection(nil, nil, nil)
-if err != nil {
-    return err
-}
-ctx := context.Background()
-if err := clickhouse_tests.CheckMinServerServerVersion(conn, 21, 12, 0); err != nil {
-    return nil
-}
-defer func() {
-    conn.Exec(ctx, "DROP TABLE example")
-}()
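-// テーブルを作成し直し、AsyncInsert でクエリ文字列ベースの非同期挿入を 100 回実行する(第 3 引数は挿入の完了を待つかどうかを指定する)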
-conn.Exec(ctx, `DROP TABLE IF EXISTS example`)
-const ddl = `
-    CREATE TABLE example (
-        Col1 UInt64
-        , Col2 String
-        , Col3 Array(UInt8)
-        , Col4 DateTime
-    ) ENGINE = Memory
-`
-if err := conn.Exec(ctx, ddl); err != nil {
-    return err
-}
-for i := 0; i < 100; i++ {
-    if err := conn.AsyncInsert(ctx, fmt.Sprintf(`INSERT INTO example VALUES (
-        %d, '%s', [1, 2, 3, 4, 5, 6, 7, 8, 9], now()
-    )`, i, "Golang SQL database driver"), false); err != nil {
-        return err
-    }
-}
-```
-
-[完全な例](https://github.com/ClickHouse/clickhouse-go/blob/main/examples/clickhouse_api/async.go)
-### 列指向挿入 {#columnar-insert}
-
-挿入は列形式で行うことができます。これは、データがすでにこの構造である場合、行にピボットする必要を回避することにより、パフォーマンスの利点を提供します。
-
-```go
-batch, err := conn.PrepareBatch(context.Background(), "INSERT INTO example")
-if err != nil {
-    return err
-}
-var (
-    col1 []uint64
-    col2 []string
-    col3 [][]uint8
-    col4 []time.Time
-)
-for i := 0; i < 1_000; i++ {
-    col1 = append(col1, uint64(i))
-    col2 = append(col2, "Golang SQL database driver")
-    col3 = append(col3, []uint8{1, 2, 3, 4, 5, 6, 7, 8, 9})
-    col4 = append(col4, time.Now())
-}
-if err := batch.Column(0).Append(col1); err != nil {
-    return err
-}
-if err := batch.Column(1).Append(col2); err != nil {
-    return err
-}
-if err := batch.Column(2).Append(col3); err != nil {
-    return err
-}
-if err := batch.Column(3).Append(col4); err != nil {
-    return err
-}
-return batch.Send()
-```
-
-[完全な例](https://github.com/ClickHouse/clickhouse-go/blob/main/examples/clickhouse_api/columnar_insert.go)
-### 構造体を使用する {#using-structs}
-
-ユーザーにとって、Golangの構造体はClickHouseにおけるデータ行の論理的な表現を提供します。これをサポートするために、ネイティブインターフェースはさまざまな便利な関数を提供します。
-#### シリアライズでの選択 {#select-with-serialize}
-
-Selectメソッドは、一度の呼び出しでレスポンス行のセットを構造体のスライスにマーシャルすることを可能にします。
-
-```go
-var result []struct {
-    Col1           uint8
-    Col2           string
-    ColumnWithName time.Time `ch:"Col3"`
-}
-
-if err = conn.Select(ctx, &result, "SELECT Col1, Col2, Col3 FROM example"); err != nil {
-    return err
-}
-
-for _, v := range result {
-    fmt.Printf("row: col1=%d, col2=%s, col3=%s\n", v.Col1, v.Col2, v.ColumnWithName)
-}
-```
-
-[完全な例](https://github.com/ClickHouse/clickhouse-go/blob/main/examples/clickhouse_api/select_struct.go)
-#### Scan Struct {#scan-struct}
-
-`ScanStruct` は、クエリからの単一行を構造体にマーシャリングすることを可能にします。
-
-```go
-var result struct {
-    Col1  int64
-    Count uint64 `ch:"count"`
-}
-if err := conn.QueryRow(context.Background(), "SELECT Col1, COUNT() AS count FROM example WHERE Col1 = 5 GROUP BY Col1").ScanStruct(&result); err != nil {
-    return err
-}
-```
-
-[完全な例](https://github.com/ClickHouse/clickhouse-go/blob/main/examples/clickhouse_api/scan_struct.go)
-#### Append Struct {#append-struct}
-
-`AppendStruct` は、構造体を既存の [batch](#batch-insert) に追加し、完全な行として解釈することを可能にします。これには、構造体のフィールドがテーブルのカラムと名前と型の両方で一致する必要があります。すべてのカラムには対応する構造体フィールドが必要ですが、いくつかの構造体フィールドには対応するカラム表現がない場合があります。これらは単に無視されます。
-
-```go
-batch, err := conn.PrepareBatch(context.Background(), "INSERT INTO example")
-if err != nil {
-    return err
-}
-for i := 0; i < 1_000; i++ {
-    err := batch.AppendStruct(&row{
-        Col1:       uint64(i),
-        Col2:       "Golang SQL database driver",
-        Col3:       []uint8{1, 2, 3, 4, 5, 6, 7, 8, 9},
-        Col4:       time.Now(),
-        ColIgnored: "this will be ignored",
-    })
-    if err != nil {
-        return err
-    }
-}
-```
-
-[完全な例](https://github.com/ClickHouse/clickhouse-go/blob/main/examples/clickhouse_api/append_struct.go)
-### 型変換 {#type-conversions}
-
-クライアントは、挿入と応答のマーシャリングの両方に対して、変数型を受け入れる柔軟性を可能な限り高めることを目指しています。ほとんどの場合、ClickHouseのカラム型に対して等価なGolang型が存在します。例えば、[UInt64](/sql-reference/data-types/int-uint/) は
[uint64](https://pkg.go.dev/builtin#uint64) にマッピングされます。これらの論理的なマッピングは常にサポートされるべきです。ユーザーは、カラムに挿入するために使用したり、応答を受け取るために使用したりできる変数型を利用したいと考えるかもしれません。変数または受信データのいずれかの変換が最初に行われる場合があります。クライアントは、これらの変換を透過的にサポートすることを目指しているため、ユーザーは挿入前にデータを正確に揃えるために変換する必要がなく、クエリ時に柔軟なマーシャリングを提供します。この透過的な変換では精度の喪失は許可されません。例えば、uint32はUInt64列からのデータを受け取るために使用することはできません。逆に、文字列はフォーマット要件を満たす限り、datetime64フィールドに挿入できます。 - -現在サポートされているプリミティブ型の型変換は[こちら](https://github.com/ClickHouse/clickhouse-go/blob/main/TYPES.md)に記載されています。 - -この努力は継続中であり、挿入(`Append`/`AppendRow`)と読み取り時(`Scan`を通じて)に分けることができます。特定の変換に対するサポートが必要な場合は、問題を提起してください。 -### 複雑な型 {#complex-types} -#### 日付/日時型 {#datedatetime-types} - -ClickHouseのGoクライアントは、`Date`、`Date32`、`DateTime`、および `DateTime64`の日付/日時型をサポートしています。日付は、`2006-01-02`形式の文字列として挿入できます。またはGoの`time.Time{}`や`sql.NullTime`を使用します。DateTimeもこれらの型をサポートしていますが、文字列は`2006-01-02 15:04:05`形式で渡す必要があり、オプションのタイムゾーンオフセット(例:`2006-01-02 15:04:05 +08:00`)が必要です。`time.Time{}`および`sql.NullTime`は、読み取り時にもサポートされており、`sql.Scanner`インターフェイスの任意の実装も利用できます。 - -タイムゾーン情報の扱いは、ClickHouseの型や、値の挿入または読み取りに依存します: - -* **DateTime/DateTime64** - * **挿入**時に値はUNIXタイムスタンプ形式でClickHouseに送信されます。タイムゾーンが提供されていない場合、クライアントはクライアントのローカルタイムゾーンを想定します。`time.Time{}`または`sql.NullTime`は、その結果としてエポックに変換されます。 - * **選択**時に、カラムに設定されたタイムゾーンが、`time.Time`値を返す際に使用されます。設定されていない場合、サーバーのタイムゾーンが使用されます。 -* **Date/Date32** - * **挿入**時には、日付をUNIXタイムスタンプに変換する際に日付のタイムゾーンが考慮されます。すなわち、日付としてのストレージの前にタイムゾーンによってオフセットされます。ClickHouseのDate型にはロケールがないため、これは文字列値で指定されない限りローカルタイムゾーンが使用されます。 - * **選択**時には、日付が`time.Time{}`または`sql.NullTime{}`のインスタンスにスキャンされ、タイムゾーン情報なしで返されます。 -#### 配列 {#array} - -配列はスライスとして挿入される必要があります。要素の型ルールは、[プリミティブ型](#type-conversions)に対するものと一致します。すなわち、可能な範囲で要素が変換されます。 - -スキャン時にはスライスへのポインタを提供する必要があります。 - -```go -batch, err := conn.PrepareBatch(ctx, "INSERT INTO example") -if err != nil { - return err -} -var i int64 -for i = 0; i < 10; i++ { - err := batch.Append( - []string{strconv.Itoa(int(i)), strconv.Itoa(int(i + 1)), strconv.Itoa(int(i + 2)), strconv.Itoa(int(i + 3))}, - [][]int64{{i, i + 1}, {i + 2, i + 3}, {i + 4, i + 5}}, - ) - if err != nil { - return err - } -} -if err := batch.Send(); err != nil { - return err -} -var ( - col1 []string - col2 [][]int64 -) -rows, err := conn.Query(ctx, "SELECT * FROM example") -if err != nil { - return err -} -for rows.Next() { - if err := rows.Scan(&col1, &col2); err != nil { - return err - } - fmt.Printf("行: col1=%v, col2=%v\n", col1, col2) -} -rows.Close() -``` - -[完全な例](https://github.com/ClickHouse/clickhouse-go/blob/main/examples/clickhouse_api/array.go) -#### マップ {#map} - -マップは、前述の型ルールに準拠するキーと値を持つGolangマップとして挿入される必要があります。 - -```go -batch, err := conn.PrepareBatch(ctx, "INSERT INTO example") -if err != nil { - return err -} -var i int64 -for i = 0; i < 10; i++ { - err := batch.Append( - map[string]uint64{strconv.Itoa(int(i)): uint64(i)}, - map[string][]string{strconv.Itoa(int(i)): {strconv.Itoa(int(i)), strconv.Itoa(int(i + 1)), strconv.Itoa(int(i + 2)), strconv.Itoa(int(i + 3))}}, - map[string]map[string]uint64{strconv.Itoa(int(i)): {strconv.Itoa(int(i)): uint64(i)}}, - ) - if err != nil { - return err - } -} -if err := batch.Send(); err != nil { - return err -} -var ( - col1 map[string]uint64 - col2 map[string][]string - col3 map[string]map[string]uint64 -) -rows, err := conn.Query(ctx, "SELECT * FROM example") -if err != nil { - return err -} -for rows.Next() { - if err := rows.Scan(&col1, &col2, &col3); err != nil { - return err - } - fmt.Printf("行: col1=%v, col2=%v, col3=%v\n", col1, col2, col3) -} -rows.Close() -``` - 
-[完全な例](https://github.com/ClickHouse/clickhouse-go/blob/main/examples/clickhouse_api/map.go) -#### タプル {#tuples} - -タプルは、任意の長さのカラムのグループを表します。カラムは明示的に名前を付けることも、型のみを指定することもできます(例: - -```sql -//無名 -Col1 Tuple(String, Int64) - -//名前付き -Col2 Tuple(name String, id Int64, age uint8) -``` - -これらのアプローチのうち、名前付きタプルはより柔軟性があります。無名のタプルはスライスを使用して挿入および読み取りする必要がありますが、名前付きタプルはマップとも互換性があります。 - -```go -if err = conn.Exec(ctx, ` - CREATE TABLE example ( - Col1 Tuple(name String, age UInt8), - Col2 Tuple(String, UInt8), - Col3 Tuple(name String, id String) - ) - Engine Memory - `); err != nil { - return err -} - -defer func() { - conn.Exec(ctx, "DROP TABLE example") -}() -batch, err := conn.PrepareBatch(ctx, "INSERT INTO example") -if err != nil { - return err -} -// 無名および名前付き両方ともスライスで追加できます。同じ型のすべての要素が同じ場合は、強く型付けされたリストとマップも使用できます。 -if err = batch.Append([]interface{}{"Clicky McClickHouse", uint8(42)}, []interface{}{"Clicky McClickHouse Snr", uint8(78)}, []string{"Dale", "521211"}); err != nil { - return err -} -if err = batch.Append(map[string]interface{}{"name": "Clicky McClickHouse Jnr", "age": uint8(20)}, []interface{}{"Baby Clicky McClickHouse", uint8(1)}, map[string]string{"name": "Geoff", "id": "12123"}); err != nil { - return err -} -if err = batch.Send(); err != nil { - return err -} -var ( - col1 map[string]interface{} - col2 []interface{} - col3 map[string]string -) -// 名前付きタプルはマップまたはスライスに取得でき、無名はスライスのみです。 -if err = conn.QueryRow(ctx, "SELECT * FROM example").Scan(&col1, &col2, &col3); err != nil { - return err -} -fmt.Printf("行: col1=%v, col2=%v, col3=%v\n", col1, col2, col3) -``` - -[完全な例](https://github.com/ClickHouse/clickhouse-go/blob/main/examples/clickhouse_api/tuple.go) - -注:型付きスライスとマップがサポートされているため、すべての名前付きタプルのサブカラムが同じ型である必要があります。 -#### ネスト {#nested} - -ネストフィールドは、名前付きタプルの配列に相当します。ユーザーが[flatten_nested](/operations/settings/settings#flatten_nested)を1または0に設定したかどうかによって使用法が変わります。 - -flatten_nestedを0に設定すると、ネストしたカラムは単一のタプルの配列として保持されます。これにより、ユーザーは挿入や取得のためにマップのスライスを使用し、任意のレベルのネストを行うことができます。マップのキーはカラムの名前と等しくなければならず、以下の例のように示されます。 - -注:マップはタプルを表すため、`map[string]interface{}`型である必要があります。値は現在、強く型付けされていません。 - -```go -conn, err := GetNativeConnection(clickhouse.Settings{ - "flatten_nested": 0, -}, nil, nil) -if err != nil { - return err -} -ctx := context.Background() -defer func() { - conn.Exec(ctx, "DROP TABLE example") -}() -conn.Exec(context.Background(), "DROP TABLE IF EXISTS example") -err = conn.Exec(ctx, ` - CREATE TABLE example ( - Col1 Nested(Col1_1 String, Col1_2 UInt8), - Col2 Nested( - Col2_1 UInt8, - Col2_2 Nested( - Col2_2_1 UInt8, - Col2_2_2 UInt8 - ) - ) - ) Engine Memory -`) -if err != nil { - return err -} - -batch, err := conn.PrepareBatch(ctx, "INSERT INTO example") -if err != nil { - return err -} -var i int64 -for i = 0; i < 10; i++ { - err := batch.Append( - []map[string]interface{}{ - { - "Col1_1": strconv.Itoa(int(i)), - "Col1_2": uint8(i), - }, - { - "Col1_1": strconv.Itoa(int(i + 1)), - "Col1_2": uint8(i + 1), - }, - { - "Col1_1": strconv.Itoa(int(i + 2)), - "Col1_2": uint8(i + 2), - }, - }, - []map[string]interface{}{ - { - "Col2_2": []map[string]interface{}{ - { - "Col2_2_1": uint8(i), - "Col2_2_2": uint8(i + 1), - }, - }, - "Col2_1": uint8(i), - }, - { - "Col2_2": []map[string]interface{}{ - { - "Col2_2_1": uint8(i + 2), - "Col2_2_2": uint8(i + 3), - }, - }, - "Col2_1": uint8(i + 1), - }, - }, - ) - if err != nil { - return err - } -} -if err := batch.Send(); err != nil { - return err -} -var ( - col1 []map[string]interface{} - col2 []map[string]interface{} -) -rows, err := 
conn.Query(ctx, "SELECT * FROM example") -if err != nil { - return err -} -for rows.Next() { - if err := rows.Scan(&col1, &col2); err != nil { - return err - } - fmt.Printf("行: col1=%v, col2=%v\n", col1, col2) -} -rows.Close() -``` - -[完全な例 - `flatten_tested=0`](https://github.com/ClickHouse/clickhouse-go/blob/main/examples/clickhouse_api/nested.go#L28-L118) - -デフォルト値の1が`flatten_nested`に使用される場合、ネストされたカラムは別々の配列にフラット化されます。これにより、挿入および取得のためにネストされたスライスを使用する必要があります。任意のレベルのネストが機能する可能性がありますが、これは正式にはサポートされていません。 - -```go -conn, err := GetNativeConnection(nil, nil, nil) -if err != nil { - return err -} -ctx := context.Background() -defer func() { - conn.Exec(ctx, "DROP TABLE example") -}() -conn.Exec(ctx, "DROP TABLE IF EXISTS example") -err = conn.Exec(ctx, ` - CREATE TABLE example ( - Col1 Nested(Col1_1 String, Col1_2 UInt8), - Col2 Nested( - Col2_1 UInt8, - Col2_2 Nested( - Col2_2_1 UInt8, - Col2_2_2 UInt8 - ) - ) - ) Engine Memory -`) -if err != nil { - return err -} - -batch, err := conn.PrepareBatch(ctx, "INSERT INTO example") -if err != nil { - return err -} -var i uint8 -for i = 0; i < 10; i++ { - col1_1_data := []string{strconv.Itoa(int(i)), strconv.Itoa(int(i + 1)), strconv.Itoa(int(i + 2))} - col1_2_data := []uint8{i, i + 1, i + 2} - col2_1_data := []uint8{i, i + 1, i + 2} - col2_2_data := [][][]interface{}{ - { - {i, i + 1}, - }, - { - {i + 2, i + 3}, - }, - { - {i + 4, i + 5}, - }, - } - err := batch.Append( - col1_1_data, - col1_2_data, - col2_1_data, - col2_2_data, - ) - if err != nil { - return err - } -} -if err := batch.Send(); err != nil { - return err -} -``` - -[完全な例 - `flatten_nested=1`](https://github.com/ClickHouse/clickhouse-go/blob/main/examples/clickhouse_api/nested.go#L123-L180) - -注:ネストされたカラムは同じ次元でなければなりません。例えば、上述の例では、`Col_2_2`と`Col_2_1`は同じ数の要素を持っている必要があります。 - -より簡潔なインターフェースとネスティングの公式サポートを考慮すると、`flatten_nested=0`を推奨します。 -#### ジオタイプ {#geo-types} - -クライアントは、Point、Ring、Polygon、および Multi Polygon のジオタイプをサポートしています。これらのフィールドは、[github.com/paulmach/orb](https://github.com/paulmach/orb)パッケージを使用してGolangで使用されます。 - -```go -if err = conn.Exec(ctx, ` - CREATE TABLE example ( - point Point, - ring Ring, - polygon Polygon, - mPolygon MultiPolygon - ) - Engine Memory - `); err != nil { - return err -} - -batch, err := conn.PrepareBatch(ctx, "INSERT INTO example") -if err != nil { - return err -} - -if err = batch.Append( - orb.Point{11, 22}, - orb.Ring{ - orb.Point{1, 2}, - orb.Point{1, 2}, - }, - orb.Polygon{ - orb.Ring{ - orb.Point{1, 2}, - orb.Point{12, 2}, - }, - orb.Ring{ - orb.Point{11, 2}, - orb.Point{1, 12}, - }, - }, - orb.MultiPolygon{ - orb.Polygon{ - orb.Ring{ - orb.Point{1, 2}, - orb.Point{12, 2}, - }, - orb.Ring{ - orb.Point{11, 2}, - orb.Point{1, 12}, - }, - }, - orb.Polygon{ - orb.Ring{ - orb.Point{1, 2}, - orb.Point{12, 2}, - }, - orb.Ring{ - orb.Point{11, 2}, - orb.Point{1, 12}, - }, - }, - }, -); err != nil { - return err -} - -if err = batch.Send(); err != nil { - return err -} - -var ( - point orb.Point - ring orb.Ring - polygon orb.Polygon - mPolygon orb.MultiPolygon -) - -if err = conn.QueryRow(ctx, "SELECT * FROM example").Scan(&point, &ring, &polygon, &mPolygon); err != nil { - return err -} -``` - -[完全な例](https://github.com/ClickHouse/clickhouse-go/blob/main/examples/clickhouse_api/geo.go) -#### UUID {#uuid} - -UUID型は、[github.com/google/uuid](https://github.com/google/uuid)パッケージによってサポートされています。ユーザーは、UUIDを文字列として送信したり、`sql.Scanner`または`Stringify`を実装した任意の型としてマーシャリングしたりすることもできます。 - -```go -if err = conn.Exec(ctx, ` - CREATE TABLE example ( - col1 UUID, - col2 UUID - ) 
- Engine Memory - `); err != nil { - return err -} - -batch, err := conn.PrepareBatch(ctx, "INSERT INTO example") -if err != nil { - return err -} -col1Data, _ := uuid.NewUUID() -if err = batch.Append( - col1Data, - "603966d6-ed93-11ec-8ea0-0242ac120002", -); err != nil { - return err -} - -if err = batch.Send(); err != nil { - return err -} - -var ( - col1 uuid.UUID - col2 uuid.UUID -) - -if err = conn.QueryRow(ctx, "SELECT * FROM example").Scan(&col1, &col2); err != nil { - return err -} -``` - -[完全な例](https://github.com/ClickHouse/clickhouse-go/blob/main/examples/clickhouse_api/uuid.go) -#### Decimal {#decimal} - -Decimal型は、[github.com/shopspring/decimal](https://github.com/shopspring/decimal)パッケージによってサポートされています。 - -```go -if err = conn.Exec(ctx, ` - CREATE TABLE example ( - Col1 Decimal32(3), - Col2 Decimal(18,6), - Col3 Decimal(15,7), - Col4 Decimal128(8), - Col5 Decimal256(9) - ) Engine Memory - `); err != nil { - return err -} - -batch, err := conn.PrepareBatch(ctx, "INSERT INTO example") -if err != nil { - return err -} -if err = batch.Append( - decimal.New(25, 4), - decimal.New(30, 5), - decimal.New(35, 6), - decimal.New(135, 7), - decimal.New(256, 8), -); err != nil { - return err -} - -if err = batch.Send(); err != nil { - return err -} - -var ( - col1 decimal.Decimal - col2 decimal.Decimal - col3 decimal.Decimal - col4 decimal.Decimal - col5 decimal.Decimal -) - -if err = conn.QueryRow(ctx, "SELECT * FROM example").Scan(&col1, &col2, &col3, &col4, &col5); err != nil { - return err -} -fmt.Printf("col1=%v, col2=%v, col3=%v, col4=%v, col5=%v\n", col1, col2, col3, col4, col5) -``` - -[完全な例](https://github.com/ClickHouse/clickhouse-go/blob/main/examples/clickhouse_api/decimal.go) -#### Nullable {#nullable} - -GoのNil値は、ClickHouseのNULLを表します。これは、フィールドがNullableとして宣言されている場合に使用できます。挿入時には、通常のカラムとNullableバージョンの両方にNilを渡すことができます。前者の場合、型のデフォルト値が保存されます。例えば、文字列の場合は空の文字列です。Nullableバージョンの場合、NULL値がClickHouseに保存されます。 - -スキャン時に、ユーザーは nil 値をNullableフィールドのために表すために、*string のような nil をサポートする型へのポインタを渡す必要があります。以下の例では、Nullable(String)のcol1は、したがって**stringを受け取ります。これによりnilが表現できるようになります。 - -```go -if err = conn.Exec(ctx, ` - CREATE TABLE example ( - col1 Nullable(String), - col2 String, - col3 Nullable(Int8), - col4 Nullable(Int64) - ) - Engine Memory - `); err != nil { - return err -} - -batch, err := conn.PrepareBatch(ctx, "INSERT INTO example") -if err != nil { - return err -} -if err = batch.Append( - nil, - nil, - nil, - sql.NullInt64{Int64: 0, Valid: false}, -); err != nil { - return err -} - -if err = batch.Send(); err != nil { - return err -} - -var ( - col1 *string - col2 string - col3 *int8 - col4 sql.NullInt64 -) - -if err = conn.QueryRow(ctx, "SELECT * FROM example").Scan(&col1, &col2, &col3, &col4); err != nil { - return err -} -``` - -[完全な例](https://github.com/ClickHouse/clickhouse-go/blob/main/examples/clickhouse_api/nullable.go) - -クライアントは追加で、`sql.Null*`型(例:`sql.NullInt64`)をサポートします。これらは、それぞれのClickHouse型と互換性があります。 -#### 大きな整数 - Int128, Int256, UInt128, UInt256 {#big-ints---int128-int256-uint128-uint256} - -64ビットを超える数値型は、ネイティブなGo [big](https://pkg.go.dev/math/big)パッケージを使用して表されます。 - -```go -if err = conn.Exec(ctx, ` - CREATE TABLE example ( - Col1 Int128, - Col2 UInt128, - Col3 Array(Int128), - Col4 Int256, - Col5 Array(Int256), - Col6 UInt256, - Col7 Array(UInt256) - ) Engine Memory`); err != nil { - return err -} - -batch, err := conn.PrepareBatch(ctx, "INSERT INTO example") -if err != nil { - return err -} - -col1Data, _ := new(big.Int).SetString("170141183460469231731687303715884105727", 10) 
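-// 上の Col1 の値のように int64 に収まらない値は、10 進文字列から SetString で生成する(この値は Int128 の最大値)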
-col2Data := big.NewInt(128) -col3Data := []*big.Int{ - big.NewInt(-128), - big.NewInt(128128), - big.NewInt(128128128), -} -col4Data := big.NewInt(256) -col5Data := []*big.Int{ - big.NewInt(256), - big.NewInt(256256), - big.NewInt(256256256256), -} -col6Data := big.NewInt(256) -col7Data := []*big.Int{ - big.NewInt(256), - big.NewInt(256256), - big.NewInt(256256256256), -} - -if err = batch.Append(col1Data, col2Data, col3Data, col4Data, col5Data, col6Data, col7Data); err != nil { - return err -} - -if err = batch.Send(); err != nil { - return err -} - -var ( - col1 big.Int - col2 big.Int - col3 []*big.Int - col4 big.Int - col5 []*big.Int - col6 big.Int - col7 []*big.Int -) - -if err = conn.QueryRow(ctx, "SELECT * FROM example").Scan(&col1, &col2, &col3, &col4, &col5, &col6, &col7); err != nil { - return err -} -fmt.Printf("col1=%v, col2=%v, col3=%v, col4=%v, col5=%v, col6=%v, col7=%v\n", col1, col2, col3, col4, col5, col6, col7) -``` - -[完全な例](https://github.com/ClickHouse/clickhouse-go/blob/main/examples/clickhouse_api/big_int.go) -### 圧縮 {#compression} - -圧縮方法のサポートは、使用しているプロトコルに依存します。ネイティブプロトコルの場合、クライアントは`LZ4`と`ZSTD`圧縮をサポートしています。これは、ブロックレベルでのみ実行されます。圧縮は、接続の設定に`Compression`設定を含めることによって有効にできます。 - -```go -conn, err := clickhouse.Open(&clickhouse.Options{ - Addr: []string{fmt.Sprintf("%s:%d", env.Host, env.Port)}, - Auth: clickhouse.Auth{ - Database: env.Database, - Username: env.Username, - Password: env.Password, - }, - Compression: &clickhouse.Compression{ - Method: clickhouse.CompressionZSTD, - }, - MaxOpenConns: 1, -}) -ctx := context.Background() -defer func() { - conn.Exec(ctx, "DROP TABLE example") -}() -conn.Exec(context.Background(), "DROP TABLE IF EXISTS example") -if err = conn.Exec(ctx, ` - CREATE TABLE example ( - Col1 Array(String) - ) Engine Memory - `); err != nil { - return err -} -batch, err := conn.PrepareBatch(ctx, "INSERT INTO example") -if err != nil { - return err -} -for i := 0; i < 1000; i++ { - if err := batch.Append([]string{strconv.Itoa(i), strconv.Itoa(i + 1), strconv.Itoa(i + 2), strconv.Itoa(i + 3)}); err != nil { - return err - } -} -if err := batch.Send(); err != nil { - return err -} -``` - -[完全な例](https://github.com/ClickHouse/clickhouse-go/blob/main/examples/clickhouse_api/compression.go) - -HTTP経由の標準インターフェースを使用している場合、追加の圧縮手法が利用可能です。詳細については、[database/sql API - 圧縮](#compression)を参照してください。 -### パラメータバインディング {#parameter-binding} - -クライアントは、`Exec`、`Query`、および`QueryRow`メソッドのためのパラメータバインディングをサポートしています。以下の例に示すように、これは名前付き、番号付き、位置指定のパラメータを使用してサポートされています。これについての例を以下に示します。 - -```go -var count uint64 -// 位置指定バインド -if err = conn.QueryRow(ctx, "SELECT count() FROM example WHERE Col1 >= ? 
AND Col3 < ?", 500, now.Add(time.Duration(750)*time.Second)).Scan(&count); err != nil { - return err -} -// 250 -fmt.Printf("位置指定バインド count: %d\n", count) -// 数値バインド -if err = conn.QueryRow(ctx, "SELECT count() FROM example WHERE Col1 <= $2 AND Col3 > $1", now.Add(time.Duration(150)*time.Second), 250).Scan(&count); err != nil { - return err -} -// 100 -fmt.Printf("数値バインド count: %d\n", count) -// 名前付きバインド -if err = conn.QueryRow(ctx, "SELECT count() FROM example WHERE Col1 <= @col1 AND Col3 > @col3", clickhouse.Named("col1", 100), clickhouse.Named("col3", now.Add(time.Duration(50)*time.Second))).Scan(&count); err != nil { - return err -} -// 50 -fmt.Printf("名前付きバインド count: %d\n", count) -``` - -[完全な例](https://github.com/ClickHouse/clickhouse-go/blob/main/examples/clickhouse_api/bind.go) -#### 特殊ケース {#special-cases} - -デフォルトでは、スライスはクエリにパラメータとして渡された場合、値のカンマ区切りリストに展開されます。ユーザーが値のセットを `[]` でラップして挿入する必要がある場合は、`ArraySet`を使用する必要があります。 - -グループ/タプルが必要な場合は、`( )` でラップされ、IN演算子と共に使用するために、`GroupSet`を使用できます。これは、以下の例に示すように、複数のグループが必要なケースに特に便利です。 - -最後に、DateTime64フィールドはパラメータが適切に表示されるように精度が必要です。フィールドの精度レベルはクライアントには不明ですが、ユーザーはそれを提供しなければなりません。これを簡素化するために、`DateNamed`パラメータを提供します。 - -```go -var count uint64 -// 配列は展開されます -if err = conn.QueryRow(ctx, "SELECT count() FROM example WHERE Col1 IN (?)", []int{100, 200, 300, 400, 500}).Scan(&count); err != nil { - return err -} -fmt.Printf("配列展開 count: %d\n", count) -// 配列は [ ] で保持されます -if err = conn.QueryRow(ctx, "SELECT count() FROM example WHERE Col4 = ?", clickhouse.ArraySet{300, 301}).Scan(&count); err != nil { - return err -} -fmt.Printf("配列 count: %d\n", count) -// グループセットにより ( ) リストを形成できます -if err = conn.QueryRow(ctx, "SELECT count() FROM example WHERE Col1 IN ?", clickhouse.GroupSet{[]interface{}{100, 200, 300, 400, 500}}).Scan(&count); err != nil { - return err -} -fmt.Printf("グループ count: %d\n", count) -// ネストが必要な場合にもっと便利 -if err = conn.QueryRow(ctx, "SELECT count() FROM example WHERE (Col1, Col5) IN (?)", []clickhouse.GroupSet{{[]interface{}{100, 101}}, {[]interface{}{200, 201}}}).Scan(&count); err != nil { - return err -} -fmt.Printf("グループ count: %d\n", count) -// 時間の精度が必要な際に DateNamed を使用 -if err = conn.QueryRow(ctx, "SELECT count() FROM example WHERE Col3 >= @col3", clickhouse.DateNamed("col3", now.Add(time.Duration(500)*time.Millisecond), clickhouse.NanoSeconds)).Scan(&count); err != nil { - return err -} -fmt.Printf("NamedDate count: %d\n", count) -``` - -[完全な例](https://github.com/ClickHouse/clickhouse-go/blob/main/examples/clickhouse_api/bind_special.go) -### コンテキストの使用 {#using-context} - -Goのコンテキストは、締切、キャンセレーション信号、および他のリクエストスコープの値をAPIの境界を越えて渡す手段を提供します。接続上のすべてのメソッドは、最初の変数としてコンテキストを受け入れます。前の例ではcontext.Background()が使用されていましたが、ユーザーはこの機能を利用して設定や締切を渡し、クエリをキャンセルすることができます。 - -`withDeadline`で作成されたコンテキストを渡すことで、クエリには実行時間の制限を設けることができます。これは絶対的な時間であり、有効期限が切れると接続が解放され、ClickHouseにキャンセル信号が送信されます。`WithCancel`を代わりに使用して、クエリを明示的にキャンセルすることもできます。 - -ヘルパーの `clickhouse.WithQueryID` と `clickhouse.WithQuotaKey` を使用すると、クエリIDとクオータキーを指定することができます。クエリIDは、ログ内でのクエリ追跡やキャンセル目的に役立ちます。クオータキーは、ユニークなキー値に基づいてClickHouseの使用制限を設けるために使用されます - 詳細については[クオータ管理](/operations/access-rights#quotas-management)を参照してください。 - -ユーザーはまた、コンテキストを使用して特定のクエリに対してのみ設定を適用することができます - 接続全体ではなく、[接続設定](#connection-settings)に示されています。 - -最後に、`clickhouse.WithBlockSize`を介してブロックバッファのサイズを制御できます。これは接続レベルの設定`BlockBufferSize`を上書きし、メモリ内でデコードされて保持されるブロックの最大数を制御します。大きな値は、メモリの代償としてより多くの並列化を意味する可能性があります。 - -以下に上記の例を示します。 - -```go -dialCount := 0 -conn, err := clickhouse.Open(&clickhouse.Options{ - Addr: []string{fmt.Sprintf("%s:%d", 
env.Host, env.Port)}, - Auth: clickhouse.Auth{ - Database: env.Database, - Username: env.Username, - Password: env.Password, - }, - DialContext: func(ctx context.Context, addr string) (net.Conn, error) { - dialCount++ - var d net.Dialer - return d.DialContext(ctx, "tcp", addr) - }, -}) -if err != nil { - return err -} -if err := clickhouse_tests.CheckMinServerServerVersion(conn, 22, 6, 1); err != nil { - return nil -} -// コンテキストを使用して特定のAPI呼び出しに設定を渡すことができます -ctx := clickhouse.Context(context.Background(), clickhouse.WithSettings(clickhouse.Settings{ - "allow_experimental_object_type": "1", -})) - -conn.Exec(ctx, "DROP TABLE IF EXISTS example") - -// JSONカラムを作成するにはallow_experimental_object_type=1が必要です -if err = conn.Exec(ctx, ` - CREATE TABLE example ( - Col1 JSON - ) - Engine Memory - `); err != nil { - return err -} - -// コンテキストを使用してクエリをキャンセルできます -ctx, cancel := context.WithCancel(context.Background()) -go func() { - cancel() -}() -if err = conn.QueryRow(ctx, "SELECT sleep(3)").Scan(); err == nil { - return fmt.Errorf("expected cancel") -} - -// クエリの締切を設定します - これは絶対時間が経過した後にクエリをキャンセルします。 -// クエリはClickHouseで完了するまで実行され続けます -ctx, cancel = context.WithDeadline(context.Background(), time.Now().Add(-time.Second)) -defer cancel() -if err := conn.Ping(ctx); err == nil { - return fmt.Errorf("expected deadline exceeded") -} - -// クエリIDを設定してログでのクエリトレースを支援します e.g. see system.query_log -var one uint8 -queryId, _ := uuid.NewUUID() -ctx = clickhouse.Context(context.Background(), clickhouse.WithQueryID(queryId.String())) -if err = conn.QueryRow(ctx, "SELECT 1").Scan(&one); err != nil { - return err -} - -conn.Exec(context.Background(), "DROP QUOTA IF EXISTS foobar") -defer func() { - conn.Exec(context.Background(), "DROP QUOTA IF EXISTS foobar") -}() -ctx = clickhouse.Context(context.Background(), clickhouse.WithQuotaKey("abcde")) -// クオータキーを設定します - 先にクオータを作成します -if err = conn.Exec(ctx, "CREATE QUOTA IF NOT EXISTS foobar KEYED BY client_key FOR INTERVAL 1 minute MAX queries = 5 TO default"); err != nil { - return err -} - -type Number struct { - Number uint64 `ch:"number"` -} -for i := 1; i <= 6; i++ { - var result []Number - if err = conn.Select(ctx, &result, "SELECT number FROM numbers(10)"); err != nil { - return err - } -} -``` - -[フル例](https://github.com/ClickHouse/clickhouse-go/blob/main/examples/clickhouse_api/context.go) -### 進捗/プロファイル/ログ情報 {#progressprofilelog-information} - -クエリに関して進捗、プロファイル、およびログ情報を要求することができます。進捗情報は、ClickHouseで読み取りおよび処理された行数およびバイト数の統計を報告します。対照的に、プロファイル情報はクライアントに返されるデータの要約を提供し、バイト(非圧縮)、行、およびブロックの合計を含みます。最後に、ログ情報はスレッドに関する統計を提供し、メモリ使用量やデータ速度を含みます。 - -この情報を得るには、ユーザーは[コンテキスト](#using-context)を使用する必要があり、コールバック関数を渡すことができます。 - -```go -totalRows := uint64(0) -// コンテキストを使用して進捗とプロファイル情報のコールバックを渡します -ctx := clickhouse.Context(context.Background(), clickhouse.WithProgress(func(p *clickhouse.Progress) { - fmt.Println("進捗: ", p) - totalRows += p.Rows -}), clickhouse.WithProfileInfo(func(p *clickhouse.ProfileInfo) { - fmt.Println("プロファイル情報: ", p) -}), clickhouse.WithLogs(func(log *clickhouse.Log) { - fmt.Println("ログ情報: ", log) -})) - -rows, err := conn.Query(ctx, "SELECT number from numbers(1000000) LIMIT 1000000") -if err != nil { - return err -} -for rows.Next() { -} - -fmt.Printf("合計行数: %d\n", totalRows) -rows.Close() -``` - -[フル例](https://github.com/ClickHouse/clickhouse-go/blob/main/examples/clickhouse_api/progress.go) -### 動的スキャン {#dynamic-scanning} - 
-ユーザーは、スキーマやフィールドの型がわからないテーブルを読み取る必要がある場合があります。これは、アドホックデータ分析が行われる場合や、汎用ツールが書かれる場合に一般的です。これを達成するために、クエリ応答ではカラムタイプ情報が利用可能です。これは、Goのリフレクションを使って、スキャンに渡すことができる正しい型の変数のインスタンスを作成するために使用できます。 - -```go -const query = ` -SELECT - 1 AS Col1 - , 'Text' AS Col2 -` -rows, err := conn.Query(context.Background(), query) -if err != nil { - return err -} -var ( - columnTypes = rows.ColumnTypes() - vars = make([]interface{}, len(columnTypes)) -) -for i := range columnTypes { - vars[i] = reflect.New(columnTypes[i].ScanType()).Interface() -} -for rows.Next() { - if err := rows.Scan(vars...); err != nil { - return err - } - for _, v := range vars { - switch v := v.(type) { - case *string: - fmt.Println(*v) - case *uint8: - fmt.Println(*v) - } - } -} -``` - -[フル例](https://github.com/ClickHouse/clickhouse-go/blob/main/examples/clickhouse_api/dynamic_scan_types.go) -### 外部テーブル {#external-tables} - -[外部テーブル](/engines/table-engines/special/external-data/)は、クライアントがSELECTクエリを介してデータをClickHouseに送信できるようにします。このデータは一時テーブルに配置され、評価のためにクエリ自体で使用できます。 - -外部データをクエリでクライアントに送信するには、ユーザーはコンテキストを介してこれを渡す前に `ext.NewTable` を使用して外部テーブルを構築する必要があります。 - -```go -table1, err := ext.NewTable("external_table_1", - ext.Column("col1", "UInt8"), - ext.Column("col2", "String"), - ext.Column("col3", "DateTime"), -) -if err != nil { - return err -} - -for i := 0; i < 10; i++ { - if err = table1.Append(uint8(i), fmt.Sprintf("value_%d", i), time.Now()); err != nil { - return err - } -} - -table2, err := ext.NewTable("external_table_2", - ext.Column("col1", "UInt8"), - ext.Column("col2", "String"), - ext.Column("col3", "DateTime"), -) - -for i := 0; i < 10; i++ { - table2.Append(uint8(i), fmt.Sprintf("value_%d", i), time.Now()) -} -ctx := clickhouse.Context(context.Background(), - clickhouse.WithExternalTable(table1, table2), -) -rows, err := conn.Query(ctx, "SELECT * FROM external_table_1") -if err != nil { - return err -} -for rows.Next() { - var ( - col1 uint8 - col2 string - col3 time.Time - ) - rows.Scan(&col1, &col2, &col3) - fmt.Printf("col1=%d, col2=%s, col3=%v\n", col1, col2, col3) -} -rows.Close() - -var count uint64 -if err := conn.QueryRow(ctx, "SELECT COUNT(*) FROM external_table_1").Scan(&count); err != nil { - return err -} -fmt.Printf("external_table_1: %d\n", count) -if err := conn.QueryRow(ctx, "SELECT COUNT(*) FROM external_table_2").Scan(&count); err != nil { - return err -} -fmt.Printf("external_table_2: %d\n", count) -if err := conn.QueryRow(ctx, "SELECT COUNT(*) FROM (SELECT * FROM external_table_1 UNION ALL SELECT * FROM external_table_2)").Scan(&count); err != nil { - return err -} -fmt.Printf("external_table_1 UNION external_table_2: %d\n", count) -``` - -[フル例](https://github.com/ClickHouse/clickhouse-go/blob/main/examples/clickhouse_api/external_data.go) -### Open Telemetry {#open-telemetry} - -ClickHouseは、ネイティブプロトコルの一部として[トレースコンテキスト](/operations/opentelemetry/)を渡すことを可能にします。クライアントは、`clickhouse.withSpan`関数を介してスパンを作成し、これをコンテキストを介して渡すことができます。 - -```go -var count uint64 -rows := conn.QueryRow(clickhouse.Context(context.Background(), clickhouse.WithSpan( - trace.NewSpanContext(trace.SpanContextConfig{ - SpanID: trace.SpanID{1, 2, 3, 4, 5}, - TraceID: trace.TraceID{5, 4, 3, 2, 1}, - }), -)), "SELECT COUNT() FROM (SELECT number FROM system.numbers LIMIT 5)") -if err := rows.Scan(&count); err != nil { - return err -} -fmt.Printf("count: %d\n", count) -``` - -[フル例](https://github.com/ClickHouse/clickhouse-go/blob/main/examples/clickhouse_api/open_telemetry.go) - 
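-
-以下は、アプリケーション側で既に OpenTelemetry のスパンを開始している場合に、その `SpanContext` をそのままクエリに引き継ぐ想定のスケッチです。`trace` は `go.opentelemetry.io/otel/trace` パッケージを指し、`ctx` に有効なスパンが含まれていること、および接続済みの `conn` が存在することを仮定しています(上記の完全な例とは異なり、説明用の断片です)。
-
-```go
-// 既存のスパンからトレースコンテキストを取り出し、ClickHouse クエリへ伝播させるスケッチ(仮定に基づく例)
-span := trace.SpanFromContext(ctx)
-chCtx := clickhouse.Context(ctx, clickhouse.WithSpan(span.SpanContext()))
-
-var count uint64
-if err := conn.QueryRow(chCtx, "SELECT COUNT() FROM (SELECT number FROM system.numbers LIMIT 5)").Scan(&count); err != nil {
-    return err
-}
-fmt.Printf("count: %d\n", count)
-```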
-トレースの活用に関する詳細は、[OpenTelemetryサポート](/operations/opentelemetry/)の下を参照してください。 -## データベース/SQL API {#databasesql-api} - -`database/sql`または「標準」APIは、アプリケーションコードが基盤となるデータベースに無関心であるべきシナリオでクライアントを使用できるようにします。これはある種のコストがかかります - 追加の抽象化レイヤーと間接化、およびClickHouseと必ずしも一致しないプリミティブです。しかし、これらのコストは通常、ツールが複数のデータベースに接続する必要があるシナリオでは受け入れられます。 - -さらに、このクライアントはHTTPをトランスポートレイヤーとして使用することをサポートしており、データは最適なパフォーマンスのためにネイティブ形式でエンコードされます。 - -以下は、ClickHouse APIのドキュメント構造に合わせることを目指しています。 - -標準APIのフルコード例は[こちら](https://github.com/ClickHouse/clickhouse-go/tree/main/examples/std)で見つけることができます。 -### 接続 {#connecting-1} - -接続は、`clickhouse://:?=`という形式のDSN文字列と`Open`メソッド、または`clickhouse.OpenDB`メソッドを介して達成できます。後者は`database/sql`仕様の一部ではありませんが、`sql.DB`インスタンスを返します。このメソッドは、`database/sql`仕様を通じて明示的に公開する明確な手段がないプロファイリングなどの機能を提供します。 - -```go -func Connect() error { - env, err := GetStdTestEnvironment() - if err != nil { - return err - } - conn := clickhouse.OpenDB(&clickhouse.Options{ - Addr: []string{fmt.Sprintf("%s:%d", env.Host, env.Port)}, - Auth: clickhouse.Auth{ - Database: env.Database, - Username: env.Username, - Password: env.Password, - }, - }) - return conn.Ping() -} - - -func ConnectDSN() error { - env, err := GetStdTestEnvironment() - if err != nil { - return err - } - conn, err := sql.Open("clickhouse", fmt.Sprintf("clickhouse://%s:%d?username=%s&password=%s", env.Host, env.Port, env.Username, env.Password)) - if err != nil { - return err - } - return conn.Ping() -} -``` - -[フル例](https://github.com/ClickHouse/clickhouse-go/blob/main/examples/std/connect.go) - -**以降のすべての例では、明示的に示されない限り、ClickHouseの `conn` 変数が作成され、利用可能であると仮定します。** -#### 接続設定 {#connection-settings-1} - -以下のパラメータをDSN文字列に渡すことができます: - -* `hosts` - ロードバランシングおよびフェイルオーバーのための単一アドレスホストのカンマ区切りリスト - [複数ノードへの接続](#connecting-to-multiple-nodes)を参照してください。 -* `username/password` - 認証資格情報 - [認証](#authentication)を参照してください。 -* `database` - 現在のデフォルトデータベースを選択する -* `dial_timeout` - 期間文字列は、符号付きの可能性がある小数のシーケンスであり、各小数にはオプションの分数と`300ms`、`1s`のような単位の接尾辞があります。有効な時間単位は`ms`、`s`、`m`です。 -* `connection_open_strategy` - `random/in_order`(デフォルトは`random`) - [複数ノードに接続する](#connecting-to-multiple-nodes)を参照してください。 - - `round_robin` - セットからラウンドロビンサーバーを選択します - - `in_order` - 指定された順序で最初のライブサーバーが選択されます -* `debug` - デバッグ出力を有効にする(ブール値) -* `compress` - 圧縮アルゴリズムを指定する - `none`(デフォルト)、`zstd`、`lz4`、`gzip`、`deflate`、`br`。`true`に設定すると、`lz4`が使用されます。ネイティブ通信については、`lz4`と`zstd`のみがサポートされます。 -* `compress_level` - 圧縮レベル(デフォルトは`0`)。詳しくは圧縮を参照してください。これはアルゴリズム特有です: - - `gzip` - `-2`(最高のスピード)から`9`(最高の圧縮) - - `deflate` - `-2`(最高のスピード)から`9`(最高の圧縮) - - `br` - `0`(最高のスピード)から`11`(最高の圧縮) - - `zstd`、`lz4` - 無視される -* `secure` - セキュアなSSL接続を確立します(デフォルトは`false`) -* `skip_verify` - 証明書の検証をスキップします(デフォルトは`false`) -* `block_buffer_size` - ユーザーがブロックバッファのサイズを制御できるようにします。[`BlockBufferSize`](#connection-settings)を参照してください(デフォルトは`2`)。 - -```go -func ConnectSettings() error { - env, err := GetStdTestEnvironment() - if err != nil { - return err - } - conn, err := sql.Open("clickhouse", fmt.Sprintf("clickhouse://127.0.0.1:9001,127.0.0.1:9002,%s:%d/%s?username=%s&password=%s&dial_timeout=10s&connection_open_strategy=round_robin&debug=true&compress=lz4", env.Host, env.Port, env.Database, env.Username, env.Password)) - if err != nil { - return err - } - return conn.Ping() -} -``` -[フル例](https://github.com/ClickHouse/clickhouse-go/blob/main/examples/std/connect_settings.go) -#### 接続プーリング {#connection-pooling-1} - -ユーザーは、[複数ノードへの接続](#connecting-to-multiple-nodes)で説明されているように、提供されたノードアドレスのリストの使用を影響を与えることができます。ただし、接続管理とプーリングは意図的に`sql.DB`に委任されています。 -#### HTTP経由での接続 
{#connecting-over-http} - -デフォルトでは、接続はネイティブプロトコルを介して確立されます。HTTPが必要なユーザーは、DSNを修正してHTTPプロトコルを含めるか、接続オプションにプロトコルを指定することでこれを有効にできます。 - -```go -func ConnectHTTP() error { - env, err := GetStdTestEnvironment() - if err != nil { - return err - } - conn := clickhouse.OpenDB(&clickhouse.Options{ - Addr: []string{fmt.Sprintf("%s:%d", env.Host, env.HttpPort)}, - Auth: clickhouse.Auth{ - Database: env.Database, - Username: env.Username, - Password: env.Password, - }, - Protocol: clickhouse.HTTP, - }) - return conn.Ping() -} - -func ConnectDSNHTTP() error { - env, err := GetStdTestEnvironment() - if err != nil { - return err - } - conn, err := sql.Open("clickhouse", fmt.Sprintf("http://%s:%d?username=%s&password=%s", env.Host, env.HttpPort, env.Username, env.Password)) - if err != nil { - return err - } - return conn.Ping() -} -``` - -[フル例](https://github.com/ClickHouse/clickhouse-go/blob/main/examples/std/connect_http.go) -#### 複数ノードへの接続 {#connecting-to-multiple-nodes-1} - -`OpenDB`を使用する場合は、ClickHouse APIで使用されているのと同じオプションアプローチを使用して複数のホストに接続します。 `ConnOpenStrategy`をオプションとして指定できます。 - -DSNベースの接続の場合、文字列は複数のホストと`connection_open_strategy`パラメーターを受け入れ、その値を`round_robin`または`in_order`に設定できます。 - -```go -func MultiStdHost() error { - env, err := GetStdTestEnvironment() - if err != nil { - return err - } - conn, err := clickhouse.Open(&clickhouse.Options{ - Addr: []string{"127.0.0.1:9001", "127.0.0.1:9002", fmt.Sprintf("%s:%d", env.Host, env.Port)}, - Auth: clickhouse.Auth{ - Database: env.Database, - Username: env.Username, - Password: env.Password, - }, - ConnOpenStrategy: clickhouse.ConnOpenRoundRobin, - }) - if err != nil { - return err - } - v, err := conn.ServerVersion() - if err != nil { - return err - } - fmt.Println(v.String()) - return nil -} - -func MultiStdHostDSN() error { - env, err := GetStdTestEnvironment() - if err != nil { - return err - } - conn, err := sql.Open("clickhouse", fmt.Sprintf("clickhouse://127.0.0.1:9001,127.0.0.1:9002,%s:%d?username=%s&password=%s&connection_open_strategy=round_robin", env.Host, env.Port, env.Username, env.Password)) - if err != nil { - return err - } - return conn.Ping() -} -``` - -[フル例](https://github.com/ClickHouse/clickhouse-go/blob/main/examples/std/multi_host.go) -### TLSの使用 {#using-tls-1} - -DSN接続文字列を使用する場合、SSLは「secure=true」パラメータを介して有効にできます。`OpenDB`メソッドは、[ネイティブAPIのTLS](#using-tls)と同じアプローチを採用しており、非nil TLS構造体の指定に依存しています。DSN接続文字列は、SSL検証をスキップするために`skip_verify`パラメーターをサポートしますが、`OpenDB`メソッドは、構成を渡すことを許可するため、より高度なTLS構成に必要です。 - -```go -func ConnectSSL() error { - env, err := GetStdTestEnvironment() - if err != nil { - return err - } - cwd, err := os.Getwd() - if err != nil { - return err - } - t := &tls.Config{} - caCert, err := ioutil.ReadFile(path.Join(cwd, "../../tests/resources/CAroot.crt")) - if err != nil { - return err - } - caCertPool := x509.NewCertPool() - successful := caCertPool.AppendCertsFromPEM(caCert) - if !successful { - return err - } - t.RootCAs = caCertPool - - - conn := clickhouse.OpenDB(&clickhouse.Options{ - Addr: []string{fmt.Sprintf("%s:%d", env.Host, env.SslPort)}, - Auth: clickhouse.Auth{ - Database: env.Database, - Username: env.Username, - Password: env.Password, - }, - TLS: t, - }) - return conn.Ping() -} - -func ConnectDSNSSL() error { - env, err := GetStdTestEnvironment() - if err != nil { - return err - } - conn, err := sql.Open("clickhouse", fmt.Sprintf("https://%s:%d?secure=true&skip_verify=true&username=%s&password=%s", env.Host, env.HttpsPort, env.Username, env.Password)) - if err != nil { - return err - } - return conn.Ping() -} -``` 
- -[フル例](https://github.com/ClickHouse/clickhouse-go/blob/main/examples/std/ssl.go) -### 認証 {#authentication-1} - -`OpenDB`を使用する場合、認証情報は通常のオプションを介して渡すことができます。DSNベースの接続の場合、接続文字列にユーザー名とパスワードをパラメータとして渡すか、アドレスにエンコードされた資格情報として渡すことができます。 - -```go -func ConnectAuth() error { - env, err := GetStdTestEnvironment() - if err != nil { - return err - } - conn := clickhouse.OpenDB(&clickhouse.Options{ - Addr: []string{fmt.Sprintf("%s:%d", env.Host, env.Port)}, - Auth: clickhouse.Auth{ - Database: env.Database, - Username: env.Username, - Password: env.Password, - }, - }) - return conn.Ping() -} - -func ConnectDSNAuth() error { - env, err := GetStdTestEnvironment() - if err != nil { - return err - } - conn, err := sql.Open("clickhouse", fmt.Sprintf("http://%s:%d?username=%s&password=%s", env.Host, env.HttpPort, env.Username, env.Password)) - if err != nil { - return err - } - if err = conn.Ping(); err != nil { - return err - } - conn, err = sql.Open("clickhouse", fmt.Sprintf("http://%s:%s@%s:%d", env.Username, env.Password, env.Host, env.HttpPort)) - if err != nil { - return err - } - return conn.Ping() -} -``` - -[フル例](https://github.com/ClickHouse/clickhouse-go/blob/main/examples/std/auth.go) -### 実行 {#execution-1} - -接続が取得されると、ユーザーはExecメソッドを介して`sql`文を実行することができます。 - -```go -conn.Exec(`DROP TABLE IF EXISTS example`) -_, err = conn.Exec(` - CREATE TABLE IF NOT EXISTS example ( - Col1 UInt8, - Col2 String - ) engine=Memory -`) -if err != nil { - return err -} -_, err = conn.Exec("INSERT INTO example VALUES (1, 'test-1')") -``` - -[フル例](https://github.com/ClickHouse/clickhouse-go/blob/main/examples/std/exec.go) - - -このメソッドは、コンテキストを受け取ることはサポートしていません - デフォルトでは、バックグラウンドコンテキストで実行されます。ユーザーは必要に応じて`ExecContext`を使用できます - [コンテキストの使用](#using-context)を参照してください。 -### バッチ挿入 {#batch-insert-1} - -バッチセマンティクスは、`Being`メソッドを介して`sql.Tx`を作成することによって達成できます。これにより、`INSERT`文を使用して`Prepare`メソッドを取得できます。これにより、行を`Exec`メソッドを使用して追加できる`sql.Stmt`が返されます。バッチは、最初の`sql.Tx`で`Commit`が実行されるまでメモリに蓄積されます。 - -```go -batch, err := scope.Prepare("INSERT INTO example") -if err != nil { - return err -} -for i := 0; i < 1000; i++ { - _, err := batch.Exec( - uint8(42), - "ClickHouse", "Inc", - uuid.New(), - map[string]uint8{"key": 1}, // Map(String, UInt8) - []string{"Q", "W", "E", "R", "T", "Y"}, // Array(String) - []interface{}{ // Tuple(String, UInt8, Array(Map(String, String))) - "String Value", uint8(5), []map[string]string{ - map[string]string{"key": "value"}, - map[string]string{"key": "value"}, - map[string]string{"key": "value"}, - }, - }, - time.Now(), - ) - if err != nil { - return err - } -} -return scope.Commit() -``` - -[フル例](https://github.com/ClickHouse/clickhouse-go/blob/main/examples/std/batch.go) -### 行のクエリ {#querying-rows-1} - -単一の行のクエリは、`QueryRow`メソッドを使用して実行できます。これにより、スキャンを行うために変数へのポインタを伴う*sql.Rowが返されます。`QueryRowContext`のバリアントにより、バックグラウンド以外のコンテキストを渡すことができます - [コンテキストの使用](#using-context)を参照してください。 - -```go -row := conn.QueryRow("SELECT * FROM example") -var ( - col1 uint8 - col2, col3, col4 string - col5 map[string]uint8 - col6 []string - col7 interface{} - col8 time.Time -) -if err := row.Scan(&col1, &col2, &col3, &col4, &col5, &col6, &col7, &col8); err != nil { - return err -} -``` - -[フル例](https://github.com/ClickHouse/clickhouse-go/blob/main/examples/std/query_row.go) - -複数行を繰り返すには、`Query`メソッドを使用します。これにより、行を反復処理するためにNextを呼び出すことができる`*sql.Rows`構造体が返されます。`QueryContext`の同等のものはコンテキストの渡しを可能にします。 - -```go -rows, err := conn.Query("SELECT * FROM example") -if err != nil { - return err -} -var ( - col1 uint8 - col2, col3, col4 string - col5 
map[string]uint8 - col6 []string - col7 interface{} - col8 time.Time -) -for rows.Next() { - if err := rows.Scan(&col1, &col2, &col3, &col4, &col5, &col6, &col7, &col8); err != nil { - return err - } - fmt.Printf("row: col1=%d, col2=%s, col3=%s, col4=%s, col5=%v, col6=%v, col7=%v, col8=%v\n", col1, col2, col3, col4, col5, col6, col7, col8) -} -``` - -[フル例](https://github.com/ClickHouse/clickhouse-go/blob/main/examples/std/query_rows.go) -### 非同期挿入 {#async-insert-1} - -非同期挿入は、`ExecContext`メソッドを介して挿入を実行することで達成できます。これは、非同期モードが有効になったコンテキストを渡す必要があります。これにより、クライアントがサーバーが挿入を完了するまで待つか、データが受信された時点で応答するかを指定できます。これは、[wait_for_async_insert](/operations/settings/settings#wait_for_async_insert)パラメータを制御します。 - -```go -const ddl = ` - CREATE TABLE example ( - Col1 UInt64 - , Col2 String - , Col3 Array(UInt8) - , Col4 DateTime - ) ENGINE = Memory - ` -if _, err := conn.Exec(ddl); err != nil { - return err -} -ctx := clickhouse.Context(context.Background(), clickhouse.WithStdAsync(false)) -{ - for i := 0; i < 100; i++ { - _, err := conn.ExecContext(ctx, fmt.Sprintf(`INSERT INTO example VALUES ( - %d, '%s', [1, 2, 3, 4, 5, 6, 7, 8, 9], now() - )`, i, "Golang SQL database driver")) - if err != nil { - return err - } - } -} -``` - -[フル例](https://github.com/ClickHouse/clickhouse-go/blob/main/examples/std/async.go) -### 列指向の挿入 {#columnar-insert-1} - -標準インターフェースではサポートされていません。 -### 構造体の使用 {#using-structs-1} - -標準インターフェースではサポートされていません。 -### 型変換 {#type-conversions-1} - -標準の`database/sql`インターフェースは、[ClickHouse API](#type-conversions)と同じ型をサポートする必要があります。いくつかの例外があり、主に複雑な型については、以下にドキュメントされています。ClickHouse APIに類似して、クライアントは挿入およびレスポンスのマシュアリングのために可能な限り柔軟性を持つことを目指しています。詳細については[型変換](#type-conversions)を参照してください。 -### 複雑な型 {#complex-types-1} - -特に明記されている場合を除いて、複雑な型の処理は[ClickHouse API](#complex-types)と同様であるべきです。違いは`database/sql`の内部によるものです。 -#### マップ {#maps} - -ClickHouse APIとは異なり、標準APIはマップをスキャンタイプで厳密に型付けする必要があります。たとえば、ユーザーは`Map(String,String)`フィールドに対して`map[string]interface{}`を渡すことはできず、代わりに`map[string]string`を使用する必要があります。`interface{}`変数は常に互換性があり、より複雑な構造に使用できます。ストラクチャは読み取り時にサポートされていません。 - -```go -var ( - col1Data = map[string]uint64{ - "key_col_1_1": 1, - "key_col_1_2": 2, - } - col2Data = map[string]uint64{ - "key_col_2_1": 10, - "key_col_2_2": 20, - } - col3Data = map[string]uint64{} - col4Data = []map[string]string{ - {"A": "B"}, - {"C": "D"}, - } - col5Data = map[string]uint64{ - "key_col_5_1": 100, - "key_col_5_2": 200, - } -) -if _, err := batch.Exec(col1Data, col2Data, col3Data, col4Data, col5Data); err != nil { - return err -} -if err = scope.Commit(); err != nil { - return err -} -var ( - col1 interface{} - col2 map[string]uint64 - col3 map[string]uint64 - col4 []map[string]string - col5 map[string]uint64 -) -if err := conn.QueryRow("SELECT * FROM example").Scan(&col1, &col2, &col3, &col4, &col5); err != nil { - return err -} -fmt.Printf("col1=%v, col2=%v, col3=%v, col4=%v, col5=%v", col1, col2, col3, col4, col5) -``` - -[フル例](https://github.com/ClickHouse/clickhouse-go/blob/main/examples/std/map.go) - -挿入の動作はClickHouse APIと同様です。 -### Compression {#compression-1} - -標準APIは、ネイティブな [ClickHouse API](#compression) と同様に、ブロックレベルでの `lz4` および `zstd` 圧縮アルゴリズムをサポートしています。さらに、HTTP接続に対してはgzip、deflate、およびbr圧縮もサポートされています。これらのどれかが有効になっている場合、圧縮は挿入時およびクエリ応答時のブロックに対して行われます。pingやクエリリクエストなどの他のリクエストは圧縮されません。これは `lz4` および `zstd` オプションと一貫しています。 - -接続を確立するために `OpenDB` メソッドを使用する場合、Compression設定を渡すことができます。これには圧縮レベルを指定する機能も含まれています(以下参照)。DSNを使って `sql.Open` で接続する場合は、`compress` パラメータを使用します。これは、`gzip`、`deflate`、`br`、`zstd`、または `lz4` 
という特定の圧縮アルゴリズム、またはブーリアンフラグである可能性があります。trueに設定された場合、`lz4` が使用されます。デフォルトは `none` すなわち圧縮無効です。 - -```go -conn := clickhouse.OpenDB(&clickhouse.Options{ - Addr: []string{fmt.Sprintf("%s:%d", env.Host, env.HttpPort)}, - Auth: clickhouse.Auth{ - Database: env.Database, - Username: env.Username, - Password: env.Password, - }, - Compression: &clickhouse.Compression{ - Method: clickhouse.CompressionBrotli, - Level: 5, - }, - Protocol: clickhouse.HTTP, -}) -``` -[フルサンプル](https://github.com/ClickHouse/clickhouse-go/blob/main/examples/std/compression.go#L27-L76) - -```go -conn, err := sql.Open("clickhouse", fmt.Sprintf("http://%s:%d?username=%s&password=%s&compress=gzip&compress_level=5", env.Host, env.HttpPort, env.Username, env.Password)) -``` -[フルサンプル](https://github.com/ClickHouse/clickhouse-go/blob/main/examples/std/compression.go#L78-L115) - -適用された圧縮レベルは、DSNパラメータ compress_level または Compressionオプションの Level フィールドで制御できます。これはデフォルトで0ですが、アルゴリズムによって異なります: - -* `gzip` - `-2` (最良の速度) から `9` (最良の圧縮) -* `deflate` - `-2` (最良の速度) から `9` (最良の圧縮) -* `br` - `0` (最良の速度) から `11` (最良の圧縮) -* `zstd`, `lz4` - 無視される - -### Parameter Binding {#parameter-binding-1} - -標準APIは、[ClickHouse API](#parameter-binding) と同様のパラメータバインディング機能をサポートしており、`Exec`、`Query`、および `QueryRow` メソッド(およびそれらの相当する [Context](#using-context) バリアント)にパラメータを渡すことができます。位置指定、名前付き、および番号付きパラメータがサポートされています。 - -```go -var count uint64 -// 位置指定バインド -if err = conn.QueryRow(ctx, "SELECT count() FROM example WHERE Col1 >= ? AND Col3 < ?", 500, now.Add(time.Duration(750)*time.Second)).Scan(&count); err != nil { - return err -} -// 250 -fmt.Printf("位置指定バインドカウント: %d\n", count) -// 数値バインド -if err = conn.QueryRow(ctx, "SELECT count() FROM example WHERE Col1 <= $2 AND Col3 > $1", now.Add(time.Duration(150)*time.Second), 250).Scan(&count); err != nil { - return err -} -// 100 -fmt.Printf("数値バインドカウント: %d\n", count) -// 名前付きバインド -if err = conn.QueryRow(ctx, "SELECT count() FROM example WHERE Col1 <= @col1 AND Col3 > @col3", clickhouse.Named("col1", 100), clickhouse.Named("col3", now.Add(time.Duration(50)*time.Second))).Scan(&count); err != nil { - return err -} -// 50 -fmt.Printf("名前付きバインドカウント: %d\n", count) -``` - -[フルサンプル](https://github.com/ClickHouse/clickhouse-go/blob/main/examples/std/bind.go) - -注意 [特別なケース](#special-cases) は依然として適用されます。 - -### Using Context {#using-context-1} - -標準APIは、[ClickHouse API](#using-context) と同様に、期限、キャンセル信号、およびその他のリクエストスコープの値をコンテキストを通じて渡す機能をサポートしています。ClickHouse APIとは異なり、これは `Exec` のようなメソッドの `Context` バリアントを使用することで実現されます。デフォルトではバックグラウンドコンテキストを使用するメソッドは、コンテキストを最初のパラメータとして渡すことができる `ExecContext` バリアントを持っています。これにより、アプリケーションフローの任意の段階でコンテキストを渡すことができるようになります。たとえば、ユーザーは `ConnContext` を介して接続を確立する際や、`QueryRowContext` を介してクエリ行をリクエストする際にコンテキストを渡すことができます。使用可能なすべてのメソッドの例は以下に示されています。 - -コンテキストを使用して期限、キャンセル信号、クエリID、クォータキー、および接続設定を渡す詳細については、[ClickHouse API](#using-context) におけるコンテキストの使用を参照してください。 - -```go -ctx := clickhouse.Context(context.Background(), clickhouse.WithSettings(clickhouse.Settings{ - "allow_experimental_object_type": "1", -})) -conn.ExecContext(ctx, "DROP TABLE IF EXISTS example") -// JSONカラムを作成するには allow_experimental_object_type=1 が必要です -if _, err = conn.ExecContext(ctx, ` - CREATE TABLE example ( - Col1 JSON - ) - Engine Memory - `); err != nil { - return err -} - -// クエリはコンテキストを使用してキャンセルできます -ctx, cancel := context.WithCancel(context.Background()) -go func() { - cancel() -}() -if err = conn.QueryRowContext(ctx, "SELECT sleep(3)").Scan(); err == nil { - return fmt.Errorf("キャンセルされることが期待されます") -} - -// クエリの期限を設定する - 
これは絶対時間が到達した後にクエリをキャンセルします。接続のみを終了し、 -// ClickHouse内のクエリは完了まで続行します -ctx, cancel = context.WithDeadline(context.Background(), time.Now().Add(-time.Second)) -defer cancel() -if err := conn.PingContext(ctx); err == nil { - return fmt.Errorf("期限切れが発生することが期待されます") -} - -// ログのクエリ追跡を助けるためにクエリIDを設定します。例: system.query_logを参照 -var one uint8 -ctx = clickhouse.Context(context.Background(), clickhouse.WithQueryID(uuid.NewString())) -if err = conn.QueryRowContext(ctx, "SELECT 1").Scan(&one); err != nil { - return err -} - -conn.ExecContext(context.Background(), "DROP QUOTA IF EXISTS foobar") -defer func() { - conn.ExecContext(context.Background(), "DROP QUOTA IF EXISTS foobar") -}() -ctx = clickhouse.Context(context.Background(), clickhouse.WithQuotaKey("abcde")) -// クォータキーを設定します - まずクォータを作成します -if _, err = conn.ExecContext(ctx, "CREATE QUOTA IF NOT EXISTS foobar KEYED BY client_key FOR INTERVAL 1 minute MAX queries = 5 TO default"); err != nil { - return err -} - -// クエリはコンテキストを使用してキャンセルできます -ctx, cancel = context.WithCancel(context.Background()) -// キャンセルする前にいくつかの結果を取得します -ctx = clickhouse.Context(ctx, clickhouse.WithSettings(clickhouse.Settings{ - "max_block_size": "1", -})) -rows, err := conn.QueryContext(ctx, "SELECT sleepEachRow(1), number FROM numbers(100);") -if err != nil { - return err -} -var ( - col1 uint8 - col2 uint8 -) - -for rows.Next() { - if err := rows.Scan(&col1, &col2); err != nil { - if col2 > 3 { - fmt.Println("キャンセルされることが期待されます") - return nil - } - return err - } - fmt.Printf("行: col2=%d\n", col2) - if col2 == 3 { - cancel() - } -} -``` - -[フルサンプル](https://github.com/ClickHouse/clickhouse-go/blob/main/examples/std/context.go) - -### Sessions {#sessions} - -ネイティブ接続は本質的にセッションを持っていますが、HTTP経由の接続では、ユーザーがコンテキストに設定として渡すためのセッションIDを作成する必要があります。これにより、セッションにバインドされる機能(例:一時テーブル)を使用できるようになります。 - -```go -conn := clickhouse.OpenDB(&clickhouse.Options{ - Addr: []string{fmt.Sprintf("%s:%d", env.Host, env.HttpPort)}, - Auth: clickhouse.Auth{ - Database: env.Database, - Username: env.Username, - Password: env.Password, - }, - Protocol: clickhouse.HTTP, - Settings: clickhouse.Settings{ - "session_id": uuid.NewString(), - }, -}) -if _, err := conn.Exec(`DROP TABLE IF EXISTS example`); err != nil { - return err -} -_, err = conn.Exec(` - CREATE TEMPORARY TABLE IF NOT EXISTS example ( - Col1 UInt8 - ) -`) -if err != nil { - return err -} -scope, err := conn.Begin() -if err != nil { - return err -} -batch, err := scope.Prepare("INSERT INTO example") -if err != nil { - return err -} -for i := 0; i < 10; i++ { - _, err := batch.Exec( - uint8(i), - ) - if err != nil { - return err - } -} -rows, err := conn.Query("SELECT * FROM example") -if err != nil { - return err -} -var ( - col1 uint8 -) -for rows.Next() { - if err := rows.Scan(&col1); err != nil { - return err - } - fmt.Printf("行: col1=%d\n", col1) -} -``` - -[フルサンプル](https://github.com/ClickHouse/clickhouse-go/blob/main/examples/std/session.go) - -### Dynamic Scanning {#dynamic-scanning-1} - -[ClickHouse API](#dynamic-scanning) と同様に、カラム型情報が利用可能であり、これによりユーザーは正しく型付けされた変数のランタイムインスタンスを作成し、Scanに渡すことができます。これは、型が不明なカラムを読み取ることを可能にします。 - -```go -const query = ` -SELECT - 1 AS Col1 - , 'Text' AS Col2 -` -rows, err := conn.QueryContext(context.Background(), query) -if err != nil { - return err -} -columnTypes, err := rows.ColumnTypes() -if err != nil { - return err -} -vars := make([]interface{}, len(columnTypes)) -for i := range columnTypes { - vars[i] = reflect.New(columnTypes[i].ScanType()).Interface() -} -for rows.Next() { - if err := rows.Scan(vars...); 
err != nil { - return err - } - for _, v := range vars { - switch v := v.(type) { - case *string: - fmt.Println(*v) - case *uint8: - fmt.Println(*v) - } - } -} -``` - -[フルサンプル](https://github.com/ClickHouse/clickhouse-go/blob/main/examples/std/dynamic_scan_types.go) - -### External Tables {#external-tables-1} - -[外部テーブル](/engines/table-engines/special/external-data/)は、クライアントがClickHouseにデータを送信できるようにし、`SELECT`クエリを使用します。このデータは一時テーブルに配置され、クエリ自体で評価に使用できます。 - -クエリと一緒に外部データをクライアントに送信するには、ユーザーは `ext.NewTable` を使用して外部テーブルを構築し、それをコンテキストを介して渡す必要があります。 - -```go -table1, err := ext.NewTable("external_table_1", - ext.Column("col1", "UInt8"), - ext.Column("col2", "String"), - ext.Column("col3", "DateTime"), -) -if err != nil { - return err -} - -for i := 0; i < 10; i++ { - if err = table1.Append(uint8(i), fmt.Sprintf("value_%d", i), time.Now()); err != nil { - return err - } -} - -table2, err := ext.NewTable("external_table_2", - ext.Column("col1", "UInt8"), - ext.Column("col2", "String"), - ext.Column("col3", "DateTime"), -) - -for i := 0; i < 10; i++ { - table2.Append(uint8(i), fmt.Sprintf("value_%d", i), time.Now()) -} -ctx := clickhouse.Context(context.Background(), - clickhouse.WithExternalTable(table1, table2), -) -rows, err := conn.QueryContext(ctx, "SELECT * FROM external_table_1") -if err != nil { - return err -} -for rows.Next() { - var ( - col1 uint8 - col2 string - col3 time.Time - ) - rows.Scan(&col1, &col2, &col3) - fmt.Printf("col1=%d, col2=%s, col3=%v\n", col1, col2, col3) -} -rows.Close() - -var count uint64 -if err := conn.QueryRowContext(ctx, "SELECT COUNT(*) FROM external_table_1").Scan(&count); err != nil { - return err -} -fmt.Printf("external_table_1: %d\n", count) -if err := conn.QueryRowContext(ctx, "SELECT COUNT(*) FROM external_table_2").Scan(&count); err != nil { - return err -} -fmt.Printf("external_table_2: %d\n", count) -if err := conn.QueryRowContext(ctx, "SELECT COUNT(*) FROM (SELECT * FROM external_table_1 UNION ALL SELECT * FROM external_table_2)").Scan(&count); err != nil { - return err -} -fmt.Printf("external_table_1 UNION external_table_2: %d\n", count) -``` - -[フルサンプル](https://github.com/ClickHouse/clickhouse-go/blob/main/examples/std/external_data.go) - -### Open Telemetry {#open-telemetry-1} - -ClickHouseは、ネイティブプロトコルの一部として [トレースコンテキスト](/operations/opentelemetry/) を渡すことを許可します。クライアントは、`clickhouse.withSpan` 関数を介してSpanを作成し、これをコンテキストを通じて渡すことでこれを実現します。HTTPがトランスポートとして使用される場合はサポートされません。 - -```go -var count uint64 -rows := conn.QueryRowContext(clickhouse.Context(context.Background(), clickhouse.WithSpan( - trace.NewSpanContext(trace.SpanContextConfig{ - SpanID: trace.SpanID{1, 2, 3, 4, 5}, - TraceID: trace.TraceID{5, 4, 3, 2, 1}, - }), -)), "SELECT COUNT() FROM (SELECT number FROM system.numbers LIMIT 5)") -if err := rows.Scan(&count); err != nil { - return err -} -fmt.Printf("カウント: %d\n", count) -``` - -[フルサンプル](https://github.com/ClickHouse/clickhouse-go/blob/main/examples/std/open_telemetry.go) - -## Performance Tips {#performance-tips} - -* 可能なところではClickHouse APIを利用してください。特にプリミティブ型の場合。これにより、重要なリフレクションや間接呼び出しを避けることができます。 -* 大規模なデータセットを読み取る場合は、[`BlockBufferSize`](#connection-settings) を修正することを検討してください。これにより、メモリフットプリントが増加しますが、行の反復中により多くのブロックを並行してデコードできるようになります。デフォルト値の2は保守的であり、メモリオーバーヘッドを最小限に抑えます。高い値はメモリ内のブロック数を増やすことになります。異なるクエリが異なるブロックサイズを生成する可能性があるため、これはテストが必要です。したがって、これを [クエリレベル](#using-context) でコンテキストを介して設定できます。 -* データを挿入する際は、型を明確に指定してください。クライアントは柔軟性を目指していますが、例えばUUIDやIPのために文字列を解析できるようにすることは、データ検証を必要とし、挿入時にコストがかかります。 -* 
可能な限り列指向の挿入を使用してください。これらは強く型付けされているべきであり、クライアントがあなたの値を変換する必要がなくなります。 -* ClickHouseの [推奨事項](/sql-reference/statements/insert-into/#performance-considerations) に従って、最適な挿入パフォーマンスを確保してください。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/go/index.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/go/index.md.hash deleted file mode 100644 index d8126d4f12b..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/go/index.md.hash +++ /dev/null @@ -1 +0,0 @@ -52803f0494d4a4e8 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/index.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/index.md deleted file mode 100644 index 0c59172fb44..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/index.md +++ /dev/null @@ -1,27 +0,0 @@ ---- -slug: '/integrations/language-clients' -title: 'Language Clients' -description: 'Language Clients の目次ページ。' -keywords: -- 'Language Clients' -- 'C++' -- 'Go' -- 'JavaScript' -- 'Java' -- 'Python' -- 'Rust' ---- - - - -このドキュメントのセクションでは、ClickHouseが提供する多くの言語クライアント統合について詳しく学ぶことができます。 - -| ページ | 説明 | -|----------------------------------------------------------------------------|---------------------------------------------------------------------------------------| -| [C++](/interfaces/cpp) | C++ クライアントライブラリと userver 非同期フレームワーク | -| [Go](/integrations/go) | Go プロジェクトを ClickHouse に接続する方法を学ぶ。 | -| [JavaScript](/integrations/javascript) | 公式の JS クライアントを使用して、JS プロジェクトを ClickHouse に接続する方法を学ぶ。 | -| [Java](/integrations/java) | Java と ClickHouse のためのいくつかの統合について詳しく学ぶ。 | -| [Python](/integrations/python) | Python プロジェクトを ClickHouse に接続する方法を学ぶ。 | -| [Rust](/integrations/rust) | Rust プロジェクトを ClickHouse に接続する方法を学ぶ。 | -| [サードパーティクライアント](/interfaces/third-party/client-libraries) | サードパーティの開発者によるクライアントライブラリについて詳しく学ぶ。 | diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/index.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/index.md.hash deleted file mode 100644 index 6ba3828a10d..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/index.md.hash +++ /dev/null @@ -1 +0,0 @@ -fb367428afad83b3 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/java/client-v1.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/java/client-v1.md.hash deleted file mode 100644 index 8df95114cb7..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/java/client-v1.md.hash +++ /dev/null @@ -1 +0,0 @@ -30518d7c15766bfb diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/java/client.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/java/client.md.hash deleted file mode 100644 index a342699819d..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/java/client.md.hash +++ /dev/null @@ -1 +0,0 @@ -e6b1d6c81b046969 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/java/client/_snippets/_v0_7.mdx b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/java/client/_snippets/_v0_7.mdx deleted file mode 100644 index e12d3248d2e..00000000000 --- 
a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/java/client/_snippets/_v0_7.mdx +++ /dev/null @@ -1,339 +0,0 @@ ---- -{} ---- - -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; - -Javaクライアントライブラリは、DBサーバーとのプロトコルを通じて通信するためのものです。現在の実装は、[HTTPインターフェース](/interfaces/http)のみをサポートしています。このライブラリは、サーバーにリクエストを送信するための独自のAPIを提供します。 - -:::warning 非推奨 -このライブラリは近日中に非推奨となります。新しいプロジェクトには最新の [Java Client](/integrations/language-clients/java/client/client.mdx) を使用してください。 -::: - -## セットアップ {#setup} - - - - -```xml - - - com.clickhouse - clickhouse-http-client - 0.7.2 - -``` - - - - -```kotlin -// https://mvnrepository.com/artifact/com.clickhouse/clickhouse-http-client -implementation("com.clickhouse:clickhouse-http-client:0.7.2") -``` - - - -```groovy -// https://mvnrepository.com/artifact/com.clickhouse/clickhouse-http-client -implementation 'com.clickhouse:clickhouse-http-client:0.7.2' -``` - - - - -バージョン `0.5.0` 以降、ドライバーは依存関係として追加する必要がある新しいクライアントHTTPライブラリを使用しています。 - - - - -```xml - - - org.apache.httpcomponents.client5 - httpclient5 - 5.3.1 - -``` - - - - -```kotlin -// https://mvnrepository.com/artifact/org.apache.httpcomponents.client5/httpclient5 -implementation("org.apache.httpcomponents.client5:httpclient5:5.3.1") -``` - - - -```groovy -// https://mvnrepository.com/artifact/org.apache.httpcomponents.client5/httpclient5 -implementation 'org.apache.httpcomponents.client5:httpclient5:5.3.1' -``` - - - - -## 初期化 {#initialization} - -接続URL形式: `protocol://host[:port][/database][?param[=value][¶m[=value]][#tag[,tag]]`。例えば: - -- `http://localhost:8443?ssl=true&sslmode=NONE` -- `https://explorer@play.clickhouse.com:443` - -単一ノードに接続: - -```java showLineNumbers -ClickHouseNode server = ClickHouseNode.of("http://localhost:8123/default?compress=0"); -``` -複数ノードを持つクラスタに接続: - -```java showLineNumbers -ClickHouseNodes servers = ClickHouseNodes.of( - "jdbc:ch:http://server1.domain,server2.domain,server3.domain/my_db" - + "?load_balancing_policy=random&health_check_interval=5000&failover=2"); -``` - -## クエリAPI {#query-api} - -```java showLineNumbers -try (ClickHouseClient client = ClickHouseClient.newInstance(ClickHouseProtocol.HTTP); - ClickHouseResponse response = client.read(servers) - .format(ClickHouseFormat.RowBinaryWithNamesAndTypes) - .query("select * from numbers limit :limit") - .params(1000) - .executeAndWait()) { - ClickHouseResponseSummary summary = response.getSummary(); - long totalRows = summary.getTotalRowsToRead(); -} -``` - -## ストリーミングクエリAPI {#streaming-query-api} - -```java showLineNumbers -try (ClickHouseClient client = ClickHouseClient.newInstance(ClickHouseProtocol.HTTP); - ClickHouseResponse response = client.read(servers) - .format(ClickHouseFormat.RowBinaryWithNamesAndTypes) - .query("select * from numbers limit :limit") - .params(1000) - .executeAndWait()) { - for (ClickHouseRecord r : response.records()) { - int num = r.getValue(0).asInteger(); - // 型変換 - String str = r.getValue(0).asString(); - LocalDate date = r.getValue(0).asDate(); - } -} -``` - -[完全なコード例](https://github.com/ClickHouse/clickhouse-java/blob/main/examples/client/src/main/java/com/clickhouse/examples/jdbc/Main.java#L73)については、[リポジトリ](https://github.com/ClickHouse/clickhouse-java/tree/main/examples/client)を参照してください。 - -## 挿入API {#insert-api} - -```java showLineNumbers -try (ClickHouseClient client = ClickHouseClient.newInstance(ClickHouseProtocol.HTTP); - ClickHouseResponse response = client.read(servers).write() - .format(ClickHouseFormat.RowBinaryWithNamesAndTypes) - 
.query("insert into my_table select c2, c3 from input('c1 UInt8, c2 String, c3 Int32')") - .data(myInputStream) // `myInputStream` はRowBinary形式のデータソースです - .executeAndWait()) { - ClickHouseResponseSummary summary = response.getSummary(); - summary.getWrittenRows(); -} -``` - -[完全なコード例](https://github.com/ClickHouse/clickhouse-java/blob/main/examples/client/src/main/java/com/clickhouse/examples/jdbc/Main.java#L39)については、[リポジトリ](https://github.com/ClickHouse/clickhouse-java/tree/main/examples/client)を参照してください。 - -**RowBinaryエンコーディング** - -RowBinaryフォーマットについては、その[ページ](/interfaces/formats#rowbinarywithnamesandtypes)を参照してください。 - -[コードの例](https://github.com/ClickHouse/clickhouse-kafka-connect/blob/main/src/main/java/com/clickhouse/kafka/connect/sink/db/ClickHouseWriter.java#L622)があります。 - -## 機能 {#features} -### 圧縮 {#compression} - -クライアントはデフォルトでLZ4圧縮を使用します。これには以下の依存関係が必要です。 - - - - -```xml - - - org.lz4 - lz4-java - 1.8.0 - -``` - - - - -```kotlin -// https://mvnrepository.com/artifact/org.lz4/lz4-java -implementation("org.lz4:lz4-java:1.8.0") -``` - - - -```groovy -// https://mvnrepository.com/artifact/org.lz4/lz4-java -implementation 'org.lz4:lz4-java:1.8.0' -``` - - - - -gzipを使用する場合は、接続URLに `compress_algorithm=gzip` を設定してください。 - -また、圧縮を無効にする方法もいくつかあります。 - -1. 接続URLで `compress=0` を設定して無効にする: `http://localhost:8123/default?compress=0` -2. クライアント設定で無効にする: - -```java showLineNumbers -ClickHouseClient client = ClickHouseClient.builder() - .config(new ClickHouseConfig(Map.of(ClickHouseClientOption.COMPRESS, false))) - .nodeSelector(ClickHouseNodeSelector.of(ClickHouseProtocol.HTTP)) - .build(); -``` - -さまざまな圧縮オプションについては、[圧縮ドキュメント](/data-compression/compression-modes)を参照してください。 - -### 複数のクエリ {#multiple-queries} - -同じセッション内でワーカースレッドに複数のクエリを順に実行させます: - -```java showLineNumbers -CompletableFuture> future = ClickHouseClient.send(servers.apply(servers.getNodeSelector()), - "create database if not exists my_base", - "use my_base", - "create table if not exists test_table(s String) engine=Memory", - "insert into test_table values('1')('2')('3')", - "select * from test_table limit 1", - "truncate table test_table", - "drop table if exists test_table"); -List results = future.get(); -``` - -### 名前付きパラメーター {#named-parameters} - -パラメーターを位置に頼ることなく、名前で渡すことができます。この機能は `params` 関数を使用して利用可能です。 - -```java showLineNumbers -try (ClickHouseClient client = ClickHouseClient.newInstance(ClickHouseProtocol.HTTP); - ClickHouseResponse response = client.read(servers) - .format(ClickHouseFormat.RowBinaryWithNamesAndTypes) - .query("select * from my_table where name=:name limit :limit") - .params("Ben", 1000) - .executeAndWait()) { - //... - } -} -``` - -:::note パラメーター -すべての `params` シグネチャにおいて `String` 型 (`String`, `String[]`, `Map`) は、有効なClickHouse SQL文字列であることを想定しています。例えば: - -```java showLineNumbers -try (ClickHouseClient client = ClickHouseClient.newInstance(ClickHouseProtocol.HTTP); - ClickHouseResponse response = client.read(servers) - .format(ClickHouseFormat.RowBinaryWithNamesAndTypes) - .query("select * from my_table where name=:name") - .params(Map.of("name","'Ben'")) - .executeAndWait()) { - //... 
- } -} -``` - -ClickHouse SQLに手動でStringオブジェクトを解析したくない場合は、`com.clickhouse.data` に位置するヘルパー関数 `ClickHouseValues.convertToSqlExpression` を使用できます: - -```java showLineNumbers -try (ClickHouseClient client = ClickHouseClient.newInstance(ClickHouseProtocol.HTTP); - ClickHouseResponse response = client.read(servers) - .format(ClickHouseFormat.RowBinaryWithNamesAndTypes) - .query("select * from my_table where name=:name") - .params(Map.of("name", ClickHouseValues.convertToSqlExpression("Ben's"))) - .executeAndWait()) { - //... - } -} -``` - -上記の例では、`ClickHouseValues.convertToSqlExpression` は内部の一重引用符をエスケープし、変数を有効な一重引用符で囲みます。 - -`Integer`、`UUID`、`Array`、`Enum` などの他の型は、`params` 内部で自動的に変換されます。 -::: - -## ノード発見 {#node-discovery} - -Javaクライアントは、ClickHouseノードを自動的に発見する機能を提供します。自動発見はデフォルトでは無効です。手動で有効にするには、`auto_discovery` を `true` に設定します: - -```java -properties.setProperty("auto_discovery", "true"); -``` - -接続URLでは次のように設定します: - -```plaintext -jdbc:ch://my-server/system?auto_discovery=true -``` - -自動発見が有効になっている場合、接続URLにすべてのClickHouseノードを指定する必要はありません。URLに指定されたノードはシードとして扱われ、Javaクライアントはシステムテーブルおよび/またはclickhouse-keeperまたはzookeeperから追加のノードを自動的に発見します。 - -以下のオプションは自動発見の設定に関連しています: - -| プロパティ | デフォルト | 説明 | -|-------------------------|---------|-------------------------------------------------------------------------------------------------------| -| auto_discovery | `false` | クライアントがシステムテーブルやclickhouse-keeper/zookeeperから追加のノードを発見すべきかどうか。 | -| node_discovery_interval | `0` | ノード発見のインターバル(ミリ秒単位)。ゼロまたは負の値は一回限りの発見を意味します。 | -| node_discovery_limit | `100` | 一度に発見できるノードの最大数。ゼロまたは負の値は制限なしを意味します。 | - -### ロードバランシング {#load-balancing} - -Javaクライアントは、ロードバランシングポリシーに従ってリクエストを送信するClickHouseノードを選択します。一般的に、ロードバランシングポリシーは次のことを担当します。 - -1. 管理されたノードリストからノードを取得します。 -2. ノードの状態を管理します。 -3. (自動発見が有効な場合)ノード発見のためのバックグラウンドプロセスを予定し、ヘルスチェックを実行します。 - -以下はロードバランシングを設定するためのオプションリストです: - -| プロパティ | デフォルト | 説明 | -|-----------------------|-------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| load_balancing_policy | `""` | ロードバランシングポリシーは次のいずれかです:
  • `firstAlive` - リクエストは管理ノードリストから最初の正常なノードに送信されます
  • `random` - リクエストは管理ノードリストからランダムなノードに送信されます
  • `roundRobin` - リクエストは管理ノードリストの各ノードに順番に送信されます
  • `ClickHouseLoadBalancingPolicy` を実装するカスタムクラス名 - カスタムロードバランシングポリシー
  • 指定がない場合、リクエストは管理ノードリストの最初のノードに送信されます。 | -| load_balancing_tags | `""` | ノードをフィルタリングするためのロードバランシングタグ。リクエストは指定されたタグを持つノードにのみ送信されます。 | -| health_check_interval | `0` | ヘルスチェックのインターバル(ミリ秒単位)。ゼロまたは負の値は一回限りのチェックを意味します。 | -| health_check_method | `ClickHouseHealthCheckMethod.SELECT_ONE` | ヘルスチェック方法。次のいずれか:
  • `ClickHouseHealthCheckMethod.SELECT_ONE` - `select 1`クエリでチェック
  • `ClickHouseHealthCheckMethod.PING` - プロトコル固有のチェックで、通常はより高速です
  | -| node_check_interval | `0` | ノードチェックのインターバル(ミリ秒単位)。負の数はゼロとして扱われます。指定された時間が経過していればノードの状態がチェックされます。
    `health_check_interval` と `node_check_interval` の違いは、`health_check_interval`オプションがバックグラウンドジョブのスケジュールを行うことですが、`node_check_interval`は特定のノードに対する最後のチェックから経過した時間を指定します。 | -| check_all_nodes | `false` | すべてのノードに対してヘルスチェックを行うか、故障したノードのみに行うか。 | - -### フェイルオーバーとリトライ {#failover-and-retry} - -Javaクライアントは、失敗したクエリに対してフェイルオーバーとリトライの動作を設定するためのオプションを提供します: - -| プロパティ | デフォルト | 説明 | -|-------------------------|---------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| failover | `0` | リクエストのフェイルオーバーができる最大回数。ゼロまたは負の値はフェイルオーバーがないことを意味します。フェイルオーバーは、失敗したリクエストを別のノードに送信すること(ロードバランシングポリシーに従って)で、フェイルオーバーから回復します。 | -| retry | `0` | リクエストでリトライができる最大回数。ゼロまたは負の値はリトライがないことを意味します。リトライは、同じノードにリクエストを送信し、ClickHouseサーバーが `NETWORK_ERROR` エラーコードを返した場合にのみ行います。 | -| repeat_on_session_lock | `true` | セッションがタイムアウトするまで(`session_timeout` または `connect_timeout` に従って)実行を繰り返すかどうか。ClickHouseサーバーが `SESSION_IS_LOCKED` エラーコードを返した場合、失敗したリクエストが繰り返されます。 | - -### カスタムHTTPヘッダーの追加 {#adding-custom-http-headers} - -Javaクライアントは、リクエストにカスタムHTTPヘッダーを追加したい場合にHTTP/Sトランスポート層をサポートします。 -`custom_http_headers` プロパティを使用し、ヘッダーは `,` で区切る必要があります。ヘッダーのキー/値は `=` で区切られます。 - -## Javaクライアントサポート {#java-client-support} - -```java -options.put("custom_http_headers", "X-ClickHouse-Quota=test, X-ClickHouse-Test=test"); -``` - -## JDBCドライバ {#jdbc-driver} - -```java -properties.setProperty("custom_http_headers", "X-ClickHouse-Quota=test, X-ClickHouse-Test=test"); -``` diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/java/client/_snippets/_v0_7.mdx.hash b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/java/client/_snippets/_v0_7.mdx.hash deleted file mode 100644 index 8a2bca55945..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/java/client/_snippets/_v0_7.mdx.hash +++ /dev/null @@ -1 +0,0 @@ -c5114df4d929dabd diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/java/client/_snippets/_v0_8.mdx b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/java/client/_snippets/_v0_8.mdx deleted file mode 100644 index dcc9bcb5ef2..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/java/client/_snippets/_v0_8.mdx +++ /dev/null @@ -1,589 +0,0 @@ ---- -{} ---- - -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; - -Javaクライアントライブラリは、プロトコルを介してDBサーバーと通信するためのものです。現在の実装は、[HTTPインターフェース](/interfaces/http)のサポートのみを提供しています。このライブラリは、サーバーにリクエストを送信するための独自のAPIを提供し、さまざまなバイナリデータフォーマット(RowBinary* & Native*)で作業するためのツールも提供しています。 - -## セットアップ {#setup} - -- Maven Central(プロジェクトウェブページ): https://mvnrepository.com/artifact/com.clickhouse/client-v2 -- ナイトリービルド(リポジトリリンク): https://s01.oss.sonatype.org/content/repositories/snapshots/com/clickhouse/ -
    - - - -```xml - - com.clickhouse - client-v2 - 0.8.2 - -``` - - - - -```kotlin -// https://mvnrepository.com/artifact/com.clickhouse/client-v2 -implementation("com.clickhouse:client-v2:0.8.2") -``` - - - -```groovy -// https://mvnrepository.com/artifact/com.clickhouse/client-v2 -implementation 'com.clickhouse:client-v2:0.8.2' -``` - - - -## 初期化 {#initialization} - -Clientオブジェクトは `com.clickhouse.client.api.Client.Builder#build()` によって初期化されます。各クライアントには独自のコンテキストがあり、オブジェクトは共有されません。Builderには、便利な設定用のメソッドがあります。 - -例: -```java showLineNumbers - Client client = new Client.Builder() - .addEndpoint("https://clickhouse-cloud-instance:8443/") - .setUsername(user) - .setPassword(password) - .build(); -``` - -`Client` は `AutoCloseable` であり、不要になったときは閉じる必要があります。 -### 認証 {#authentication} - -認証は初期化段階でクライアントごとに設定されます。サポートされている認証方法は3つあり:パスワードによる認証、アクセストークンによる認証、SSLクライアント証明書による認証です。 - -パスワードによる認証は、`setUsername(String)` と `setPassword(String)` を呼び出してユーザー名とパスワードを設定します: -```java showLineNumbers - Client client = new Client.Builder() - .addEndpoint("https://clickhouse-cloud-instance:8443/") - .setUsername(user) - .setPassword(password) - .build(); -``` - -アクセストークンによる認証は、`setAccessToken(String)` を呼び出してアクセストークンを設定します: -```java showLineNumbers - Client client = new Client.Builder() - .addEndpoint("https://clickhouse-cloud-instance:8443/") - .setAccessToken(userAccessToken) - .build(); -``` - -SSLクライアント証明書による認証は、ユーザー名を設定し、SSL認証を有効にし、クライアント証明書およびクライアントキーをそれぞれ `setUsername(String)`、`useSSLAuthentication(boolean)`、`setClientCertificate(String)`、`setClientKey(String)` を呼び出して設定する必要があります: -```java showLineNumbers -Client client = new Client.Builder() - .useSSLAuthentication(true) - .setUsername("some_user") - .setClientCertificate("some_user.crt") - .setClientKey("some_user.key") -``` - -:::note -SSL認証は、SSLライブラリからの多くのエラーが十分な情報を提供しないため、本番環境でのトラブルシューティングが難しい場合があります。たとえば、クライアント証明書とキーが一致しない場合、サーバーは接続を即座に終了します(HTTPの場合、これは接続初期化段階であり、HTTPリクエストは送信されないため、レスポンスは送信されません)。 - -[openssl](https://docs.openssl.org/master/man1/openssl/)などのツールを使用して証明書とキーを検証してください: -- キーの整合性を確認する: `openssl rsa -in [key-file.key] -check -noout` -- ユーザーの証明書からCNを取得する: - `openssl x509 -noout -subject -in [user.cert]` -- DB内で同じ値が設定されていることを確認する: `select name, auth_type, auth_params from system.users where auth_type = 'ssl_certificate'`(クエリは `{"common_names":["some_user"]}` のような `auth_params` を出力します)。 - -::: -## 設定 {#configuration} - -すべての設定はインスタンスメソッド(いわゆる設定メソッド)によって定義され、各値のスコープとコンテキストが明確になります。主要な設定パラメータは1つのスコープ(クライアントまたは操作)で定義され、互いに上書きされることはありません。 - -設定はクライアント作成時に定義されます。 `com.clickhouse.client.api.Client.Builder` を参照してください。 -## クライアント設定 {#client-configuration} - -| 設定メソッド | 引数 | 説明 | -|---------------------------------------|:-------------------------------------------------|:--------------------------------------------| -| `addEndpoint(String endpoint)` | - `enpoint` - サーバーアドレスのURL形式。 | 利用可能なサーバーのエンドポイントにサーバーエンドポイントを追加します。現在は1つのエンドポイントのみがサポートされています。 | -| `addEndpoint(Protocol protocol, String host, int port, boolean secure)` | - `protocol` - 接続プロトコル `com.clickhouse.client.api.enums.Protocol#HTTP`.
    - `host` - サーバーのIPまたはホスト名。
    - `port` - サーバーのポート番号。
    - `secure` - 通信にプロトコルの安全なバージョン(HTTPS)を使用する必要があるかどうか。 | 利用可能なサーバーのエンドポイントにサーバーエンドポイントを追加します。現在は1つのエンドポイントのみがサポートされています。 | -| `setOption(String key, String value)` | - `key` - クライアント設定オプションの文字列キー。
    - `value` - オプションの文字列値。 | クライアントオプションの生の値を設定します。プロパティファイルからの設定を読み込むときに便利です。 | -| `setUsername(String username)` | - `username` - 認証時に使用するユーザー名。 | 選択された認証メソッドのユーザー名を設定します。 | -| `setPassword(String password)` | - `password` - パスワード認証用の秘密値。 | パスワード認証用の秘密を設定し、実質的に認証メソッドを選択します。 | -| `setAccessToken(String accessToken)` | - `accessToken` - アクセストークンの文字列表現。 | 対応する認証メソッドで認証するためのアクセストークンを設定します。 | -| `useSSLAuthentication(boolean useSSLAuthentication)` | - `useSSLAuthentication` - SSL認証を使用するかどうかを示すフラグ。 | SSLクライアント証明書を認証メソッドとして設定します。 | -| `enableConnectionPool(boolean enable)` | - `enable` - オプションを有効にするかどうかを示すフラグ。 | コネクションプールが有効かどうかを設定します。 | -| `setConnectTimeout(long timeout, ChronoUnit unit)` | - `timeout` - 一定の時間単位でのタイムアウト。
    - `unit` - `timeout`の時間単位。 | すべてのアウトゴーイング接続の接続初期化タイムアウトを設定します。これはソケット接続の取得待機時間に影響します。 | -| `setConnectionRequestTimeout(long timeout, ChronoUnit unit)` | - `timeout` - 一定の時間単位でのタイムアウト。
    - `unit` - `timeout`の時間単位。 | 接続リクエストタイムアウトを設定します。これはプールから接続を取得する場合のみに影響します。 | -| `setMaxConnections(int maxConnections)` | - `maxConnections` - 接続の数。 | 各サーバーエンドポイントに対してクライアントがオープンできる接続の数を設定します。 | -| `setConnectionTTL(long timeout, ChronoUnit unit)` | - `timeout` - 一定の時間単位でのタイムアウト。
    - `unit` - `timeout`の時間単位。 | 接続が非アクティブと見なされるまでの接続TTLを設定します。 | -| `setKeepAliveTimeout(long timeout, ChronoUnit unit)` | - `timeout` - 一定の時間単位でのタイムアウト。
    - `unit` - `timeout`の時間単位。 | HTTP接続のキープアライブタイムアウトを設定します。このオプションをゼロ(`0`)に設定することで、キープアライブを無効にすることもできます。 | -| `setConnectionReuseStrategy(ConnectionReuseStrategy strategy)` | - `strategy` - 列挙型 `com.clickhouse.client.api.ConnectionReuseStrategy` 定数。 | コネクションプールが使用すべき戦略を選択します:`LIFO`(接続がプールに戻されたときにすぐに再使用されるべき場合)または`FIFO`(接続が利用可能になる順に使用される場合)。戻った接続は即座に使用されません。 | -| `setSocketTimeout(long timeout, ChronoUnit unit)` | - `timeout` - 一定の時間単位でのタイムアウト。
    - `unit` - `timeout`の時間単位。 | 読み取りおよび書き込み操作に影響を与えるソケットタイムアウトを設定します。 | -| `setSocketRcvbuf(long size)` | - `size` - バイト数。 | TCPソケットの受信バッファサイズを設定します。このバッファはJVMメモリの外部です。 | -| `setSocketSndbuf(long size)` | - `size` - バイト数。 | TCPソケットの送信バッファサイズを設定します。このバッファはJVMメモリの外部です。 | -| `setSocketKeepAlive(boolean value)` | - `value` - オプションが有効にすべきかどうかを示すフラグ。 | クライアントによって生成されたすべてのTCPソケットに対して`SO_KEEPALIVE`オプションを設定します。TCPキープアライブは、接続の生存をチェックするメカニズムを有効にし、突然終了した接続を検出するのに役立ちます。 | -| `setSocketTcpNodelay(boolean value)` | - `value` - オプションが有効にすべきかどうかを示すフラグ。 | クライアントによって生成されたすべてのTCPソケットに対して`SO_NODELAY`オプションを設定します。このTCPオプションは、ソケットができるだけ早くデータをプッシュすることを可能にします。 | -| `setSocketLinger(int secondsToWait)` | - `secondsToWait` - 待機する秒数。 | クライアントによって生成されたすべてのTCPソケットのラングタイムを設定します。 | -| `compressServerResponse(boolean enabled)` | - `enabled` - オプションが有効にすべきかどうかを示すフラグ。 | サーバーがレスポンスを圧縮すべきかどうかを設定します。 | -| `compressClientRequest(boolean enabled)` | - `enabled` - オプションが有効にすべきかどうかを示すフラグ。 | クライアントがリクエストを圧縮すべきかどうかを設定します。 | -| `useHttpCompression(boolean enabled)` | - `enabled` - オプションが有効にすべきかどうかを示すフラグ。 | クライアント/サーバー間の通信にHTTP圧縮を使用するかどうかを設定します。対応するオプションが有効になっている場合。 | -| `setLZ4UncompressedBufferSize(int size)` | - `size` - バイト数。 | データストリームの未圧縮部分を受け取るためのバッファのサイズを設定します。バッファが過小評価されている場合、新しいものが作成され、対応する警告がログに表示されます。 | -| `setDefaultDatabase(String database)` | - `database` - データベースの名前。 | デフォルトのデータベースを設定します。 | -| `addProxy(ProxyType type, String host, int port)` | - `type` - プロキシの種類。
    - `host` - プロキシのホスト名またはIPアドレス。
    - `port` - プロキシポート。 | サーバーとの通信に使用されるプロキシを設定します。プロキシが認証を要求する場合は、あわせて `setProxyCredentials` で認証情報を設定する必要があります。 | -| `setProxyCredentials(String user, String pass)` | - `user` - プロキシのユーザー名。
    - `pass` - パスワード。 | プロキシへの認証に使用されるユーザーの認証情報を設定します。 | -| `setExecutionTimeout(long timeout, ChronoUnit timeUnit)` | - `timeout` - 一定の時間単位でのタイムアウト。
    - `timeUnit` - `timeout`の時間単位。 | クエリの最大実行タイムアウトを設定します。 | -| `setHttpCookiesEnabled(boolean enabled)` | - `enabled` - オプションが有効にすべきかどうかを示すフラグ。 | HTTPクッキーが記憶され、サーバーに戻されるべきかどうかを設定します。 | -| `setSSLTrustStore(String path)` | - `path` - ローカル(クライアント側)システム上のファイルパス。 | クライアントがサーバーホストの検証にSSLトラストストアを使用すべきかどうかを設定します。 | -| `setSSLTrustStorePassword(String password)` | - `password` - 秘密値。 | `setSSLTrustStore(String path)`で指定されたSSLトラストストアを解除するために使用されるパスワードを設定します。 | -| `setSSLTrustStoreType(String type)` | - `type` - トラストストアタイプ名。 | `setSSLTrustStore(String path)`で指定されたトラストストアのタイプを設定します。 | -| `setRootCertificate(String path)` | - `path` - ローカル(クライアント側)システム上のファイルパス。 | クライアントが指定されたルート(CA)証明書を使用してサーバーホストを検証すべきか設定します。 | -| `setClientCertificate(String path)` | - `path` - ローカル(クライアント側)システム上のファイルパス。 | SSL接続を開始する際に使用されるクライアント証明書のパスを設定します。SSL認証のために使用されます。 | -| `setClientKey(String path)` | - `path` - ローカル(クライアント側)システム上のファイルパス。 | サーバーとのSSL通信を暗号化するために使用されるクライアントプライベートキーを設定します。 | -| `useServerTimeZone(boolean useServerTimeZone)` | - `useServerTimeZone` - オプションが有効にすべきかどうかを示すフラグ。 | デコード時にクライアントがサーバーのタイムゾーンを使用するべきかどうかを設定します。DateTimeおよびDateカラムの値。これが有効になっている場合、サーバータイムゾーンは`setServerTimeZone(String timeZone)`で設定する必要があります。 | -| `useTimeZone(String timeZone)` | - `timeZone` - javaの有効なタイムゾーンIDの文字列値(`java.time.ZoneId`を参照)。 | デコード時に指定されたタイムゾーンを使用すべきかどうかを設定します。DateTimeおよびDateカラムの値。サーバータイムゾーンを上書きします。 | -| `setServerTimeZone(String timeZone)` | - `timeZone` - javaの有効なタイムゾーンIDの文字列値(`java.time.ZoneId`を参照)。 | サーバー側のタイムゾーンを設定します。デフォルトではUTCタイムゾーンが使用されます。 | -| `useAsyncRequests(boolean async)` | - `async` - オプションが有効にすべきかどうかを示すフラグ。 | クライアントがリクエストを別スレッドで実行するべきかどうかを設定します。これはデフォルトでは無効です。アプリケーションはマルチスレッドタスクを整理する方法をよりよく知っており、タスクを別スレッドで実行してもパフォーマンスは向上しません。 | -| `setSharedOperationExecutor(ExecutorService executorService)` | - `executorService` - エグゼキューターサービスのインスタンス。 | 操作タスクのためのエグゼキューターサービスを設定します。 | -| `setClientNetworkBufferSize(int size)` | - `size` - バイト数。 | ソケットとアプリケーション間でデータを往復するために使用されるアプリケーションメモリ空間のバッファのサイズを設定します。大きな値はTCPスタックへのシステムコールを減らしますが、各接続にどれだけのメモリが消費されるかに影響します。このバッファもGCの影響を受けるため、接続が短命です。また、大きな連続メモリブロックの割り当ては問題となる可能性があります。デフォルトは`300,000`バイトです。 | -| `retryOnFailures(ClientFaultCause ...causes)` | - `causes` - `com.clickhouse.client.api.ClientFaultCause`の列挙定数。 | 復旧可能または再試行可能な障害タイプを設定します。 | -| `setMaxRetries(int maxRetries)` | - `maxRetries` - 再試行の数。 | `retryOnFailures(ClientFaultCause ...causes)`で定義された失敗に対する最大再試行回数を設定します。 | -| `allowBinaryReaderToReuseBuffers(boolean reuse)` | - `reuse` - オプションが有効にすべきかどうかを示すフラグ。 | ほとんどのデータセットには、小さなバイト列としてエンコードされた数値データが含まれています。デフォルトでは、リーダーは必要なバッファを割り当て、データをそこに読み込み、次にターゲットのNumberクラスに変換します。これにより、多くの小さなオブジェクトが割り当てられ解除されるため、かなりのGC圧力がかかる可能性があります。このオプションが有効になっている場合、リーダーは再利用可能なバッファを使用して数値を変換します。これは安全です。各リーダーは独自のバッファセットを持ち、リーダーは1つのスレッドによって使用されます。 | -| `httpHeader(String key, String value)` | - `key` - HTTPヘッダのキー。
    - `value` - ヘッダの文字列値。 | 1つのHTTPヘッダの値を設定します。以前の値は上書きされます。| -| `httpHeader(String key, Collection values)` | - `key` - HTTPヘッダのキー。
    - `values` - 文字列値のリスト。 | 1つのHTTPヘッダに複数の値を設定します。以前の値は上書きされます。| -| `httpHeaders(Map headers)` | - `header` - HTTPヘッダとその値のマップ。 | 複数のHTTPヘッダ値を一度に設定します。 | -| `serverSetting(String name, String value)` | - `name` - クエリレベル設定の名前。
    - `value` - 設定の文字列値。 | 各クエリと共にサーバーに渡す設定を設定します。個別の操作設定がこれを上書きすることがあります。[設定のリスト](/operations/settings/query-level) | -| `serverSetting(String name, Collection values)` | - `name` - クエリレベル設定の名前。
    - `values` - 設定の文字列値。 | 各クエリと共にサーバーに渡す設定を設定します。個別の操作設定がこれを上書きすることがあります。[設定のリスト](/operations/settings/query-level)。このメソッドは、複数の値を持つ設定を設定するのに便利です。たとえば、[roles](/interfaces/http#setting-role-with-query-parameters) | -| `columnToMethodMatchingStrategy(ColumnToMethodMatchingStrategy strategy)` | - `strategy` - カラムとフィールドの一致戦略の実装。 | DTOクラスフィールドとDBカラムを登録する際に使用されるカスタム戦略を設定します。 | -| `useHTTPBasicAuth(boolean useBasicAuth)` | - `useBasicAuth` - オプションが有効にすべきかどうかを示すフラグ。 | ユーザー名とパスワードによる認証に基本HTTP認証を使用すべきかどうかを設定します。デフォルトは有効です。この認証方式を使用すると、HTTPヘッダを転送できない特殊文字を含むパスワードの問題が解決されます。 | -| `setClientName(String clientName)` | - `clientName` - アプリケーション名を表す文字列。 | 呼び出すアプリケーションに関する追加情報を設定します。この文字列は、クライアント名としてサーバーに渡されます。HTTPプロトコルの場合、`User-Agent`ヘッダーとして渡されます。 | -| `useBearerTokenAuth(String bearerToken)` | - `bearerToken` - エンコードされたベアラートークン。 | ベアラー認証を使用し、どのトークンを使用するかを指定します。トークンはそのまま送信されるため、このメソッドに渡す前にエンコードする必要があります。 | -## 一般定義 {#common-definitions} -### ClickHouseFormat {#clickhouseformat} - -[サポートされているフォーマット](/interfaces/formats)の列挙型です。ClickHouseがサポートするすべてのフォーマットを含んでいます。 - -* `raw` - ユーザーは生データをトランスコーディングする必要があります。 -* `full` - クライアントは自身でデータをトランスコードでき、生データストリームを受け入れます。 -* `-` - ClickHouseがこのフォーマットに対してサポートしていない操作。 - -このクライアントバージョンでは次のフォーマットがサポートされています: - -| フォーマット | 入力 | 出力 | -|-------------------------------------------------------------------------------------------------------------------------------|:------:|:-------:| -| [TabSeparated](/interfaces/formats#tabseparated) | raw | raw | -| [TabSeparatedRaw](/interfaces/formats#tabseparatedraw) | raw | raw | -| [TabSeparatedWithNames](/interfaces/formats#tabseparatedwithnames) | raw | raw | -| [TabSeparatedWithNamesAndTypes](/interfaces/formats#tabseparatedwithnamesandtypes) | raw | raw | -| [TabSeparatedRawWithNames](/interfaces/formats#tabseparatedrawwithnames) | raw | raw | -| [TabSeparatedRawWithNamesAndTypes](/interfaces/formats#tabseparatedrawwithnamesandtypes) | raw | raw | -| [Template](/interfaces/formats#format-template) | raw | raw | -| [TemplateIgnoreSpaces](/interfaces/formats#templateignorespaces) | raw | - | -| [CSV](/interfaces/formats#csv) | raw | raw | -| [CSVWithNames](/interfaces/formats#csvwithnames) | raw | raw | -| [CSVWithNamesAndTypes](/interfaces/formats#csvwithnamesandtypes) | raw | raw | -| [CustomSeparated](/interfaces/formats#format-customseparated) | raw | raw | -| [CustomSeparatedWithNames](/interfaces/formats#customseparatedwithnames) | raw | raw | -| [CustomSeparatedWithNamesAndTypes](/interfaces/formats#customseparatedwithnamesandtypes) | raw | raw | -| [SQLInsert](/interfaces/formats#sqlinsert) | - | raw | -| [Values](/interfaces/formats#data-format-values) | raw | raw | -| [Vertical](/interfaces/formats#vertical) | - | raw | -| [JSON](/interfaces/formats#json) | raw | raw | -| [JSONAsString](/interfaces/formats#jsonasstring) | raw | - | -| [JSONAsObject](/interfaces/formats#jsonasobject) | raw | - | -| [JSONStrings](/interfaces/formats#jsonstrings) | raw | raw | -| [JSONColumns](/interfaces/formats#jsoncolumns) | raw | raw | -| [JSONColumnsWithMetadata](/interfaces/formats#jsoncolumnsmonoblock) | raw | raw | -| [JSONCompact](/interfaces/formats#jsoncompact) | raw | raw | -| [JSONCompactStrings](/interfaces/formats#jsoncompactstrings) | - | raw | -| [JSONCompactColumns](/interfaces/formats#jsoncompactcolumns) | raw | raw | -| [JSONEachRow](/interfaces/formats#jsoneachrow) | raw | raw | -| [PrettyJSONEachRow](/interfaces/formats#prettyjsoneachrow) | - | raw | -| [JSONEachRowWithProgress](/interfaces/formats#jsoneachrowwithprogress) | 
- | raw | -| [JSONStringsEachRow](/interfaces/formats#jsonstringseachrow) | raw | raw | -| [JSONStringsEachRowWithProgress](/interfaces/formats#jsonstringseachrowwithprogress) | - | raw | -| [JSONCompactEachRow](/interfaces/formats#jsoncompacteachrow) | raw | raw | -| [JSONCompactEachRowWithNames](/interfaces/formats#jsoncompacteachrowwithnames) | raw | raw | -| [JSONCompactEachRowWithNamesAndTypes](/interfaces/formats#jsoncompacteachrowwithnamesandtypes) | raw | raw | -| [JSONCompactStringsEachRow](/interfaces/formats#jsoncompactstringseachrow) | raw | raw | -| [JSONCompactStringsEachRowWithNames](/interfaces/formats#jsoncompactstringseachrowwithnames) | raw | raw | -| [JSONCompactStringsEachRowWithNamesAndTypes](/interfaces/formats#jsoncompactstringseachrowwithnamesandtypes) | raw | raw | -| [JSONObjectEachRow](/interfaces/formats#jsonobjecteachrow) | raw | raw | -| [BSONEachRow](/interfaces/formats#bsoneachrow) | raw | raw | -| [TSKV](/interfaces/formats#tskv) | raw | raw | -| [Pretty](/interfaces/formats#pretty) | - | raw | -| [PrettyNoEscapes](/interfaces/formats#prettynoescapes) | - | raw | -| [PrettyMonoBlock](/interfaces/formats#prettymonoblock) | - | raw | -| [PrettyNoEscapesMonoBlock](/interfaces/formats#prettynoescapesmonoblock) | - | raw | -| [PrettyCompact](/interfaces/formats#prettycompact) | - | raw | -| [PrettyCompactNoEscapes](/interfaces/formats#prettycompactnoescapes) | - | raw | -| [PrettyCompactMonoBlock](/interfaces/formats#prettycompactmonoblock) | - | raw | -| [PrettyCompactNoEscapesMonoBlock](/interfaces/formats#prettycompactnoescapesmonoblock) | - | raw | -| [PrettySpace](/interfaces/formats#prettyspace) | - | raw | -| [PrettySpaceNoEscapes](/interfaces/formats#prettyspacenoescapes) | - | raw | -| [PrettySpaceMonoBlock](/interfaces/formats#prettyspacemonoblock) | - | raw | -| [PrettySpaceNoEscapesMonoBlock](/interfaces/formats#prettyspacenoescapesmonoblock) | - | raw | -| [Prometheus](/interfaces/formats#prometheus) | - | raw | -| [Protobuf](/interfaces/formats#protobuf) | raw | raw | -| [ProtobufSingle](/interfaces/formats#protobufsingle) | raw | raw | -| [ProtobufList](/interfaces/formats#protobuflist) | raw | raw | -| [Avro](/interfaces/formats#data-format-avro) | raw | raw | -| [AvroConfluent](/interfaces/formats#data-format-avro-confluent) | raw | - | -| [Parquet](/interfaces/formats#data-format-parquet) | raw | raw | -| [ParquetMetadata](/interfaces/formats#data-format-parquet-metadata) | raw | - | -| [Arrow](/interfaces/formats#data-format-arrow) | raw | raw | -| [ArrowStream](/interfaces/formats#data-format-arrow-stream) | raw | raw | -| [ORC](/interfaces/formats#data-format-orc) | raw | raw | -| [One](/interfaces/formats#data-format-one) | raw | - | -| [Npy](/interfaces/formats#data-format-npy) | raw | raw | -| [RowBinary](/interfaces/formats#rowbinary) | full | full | -| [RowBinaryWithNames](/interfaces/formats#rowbinarywithnamesandtypes) | full | full | -| [RowBinaryWithNamesAndTypes](/interfaces/formats#rowbinarywithnamesandtypes) | full | full | -| [RowBinaryWithDefaults](/interfaces/formats#rowbinarywithdefaults) | full | - | -| [Native](/interfaces/formats#native) | full | raw | -| [Null](/interfaces/formats#null) | - | raw | -| [XML](/interfaces/formats#xml) | - | raw | -| [CapnProto](/interfaces/formats#capnproto) | raw | raw | -| [LineAsString](/interfaces/formats#lineasstring) | raw | raw | -| [Regexp](/interfaces/formats#data-format-regexp) | raw | - | -| [RawBLOB](/interfaces/formats#rawblob) | raw | raw | -| 
[MsgPack](/interfaces/formats#msgpack) | raw | raw | -| [MySQLDump](/interfaces/formats#mysqldump) | raw | - | -| [DWARF](/interfaces/formats#dwarf) | raw | - | -| [Markdown](/interfaces/formats#markdown) | - | raw | -| [Form](/interfaces/formats#form) | raw | - | -## インサートAPI {#insert-api} -### insert(String tableName, InputStream data, ClickHouseFormat format) {#insertstring-tablename-inputstream-data-clickhouseformat-format} - -指定されたフォーマットの `InputStream` バイトとしてデータを受け付けます。`data` は `format` でエンコードされていると期待されます。 - -**シグネチャ** - -```java -CompletableFuture insert(String tableName, InputStream data, ClickHouseFormat format, InsertSettings settings) -CompletableFuture insert(String tableName, InputStream data, ClickHouseFormat format) -``` - -**パラメータ** - -`tableName` - 対象のテーブル名。 - -`data` - エンコードされたデータの入力ストリーム。 - -`format` - データがエンコードされているフォーマット。 - -`settings` - リクエスト設定。 - -**返り値** - -`InsertResponse` 型の Future - 操作の結果とサーバー側のメトリクスのような追加情報。 - -**例** - -```java showLineNumbers -try (InputStream dataStream = getDataStream()) { - try (InsertResponse response = client.insert(TABLE_NAME, dataStream, ClickHouseFormat.JSONEachRow, - insertSettings).get(3, TimeUnit.SECONDS)) { - - log.info("Insert finished: {} rows written", response.getMetrics().getMetric(ServerMetrics.NUM_ROWS_WRITTEN).getLong()); - } catch (Exception e) { - log.error("Failed to write JSONEachRow data", e); - throw new RuntimeException(e); - } -} - -``` -### insert(String tableName, List<?> data, InsertSettings settings) {#insertstring-tablename-listlt-data-insertsettings-settings} - -データベースに書き込みリクエストを送信します。オブジェクトのリストは効率的なフォーマットに変換されてからサーバーに送信されます。リストアイテムのクラスは、`register(Class, TableSchema)` メソッドを使用して事前に登録する必要があります。 - -**シグネチャ** -```java -client.insert(String tableName, List data, InsertSettings settings) -client.insert(String tableName, List data) -``` - -**パラメータ** - -`tableName` - 対象テーブルの名前。 - -`data` - コレクションDTO(データ転送オブジェクト)オブジェクト。 - -`settings` - リクエスト設定。 - -**返り値** - -`InsertResponse` 型の Future - 操作の結果とサーバー側のメトリクスのような追加情報。 - -**例** - -```java showLineNumbers -// 重要な手順(1度だけ)- テーブルスキーマに従ってオブジェクトシリアライザーを事前コンパイルするためにクラスを登録します。 -client.register(ArticleViewEvent.class, client.getTableSchema(TABLE_NAME)); - -List events = loadBatch(); - -try (InsertResponse response = client.insert(TABLE_NAME, events).get()) { - // レスポンスを処理し、その後はクローズされ、リクエストを扱った接続が解放されます。 -} -``` -### InsertSettings {#insertsettings} - -挿入操作のための設定オプション。 - -**設定メソッド** - -| メソッド | 説明 | -|-----------------------------------------------|----------------------------------------------------------------------------------------------------------------------| -| `setQueryId(String queryId)` | 操作に割り当てられるクエリIDを設定します。デフォルト: `null`。 | -| `setDeduplicationToken(String token)` | 重複排除トークンを設定します。このトークンはサーバーに送信され、クエリを識別するのに使用できます。デフォルト: `null`。 | -| `setInputStreamCopyBufferSize(int size)` | コピーバッファサイズ。バッファは、ユーザー提供の入力ストリームから出力ストリームにデータをコピーする際に使用されます。デフォルト: `8196`。 | -| `serverSetting(String name, String value)` | 操作のための個別のサーバー設定を設定します。 | -| `serverSetting(String name, Collection values)` | 複数の値を持つ個別のサーバー設定を設定します。コレクションのアイテムは `String` 値でなければなりません。 | -| `setDBRoles(Collection dbRoles)` | 操作を実行する前に設定されるDBロールを設定します。コレクションのアイテムは `String` 値でなければなりません。 | -| `setOption(String option, Object value)` | 生の形式で設定オプションを設定します。これはサーバー設定ではありません。 | -### InsertResponse {#insertresponse} - -挿入操作の結果を保持するレスポンスオブジェクト。クライアントがサーバーからレスポンスを受け取ったときのみ利用可能です。 - -:::note -このオブジェクトは可能な限り早くクローズして、接続を解放する必要があります。以前のレスポンスのすべてのデータが完全に読み取られるまで、接続は再利用できません。 -::: - -| メソッド | 説明 | 
-|-----------------------------------|--------------------------------------------------------------------------------------------------| -| `OperationMetrics getMetrics()` | 操作メトリクスを持つオブジェクトを返します。 | -| `String getQueryId()` | アプリケーションによって操作に割り当てられたクエリIDを返します(操作設定またはサーバーによって)。 | -## Query API {#query-api} -### query(String sqlQuery) {#querystring-sqlquery} - -`sqlQuery` をそのまま送信します。レスポンスフォーマットはクエリ設定によって設定されます。`QueryResponse` は、サポートされているフォーマットのために、読む必要のあるレスポンスストリームへの参照を保持します。 - -**シグネチャ** - -```java -CompletableFuture query(String sqlQuery, QuerySettings settings) -CompletableFuture query(String sqlQuery) -``` - -**パラメータ** - -`sqlQuery` - 単一のSQLステートメント。クエリはそのままサーバーに送信されます。 - -`settings` - リクエスト設定。 - -**返り値** - -`QueryResponse` 型の Future - 結果データセットおよびサーバー側メトリクスのような追加情報。レスポンスオブジェクトはデータセットを消費した後にクローズする必要があります。 - -**例** - -```java -final String sql = "select * from " + TABLE_NAME + " where title <> '' limit 10"; - -// デフォルトフォーマットは RowBinaryWithNamesAndTypesFormatReader なので、リーダーはすべてのカラム情報を持っています。 -try (QueryResponse response = client.query(sql).get(3, TimeUnit.SECONDS);) { - - // データに便利にアクセスするためのリーダーを作成します - ClickHouseBinaryFormatReader reader = client.newBinaryFormatReader(response); - - while (reader.hasNext()) { - reader.next(); // ストリームから次のレコードを読み取り、解析します - - // 値を取得します - double id = reader.getDouble("id"); - String title = reader.getString("title"); - String url = reader.getString("url"); - - // データを収集します - } -} catch (Exception e) { - log.error("Failed to read data", e); -} - -// ビジネスロジックを読み取りブロックの外に置いて、HTTP接続をできるだけ早く解放します。 -``` -### query(String sqlQuery, Map<String, Object> queryParams, QuerySettings settings) {#querystring-sqlquery-mapltstring-object-queryparams-querysettings-settings} - -`sqlQuery` をそのまま送り、サーバーがSQL式をコンパイルできるようにクエリパラメータも追加で送信します。 - -**シグネチャ** -```java -CompletableFuture query(String sqlQuery, Map queryParams, QuerySettings settings) -``` - -**パラメータ** - -`sqlQuery` - プレースホルダー `{}` を持つSQL式。 - -`queryParams` - サーバーでSQL式を完成させるための変数のマップ。 - -`settings` - リクエスト設定。 - -**返り値** - -`QueryResponse` 型の Future - 結果データセットおよびサーバー側メトリクスのような追加情報。レスポンスオブジェクトはデータセットを消費した後にクローズする必要があります。 - -**例** - -```java showLineNumbers - -// パラメータを定義します。これらはリクエストと共にサーバーに送信されます。 -Map queryParams = new HashMap<>(); -queryParams.put("param1", 2); - -try (QueryResponse response = - client.query("SELECT * FROM " + table + " WHERE col1 >= {param1:UInt32}", queryParams, new QuerySettings()).get()) { - - // データに便利にアクセスするためのリーダーを作成します - ClickHouseBinaryFormatReader reader = client.newBinaryFormatReader(response); - - while (reader.hasNext()) { - reader.next(); // ストリームから次のレコードを読み取り、解析します - - // データを読み取ります - } - -} catch (Exception e) { - log.error("Failed to read data", e); -} - -``` -### queryAll(String sqlQuery) {#queryallstring-sqlquery} - -`RowBinaryWithNamesAndTypes` フォーマットでデータをクエリします。結果はコレクションとして返されます。読み取りパフォーマンスはリーダーと同じですが、全データセットを保持するためにはより多くのメモリが必要です。 - -**シグネチャ** -```java -List queryAll(String sqlQuery) -``` - -**パラメータ** - -`sqlQuery` - サーバーからデータをクエリするためのSQL式。 - -**返り値** - -結果データに行スタイルでアクセスを提供する `GenericRecord` オブジェクトのリストとして表される完全なデータセット。 - -**例** - -```java showLineNumbers -try { - log.info("Reading whole table and process record by record"); - final String sql = "select * from " + TABLE_NAME + " where title <> ''"; - - // 全結果セットを読み取り、レコードごとに処理します - client.queryAll(sql).forEach(row -> { - double id = row.getDouble("id"); - String title = row.getString("title"); - String url = row.getString("url"); - - log.info("id: {}, title: {}, url: {}", id, title, url); - }); -} catch (Exception e) { - 
log.error("Failed to read data", e); -} -``` -### QuerySettings {#querysettings} - -クエリ操作のための設定オプション。 - -**設定メソッド** - -| メソッド | 説明 | -|---------------------------------------------|----------------------------------------------------------------------------------------------------------------| -| `setQueryId(String queryId)` | 操作に割り当てられるクエリIDを設定します。 | -| `setFormat(ClickHouseFormat format)` | レスポンスフォーマットを設定します。完全なリストについては `RowBinaryWithNamesAndTypes` を参照してください。 | -| `setMaxExecutionTime(Integer maxExecutionTime)` | サーバー上の操作実行時間を設定します。読み取りタイムアウトには影響しません。 | -| `waitEndOfQuery(Boolean waitEndOfQuery)` | サーバーにレスポンスを送信する前にクエリの終了を待つようリクエストします。 | -| `setUseServerTimeZone(Boolean useServerTimeZone)` | サーバーのタイムゾーン(クライアント設定を参照)が、操作の結果のデータ/時間型を解析するために使用されます。デフォルトは `false`。 | -| `setUseTimeZone(String timeZone)` | サーバーに `timeZone` を使用して時間変換を行うようリクエストします。詳細は [session_timezone](/operations/settings/settings#session_timezone) を参照してください。 | -| `serverSetting(String name, String value)` | 操作のための個別のサーバー設定を設定します。 | -| `serverSetting(String name, Collection values)` | 複数の値を持つ個別のサーバー設定を設定します。コレクションのアイテムは `String` 値でなければなりません。 | -| `setDBRoles(Collection dbRoles)` | 操作を実行する前に設定されるDBロールを設定します。アイテムのコレクションは `String` 値でなければなりません。 | -| `setOption(String option, Object value)` | 生の形式で設定オプションを設定します。これはサーバー設定ではありません。 | -### QueryResponse {#queryresponse} - -クエリ実行の結果を保持するレスポンスオブジェクト。クライアントがサーバーからレスポンスを受け取ったときのみ利用可能です。 - -:::note -このオブジェクトは可能な限り早くクローズして、接続を解放する必要があります。以前のレスポンスのすべてのデータが完全に読み取られるまで、接続は再利用できません。 -::: - -| メソッド | 説明 | -|-------------------------------------|--------------------------------------------------------------------------------------------------| -| `ClickHouseFormat getFormat()` | レスポンスでデータがエンコードされているフォーマットを返します。 | -| `InputStream getInputStream()` | 指定されたフォーマットでのデータの非圧縮バイトストリームを返します。 | -| `OperationMetrics getMetrics()` | 操作メトリクスを持つオブジェクトを返します。 | -| `String getQueryId()` | アプリケーションによって操作に割り当てられたクエリIDを返します(操作設定またはサーバーによって)。 | -| `TimeZone getTimeZone()` | レスポンス内の Date/DateTime タイプを処理するために使用するべきタイムゾーンを返します。 | -### Examples {#examples} - -- 例のコードは [repo](https://github.com/ClickHouse/clickhouse-java/tree/main/examples/client-v2) で利用可能です。 -- 参照用の Spring Service [実装](https://github.com/ClickHouse/clickhouse-java/tree/main/examples/demo-service) です。 -## Common API {#common-api} -### getTableSchema(String table) {#gettableschemastring-table} - -`table` のテーブルスキーマを取得します。 - -**シグネチャ** - -```java -TableSchema getTableSchema(String table) -TableSchema getTableSchema(String table, String database) -``` - -**パラメータ** - -`table` - スキーマデータを取得するテーブル名。 - -`database` - 対象テーブルが定義されているデータベース。 - -**返り値** - -テーブルカラムのリストを持つ `TableSchema` オブジェクトを返します。 -### getTableSchemaFromQuery(String sql) {#gettableschemafromquerystring-sql} - -SQLステートメントからスキーマを取得します。 - -**シグネチャ** - -```java -TableSchema getTableSchemaFromQuery(String sql) -``` - -**パラメータ** - -`sql` - スキーマを返すべき "SELECT" SQL ステートメント。 - -**返り値** - -`sql` 式に一致するカラムを持つ `TableSchema` オブジェクトを返します。 -### TableSchema {#tableschema} -### register(Class<?> clazz, TableSchema schema) {#registerclasslt-clazz-tableschema-schema} - -データを書き込み/読み込みに使うためのJavaクラスのシリアル化とデシリアル化レイヤーをコンパイルします。メソッドは、ペアゲッター/セッターと対応するカラムのためのシリアライザーとデシリアライザーを作成します。 -カラムの一致は、メソッド名からその名前を抽出することによって見つけられます。例えば、`getFirstName` はカラム `first_name` または `firstname` に対応します。 - -**シグネチャ** - -```java -void register(Class clazz, TableSchema schema) -``` - -**パラメータ** - -`clazz` - データの読み書きに使用するPOJOを表すクラス。 - -`schema` - POJOプロパティと一致させるために使用するデータスキーマ。 - -**例** - -```java showLineNumbers -client.register(ArticleViewEvent.class, 
client.getTableSchema(TABLE_NAME)); -``` -## Usage Examples {#usage-examples} - -完全な例のコードはリポジトリの 'example' [フォルダ](https://github.com/ClickHouse/clickhouse-java/tree/main/examples) に保存されています: - -- [client-v2](https://github.com/ClickHouse/clickhouse-java/tree/main/examples/client-v2) - 主要な例のセット。 -- [demo-service](https://github.com/ClickHouse/clickhouse-java/tree/main/examples/demo-service) - Spring Boot アプリケーションでのクライアントの使用例。 -- [demo-kotlin-service](https://github.com/ClickHouse/clickhouse-java/tree/main/examples/demo-kotlin-service) - Ktor (Kotlin) アプリケーションでのクライアントの使用例。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/java/client/_snippets/_v0_8.mdx.hash b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/java/client/_snippets/_v0_8.mdx.hash deleted file mode 100644 index d40444c65ee..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/java/client/_snippets/_v0_8.mdx.hash +++ /dev/null @@ -1 +0,0 @@ -f1f52f7ed30d542c diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/java/client/client.mdx b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/java/client/client.mdx deleted file mode 100644 index 269a4794802..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/java/client/client.mdx +++ /dev/null @@ -1,27 +0,0 @@ ---- -'sidebar_label': 'クライアント' -'sidebar_position': 2 -'keywords': -- 'clickhouse' -- 'java' -- 'client' -- 'integrate' -'description': 'Java ClickHouseコネクタ' -'slug': '/integrations/language-clients/java/client' -'title': 'Javaクライアント' ---- - -import ClientVersionDropdown from '@theme/ClientVersionDropdown/ClientVersionDropdown'; -import v07 from './_snippets/_v0_7.mdx' -import v08 from './_snippets/_v0_8.mdx' - - diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/java/client/client.mdx.hash b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/java/client/client.mdx.hash deleted file mode 100644 index 509cca726cd..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/java/client/client.mdx.hash +++ /dev/null @@ -1 +0,0 @@ -cfe189cd37427b8d diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/java/index.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/java/index.md deleted file mode 100644 index 5389f5c6a9a..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/java/index.md +++ /dev/null @@ -1,191 +0,0 @@ ---- -title: 'Java' -keywords: -- 'clickhouse' -- 'java' -- 'jdbc' -- 'client' -- 'integrate' -- 'r2dbc' -description: 'Options for connecting to ClickHouse from Java' -slug: '/integrations/java' ---- - -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; -import CodeBlock from '@theme/CodeBlock'; - - - -# Java クライアントの概要 - -- [クライアント 0.8+](./client/client.mdx) -- [JDBC 0.8+](./jdbc/jdbc.mdx) -- [R2DBC ドライバ](./r2dbc.md) - -## ClickHouse クライアント {#clickhouse-client} - -Java クライアントは、ClickHouse サーバーとのネットワーク通信の詳細を抽象化する独自の API を実装したライブラリです。現在、HTTP インターフェースのみがサポートされています。このライブラリは、さまざまな ClickHouse フォーマットおよびその他の関連機能を操作するためのユーティリティを提供します。 - -Java クライアントは 2015 年に開発されました。そのコードベースは非常にメンテナンスが難しく、API は混乱を招くものであり、さらなる最適化が困難でした。そこで、私たちは 2024 年に新しいコンポーネント `client-v2` にリファクタリングしました。これにより、明確な API、より軽量なコードベース、パフォーマンスの向上、ClickHouse フォーマットのサポート(主に RowBinary と 
Native)が改善されました。JDBC は近い将来にこのクライアントを使用します。 - -### サポートされているデータ型 {#supported-data-types} - -|**データ型** |**クライアント V2 サポート**|**クライアント V1 サポート**| -|-----------------------|---------------------|---------------------| -|Int8 |✔ |✔ | -|Int16 |✔ |✔ | -|Int32 |✔ |✔ | -|Int64 |✔ |✔ | -|Int128 |✔ |✔ | -|Int256 |✔ |✔ | -|UInt8 |✔ |✔ | -|UInt16 |✔ |✔ | -|UInt32 |✔ |✔ | -|UInt64 |✔ |✔ | -|UInt128 |✔ |✔ | -|UInt256 |✔ |✔ | -|Float32 |✔ |✔ | -|Float64 |✔ |✔ | -|Decimal |✔ |✔ | -|Decimal32 |✔ |✔ | -|Decimal64 |✔ |✔ | -|Decimal128 |✔ |✔ | -|Decimal256 |✔ |✔ | -|Bool |✔ |✔ | -|String |✔ |✔ | -|FixedString |✔ |✔ | -|Nullable |✔ |✔ | -|Date |✔ |✔ | -|Date32 |✔ |✔ | -|DateTime |✔ |✔ | -|DateTime32 |✔ |✔ | -|DateTime64 |✔ |✔ | -|Interval |✗ |✗ | -|Enum |✔ |✔ | -|Enum8 |✔ |✔ | -|Enum16 |✔ |✔ | -|Array |✔ |✔ | -|Map |✔ |✔ | -|Nested |✔ |✔ | -|Tuple |✔ |✔ | -|UUID |✔ |✔ | -|IPv4 |✔ |✔ | -|IPv6 |✔ |✔ | -|Object |✗ |✔ | -|Point |✔ |✔ | -|Nothing |✔ |✔ | -|MultiPolygon |✔ |✔ | -|Ring |✔ |✔ | -|Polygon |✔ |✔ | -|SimpleAggregateFunction|✔ |✔ | -|AggregateFunction |✗ |✔ | - -[ClickHouse データ型](/sql-reference/data-types) - -:::note -- AggregatedFunction - :warning: `SELECT * FROM table ...` はサポートされていません -- Decimal - 一貫性のために 21.9+ で `SET output_format_decimal_trailing_zeros=1` -- Enum - 文字列および整数の両方として扱うことができます -- UInt64 - client-v1 では `long` にマッピングされています -::: - -### 機能 {#features} - -クライアントの機能一覧: - -| 名称 | クライアント V2 | クライアント V1 | コメント -|----------------------------------------------|:---------:|:---------:|:---------:| -| Http 接続 |✔ |✔ | | -| Http 圧縮 (LZ4) |✔ |✔ | | -| サーバー応答圧縮 - LZ4 |✔ |✔ | | -| クライアントリクエスト圧縮 - LZ4 |✔ |✔ | | -| HTTPS |✔ |✔ | | -| クライアント SSL 証明書 (mTLS) |✔ |✔ | | -| Http プロキシ |✔ |✔ | | -| POJO SerDe |✔ |✗ | | -| 接続プール |✔ |✔ | Apache HTTP Client 使用時 | -| 名前付きパラメータ |✔ |✔ | | -| 失敗時の再試行 |✔ |✔ | | -| フェイルオーバー |✗ |✔ | | -| ロードバランシング |✗ |✔ | | -| サーバーの自動発見 |✗ |✔ | | -| ログコメント |✔ |✔ | | -| セッションロール |✔ |✔ | | -| SSL クライアント認証 |✔ |✔ | | -| セッションタイムゾーン |✔ |✔ | | - - -JDBC ドライバは、基盤となるクライアント実装と同じ機能を継承しています。他の JDBC 機能はその [ページ](/integrations/language-clients/java/jdbc) に記載されています。 - -### 互換性 {#compatibility} - -- このリポジトリのすべてのプロジェクトは、すべての [アクティブな LTS バージョン](https://github.com/ClickHouse/ClickHouse/pulls?q=is%3Aopen+is%3Apr+label%3Arelease) の ClickHouse でテストされています。 -- [サポートポリシー](https://github.com/ClickHouse/ClickHouse/blob/master/SECURITY.md#security-change-log-and-support) -- セキュリティ修正や新しい改善を見逃さないために、クライアントは継続的にアップグレードすることをお勧めします。 -- v2 API への移行に関する問題がある場合は、[問題を作成](https://github.com/ClickHouse/clickhouse-java/issues/new?assignees=&labels=v2-feedback&projects=&template=v2-feedback.md&title=)してください。私たちが対応します! 
- -### ロギング {#logging} - -私たちの Java 言語クライアントは、[SLF4J](https://www.slf4j.org/)を使用してロギングを行います。`Logback` や `Log4j` などの SLF4J に互換性のあるロギングフレームワークを使用できます。 -例えば、Maven を使用している場合、以下の依存関係を `pom.xml` ファイルに追加できます。 - -```xml title="pom.xml" - - - - org.slf4j - slf4j-api - 2.0.16 - - - - - ch.qos.logback - logback-core - 1.5.16 - - - - - ch.qos.logback - logback-classic - 1.5.16 - - -``` - -#### ロギングの設定 {#configuring-logging} - -これは、使用しているロギングフレームワークによって異なります。例えば、`Logback` を使用している場合、`logback.xml` というファイルにロギングを設定できます。 - -```xml title="logback.xml" - - - - - [%d{yyyy-MM-dd HH:mm:ss}] [%level] [%thread] %logger{36} - %msg%n - - - - - - logs/app.log - true - - [%d{yyyy-MM-dd HH:mm:ss}] [%level] [%thread] %logger{36} - %msg%n - - - - - - - - - - - - -``` - -[変更履歴](https://github.com/ClickHouse/clickhouse-java/blob/main/CHANGELOG.md) diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/java/index.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/java/index.md.hash deleted file mode 100644 index 5c76b6db323..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/java/index.md.hash +++ /dev/null @@ -1 +0,0 @@ -075482c10c195271 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/java/jdbc.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/java/jdbc.md.hash deleted file mode 100644 index 4e3e1f32ab5..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/java/jdbc.md.hash +++ /dev/null @@ -1 +0,0 @@ -262f4eb39e5c1e59 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/java/jdbc/_snippets/_v0_7.mdx b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/java/jdbc/_snippets/_v0_7.mdx deleted file mode 100644 index 8a42cfd37f1..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/java/jdbc/_snippets/_v0_7.mdx +++ /dev/null @@ -1,391 +0,0 @@ ---- -{} ---- - -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; - -`clickhouse-jdbc` は標準の JDBC インターフェースを実装しています。[clickhouse-client](/integrations/sql-clients/sql-console) の上に構築されており、カスタムタイプマッピング、トランザクションサポート、標準の同期 `UPDATE` および `DELETE` ステートメントなどの追加機能を提供します。そのため、レガシーアプリケーションやツールと簡単に使用できます。 - -:::note -最新の JDBC (0.7.2) バージョンは Client-V1 を使用します -::: - -`clickhouse-jdbc` API は同期的で、一般的に SQL パースやタイプマッピング/変換などのオーバーヘッドが多くなります。パフォーマンスが重要である場合や、ClickHouse へのより直接的なアクセス方法を好む場合は、[clickhouse-client](/integrations/sql-clients/sql-console) を検討してください。 - -## 環境要件 {#environment-requirements} - -- [OpenJDK](https://openjdk.java.net) バージョン >= 8 - -### セットアップ {#setup} - - - - - ```xml - - - com.clickhouse - clickhouse-jdbc - 0.7.2 - - shaded-all - - ``` - - - - - ```kotlin - // https://mvnrepository.com/artifact/com.clickhouse/clickhouse-jdbc - // すべての依存関係を含む uber jar を使用します。より小さな jar のために classifier を http に変更します - implementation("com.clickhouse:clickhouse-jdbc:0.7.2:shaded-all") - ``` - - - - ```groovy - // https://mvnrepository.com/artifact/com.clickhouse/clickhouse-jdbc - // すべての依存関係を含む uber jar を使用します。より小さな jar のために classifier を http に変更します - implementation 'com.clickhouse:clickhouse-jdbc:0.7.2:shaded-all' - ``` - - - - -バージョン `0.5.0` から、Client にパックされた Apache HTTP Client を使用しています。パッケージの共有バージョンがないため、ロガーを依存関係として追加する必要があります。 - - - - - ```xml - - - org.slf4j - slf4j-api - 2.0.16 - - ``` - - - - - ```kotlin - // 
https://mvnrepository.com/artifact/org.slf4j/slf4j-api - implementation("org.slf4j:slf4j-api:2.0.16") - ``` - - - - ```groovy - // https://mvnrepository.com/artifact/org.slf4j/slf4j-api - implementation 'org.slf4j:slf4j-api:2.0.16' - ``` - - - - -## 設定 {#configuration} - -**ドライバークラス**: `com.clickhouse.jdbc.ClickHouseDriver` - -**URL 構文**: `jdbc:(ch|clickhouse)[:]://endpoint1[,endpoint2,...][/][?param1=value1¶m2=value2][#tag1,tag2,...]` たとえば: - -- `jdbc:ch://localhost` は `jdbc:clickhouse:http://localhost:8123` と同じです -- `jdbc:ch:https://localhost` は `jdbc:clickhouse:http://localhost:8443?ssl=true&sslmode=STRICT` と同じです -- `jdbc:ch:grpc://localhost` は `jdbc:clickhouse:grpc://localhost:9100` と同じです - -**接続プロパティ**: - -| プロパティ | デフォルト | 説明 | -| -------------------------- | --------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `continueBatchOnError` | `false` | エラーが発生した場合にバッチ処理を続行するかどうか | -| `createDatabaseIfNotExist` | `false` | データベースが存在しない場合に作成するかどうか | -| `custom_http_headers` | | カンマ区切りのカスタム HTTP ヘッダー、例: `User-Agent=client1,X-Gateway-Id=123` | -| `custom_http_params` | | カンマ区切りのカスタム HTTP クエリパラメータ、例: `extremes=0,max_result_rows=100` | -| `nullAsDefault` | `0` | `0` - Null 値をそのまま扱い、null を非 Nullable カラムに挿入する際に例外をスロー; `1` - Null 値をそのまま扱い、挿入のための null チェックを無効化; `2` - Null を対応するデータ型のデフォルト値に置き換える(クエリと挿入の両方に対して) | -| `jdbcCompliance` | `true` | 標準の同期 UPDATE/DELETE および擬似トランザクションをサポートするかどうか | -| `typeMappings` | | ClickHouse データ型と Java クラス間のマッピングをカスタマイズします。これは [`getColumnType()`](https://docs.oracle.com/javase/8/docs/api/java/sql/ResultSetMetaData.html#getColumnType-int-) と [`getObject(Class<>?>`)](https://docs.oracle.com/javase/8/docs/api/java/sql/ResultSet.html#getObject-java.lang.String-java.lang.Class-) の結果に影響します。たとえば: `UInt128=java.lang.String,UInt256=java.lang.String` | -| `wrapperObject` | `false` | [`getObject()`](https://docs.oracle.com/javase/8/docs/api/java/sql/ResultSet.html#getObject-int-) が Array/Tuple の場合に java.sql.Array / java.sql.Struct を返すべきかどうか。 | - -注意: 詳細は [JDBC 特有の設定](https://github.com/ClickHouse/clickhouse-java/blob/main/clickhouse-jdbc/src/main/java/com/clickhouse/jdbc/JdbcConfig.java) を参照してください。 - -## サポートされているデータ型 {#supported-data-types} - -JDBC ドライバーは、クライアントライブラリと同じデータ形式をサポートしています。 - -:::note -- AggregatedFunction - :warning: `SELECT * FROM table ...` はサポートされていません -- Decimal - 一貫性のために 21.9+ で `SET output_format_decimal_trailing_zeros=1` -- Enum - 文字列と整数の両方として扱うことができます -- UInt64 - `long`(client-v1 にマッピング) -::: - -## 接続の作成 {#creating-connection} - -```java -String url = "jdbc:ch://my-server/system"; // デフォルトで http プロトコルとポート 8123 を使用する - -Properties properties = new Properties(); - -ClickHouseDataSource dataSource = new ClickHouseDataSource(url, properties); -try (Connection conn = dataSource.getConnection("default", "password"); - Statement stmt = conn.createStatement()) { -} -``` - -## 簡単なステートメント {#simple-statement} - -```java showLineNumbers - -try (Connection conn = dataSource.getConnection(...); - Statement stmt = conn.createStatement()) { - ResultSet rs = stmt.executeQuery("select * from numbers(50000)"); - while(rs.next()) { - // ... 
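// 一例: numbers(50000) は UInt64 型の `number` カラムを返すため、
// 上記「サポートされているデータ型」の注記のとおり long として読み取れます
long number = rs.getLong(1);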
- } -} -``` - -## 挿入 {#insert} - -:::note -- `Statement` の代わりに `PreparedStatement` を使用してください -::: - -入力関数に比べると簡単ですが、パフォーマンスが遅くなります(下記を参照): - -```java showLineNumbers -try (PreparedStatement ps = conn.prepareStatement("insert into mytable(* except (description))")) { - ps.setString(1, "test"); // id - ps.setObject(2, LocalDateTime.now()); // タイムスタンプ - ps.addBatch(); // パラメータは即座にバイナリ形式でバッファストリームに書き込まれます - ... - ps.executeBatch(); // すべてを ClickHouse にストリームします -} -``` - -### 入力テーブル関数を使用した場合 {#with-input-table-function} - -優れたパフォーマンス特性を持つオプションです: - -```java showLineNumbers -try (PreparedStatement ps = conn.prepareStatement( - "insert into mytable select col1, col2 from input('col1 String, col2 DateTime64(3), col3 Int32')")) { - // カラム定義が解析されるため、ドライバーは 3 つのパラメータに col1、col2、col3 があることを知っています - ps.setString(1, "test"); // col1 - ps.setObject(2, LocalDateTime.now()); // col2、setTimestamp は遅いため推奨されません - ps.setInt(3, 123); // col3 - ps.addBatch(); // パラメータは即座にバイナリ形式でバッファストリームに書き込まれます - ... - ps.executeBatch(); // すべてを ClickHouse にストリームします -} -``` -- 可能な限り [入力関数のドキュメント](/sql-reference/table-functions/input/) を参照してください - -### プレースホルダを使用した挿入 {#insert-with-placeholders} - -このオプションは、小さな挿入にのみ推奨されます。なぜなら、長い SQL 式が必要となるためです(クライアント側で解析され、CPUとメモリを消費します): - -```java showLineNumbers -try (PreparedStatement ps = conn.prepareStatement("insert into mytable values(trim(?),?,?)")) { - ps.setString(1, "test"); // id - ps.setObject(2, LocalDateTime.now()); // タイムスタンプ - ps.setString(3, null); // 説明 - ps.addBatch(); // クエリにパラメータを追加します - ... - ps.executeBatch(); // 構成されたクエリを発行します: insert into mytable values(...)(...)...(...) -} -``` - -## DateTime とタイムゾーンの扱い {#handling-datetime-and-time-zones} - -`java.sql.Timestamp` の代わりに `java.time.LocalDateTime` または `java.time.OffsetDateTime` を、`java.sql.Date` の代わりに `java.time.LocalDate` を使用してください。 - -```java showLineNumbers -try (PreparedStatement ps = conn.prepareStatement("select date_time from mytable where date_time > ?")) { - ps.setObject(2, LocalDateTime.now()); - ResultSet rs = ps.executeQuery(); - while(rs.next()) { - LocalDateTime dateTime = (LocalDateTime) rs.getObject(1); - } - ... -} -``` - -## `AggregateFunction` の取り扱い {#handling-aggregatefunction} - -:::note -現在のところ、`groupBitmap` のみがサポートされています。 -::: - -```java showLineNumbers -// 入力関数を使用してバッチ挿入する -try (ClickHouseConnection conn = newConnection(props); - Statement s = conn.createStatement(); - PreparedStatement stmt = conn.prepareStatement( - "insert into test_batch_input select id, name, value from input('id Int32, name Nullable(String), desc Nullable(String), value AggregateFunction(groupBitmap, UInt32)')")) { - s.execute("drop table if exists test_batch_input;" - + "create table test_batch_input(id Int32, name Nullable(String), value AggregateFunction(groupBitmap, UInt32))engine=Memory"); - Object[][] objs = new Object[][] { - new Object[] { 1, "a", "aaaaa", ClickHouseBitmap.wrap(1, 2, 3, 4, 5) }, - new Object[] { 2, "b", null, ClickHouseBitmap.wrap(6, 7, 8, 9, 10) }, - new Object[] { 3, null, "33333", ClickHouseBitmap.wrap(11, 12, 13) } - }; - for (Object[] v : objs) { - stmt.setInt(1, (int) v[0]); - stmt.setString(2, (String) v[1]); - stmt.setString(3, (String) v[2]); - stmt.setObject(4, v[3]); - stmt.addBatch(); - } - int[] results = stmt.executeBatch(); - ... 
-} - -// ビットマップをクエリパラメータとして使用 -try (PreparedStatement stmt = conn.prepareStatement( - "SELECT bitmapContains(my_bitmap, toUInt32(1)) as v1, bitmapContains(my_bitmap, toUInt32(2)) as v2 from {tt 'ext_table'}")) { - stmt.setObject(1, ClickHouseExternalTable.builder().name("ext_table") - .columns("my_bitmap AggregateFunction(groupBitmap,UInt32)").format(ClickHouseFormat.RowBinary) - .content(new ByteArrayInputStream(ClickHouseBitmap.wrap(1, 3, 5).toBytes())) - .asTempTable() - .build()); - ResultSet rs = stmt.executeQuery(); - Assert.assertTrue(rs.next()); - Assert.assertEquals(rs.getInt(1), 1); - Assert.assertEquals(rs.getInt(2), 0); - Assert.assertFalse(rs.next()); -} -``` - -
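挿入結果の確認には、通常の JDBC API をそのまま使用できます。以下は、上記の例で作成した `test_batch_input` テーブルと取得済みの接続 `conn` を前提とした最小限のスケッチです。`AggregateFunction` カラムは `SELECT *` で取得できないため(前述の注意を参照)、ここでは `bitmapCardinality()` で要素数のみを確認しています。

```java showLineNumbers
// 前提: 上記の例で test_batch_input が作成済みで、conn は取得済みの接続です(例示用のスケッチ)
try (Statement stmt = conn.createStatement();
     ResultSet rs = stmt.executeQuery(
         "select id, name, bitmapCardinality(value) as cnt from test_batch_input order by id")) {
    while (rs.next()) {
        // name は Nullable(String) のため null が返る場合があります
        System.out.printf("id=%d, name=%s, cardinality=%d%n",
                rs.getInt(1), rs.getString(2), rs.getLong(3));
    }
}
```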
    - -## HTTP ライブラリの設定 {#configuring-http-library} - -ClickHouse JDBC コネクタは、[`HttpClient`](https://docs.oracle.com/en/java/javase/11/docs/api/java.net.http/java/net/http/HttpClient.html)、[`HttpURLConnection`](https://docs.oracle.com/en/java/javase/11/docs/api/java.base/java/net/HttpURLConnection.html)、および [Apache `HttpClient`](https://hc.apache.org/httpcomponents-client-5.2.x/) の 3 つの HTTP ライブラリをサポートしています。 - -:::note -`HttpClient` は JDK 11 以上でのみサポートされています。 -::: - -JDBC ドライバーはデフォルトで `HttpClient` を使用します。ClickHouse JDBC コネクタで使用する HTTP ライブラリを変更するには、次のプロパティを設定します。 - -```java -properties.setProperty("http_connection_provider", "APACHE_HTTP_CLIENT"); -``` - -対応する値の完全な一覧は次の通りです。 - -| プロパティ値 | HTTP ライブラリ | -|------------------------|-----------------------| -| HTTP_CLIENT | `HttpClient` | -| HTTP_URL_CONNECTION | `HttpURLConnection` | -| APACHE_HTTP_CLIENT | Apache `HttpClient` | - -
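参考までに、前述の「接続の作成」の例に `http_connection_provider` を組み合わせた最小限のスケッチを示します。URL と認証情報は例示用の仮の値であり、プロパティ値は上記の表のいずれかに置き換えてください。

```java showLineNumbers
Properties properties = new Properties();
// Apache HttpClient を使用する場合の例(指定できる値は上記の表を参照)
properties.setProperty("http_connection_provider", "APACHE_HTTP_CLIENT");

// URL・認証情報は例示用の仮の値です
ClickHouseDataSource dataSource = new ClickHouseDataSource("jdbc:ch://localhost:8123/default", properties);
try (Connection conn = dataSource.getConnection("default", "password");
     Statement stmt = conn.createStatement();
     ResultSet rs = stmt.executeQuery("select 1")) {
    // ...
}
```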
    - -## SSL を使用して ClickHouse に接続 {#connect-to-clickhouse-with-ssl} - -SSL を使用して ClickHouse に安全な JDBC 接続を確立するには、JDBC プロパティに SSL パラメータを含めるように設定する必要があります。これには通常、JDBC URL または Properties オブジェクトに `sslmode` や `sslrootcert` などの SSL プロパティを指定することが含まれます。 - -## SSL プロパティ {#ssl-properties} - -| 名前 | デフォルト値 | オプション値 | 説明 | -|---------------------|---------------|------------------|-------------------------------------------------------------------------------------------| -| `ssl` | false | true, false | 接続に SSL/TLS を有効にするかどうか | -| `sslmode` | strict | strict, none | SSL/TLS 証明書を検証するかどうか | -| `sslrootcert` | | | SSL/TLS ルート証明書へのパス | -| `sslcert` | | | SSL/TLS 証明書へのパス | -| `sslkey` | | | PKCS#8 形式の RSA キー | -| `key_store_type` | | JKS, PKCS12 | `KeyStore`/`TrustStore` ファイルのタイプまたは形式を指定 | -| `trust_store` | | | `TrustStore` ファイルへのパス | -| `key_store_password` | | | `KeyStore` 設定で指定された `KeyStore` ファイルにアクセスするために必要なパスワード | - -これらのプロパティにより、Java アプリケーションは ClickHouse サーバーとの間で暗号化された接続で通信を行い、データの送信中のセキュリティが向上します。 - -```java showLineNumbers - String url = "jdbc:ch://your-server:8443/system"; - - Properties properties = new Properties(); - properties.setProperty("ssl", "true"); - properties.setProperty("sslmode", "strict"); // NONE ですべてのサーバーを信頼; STRICT は信頼されたのみ - properties.setProperty("sslrootcert", "/mine.crt"); - try (Connection con = DriverManager - .getConnection(url, properties)) { - - try (PreparedStatement stmt = con.prepareStatement( - - // ここにコードを配置します - - } - } -``` - -## 大きな挿入における JDBC タイムアウトの解決 {#resolving-jdbc-timeout-on-large-inserts} - -ClickHouse で長い実行時間の大きな挿入を行う際に、次のような JDBC タイムアウトエラーが発生する場合があります。 - -```plaintext -Caused by: java.sql.SQLException: Read timed out, server myHostname [uri=https://hostname.aws.clickhouse.cloud:8443] -``` - -これらのエラーはデータ挿入プロセスを中断させ、システムの安定性に影響を及ぼす可能性があります。この問題を解決するには、クライアント OS のいくつかのタイムアウト設定を調整する必要があります。 - -### Mac OS {#mac-os} - -Mac OS では、次の設定を調整して問題を解決できます。 - -- `net.inet.tcp.keepidle`: 60000 -- `net.inet.tcp.keepintvl`: 45000 -- `net.inet.tcp.keepinit`: 45000 -- `net.inet.tcp.keepcnt`: 8 -- `net.inet.tcp.always_keepalive`: 1 - -### Linux {#linux} - -Linux では、同等の設定だけでは問題を解決できない場合があります。Linux がソケットのキープアライブ設定を扱う方法の違いから、追加の手順が必要です。次の手順に従ってください。 - -1. `/etc/sysctl.conf` または関連する設定ファイルに次の Linux カーネルパラメータを調整します。 - -- `net.inet.tcp.keepidle`: 60000 -- `net.inet.tcp.keepintvl`: 45000 -- `net.inet.tcp.keepinit`: 45000 -- `net.inet.tcp.keepcnt`: 8 -- `net.inet.tcp.always_keepalive`: 1 -- `net.ipv4.tcp_keepalive_intvl`: 75 -- `net.ipv4.tcp_keepalive_probes`: 9 -- `net.ipv4.tcp_keepalive_time`: 60 (デフォルトの 300 秒から値を下げることを検討してください) - -2. 
カーネルパラメータを変更した後、次のコマンドを実行して変更を適用します。 - -```shell -sudo sysctl -p - ``` - -これらの設定を行った後、クライアントがソケットで Keep Alive オプションを有効にしていることを確認する必要があります。 - -```java -properties.setProperty("socket_keepalive", "true"); -``` - -:::note -現在、ソケットのキープアライブを設定する場合は Apache HTTP Client ライブラリを使用する必要があります。他の 2 つの HTTP クライアントライブラリは `clickhouse-java` によってサポートされているため、ソケットオプションの設定を許可していません。詳細なガイドについては、[HTTP ライブラリの設定](/integrations/language-clients/java/jdbc-v1#configuring-http-library) を参照してください。 -::: - -また、同等のパラメータを JDBC URL に追加することもできます。 - -JDBC ドライバーのデフォルトのソケットおよび接続タイムアウトは 30 秒です。タイムアウトを大きなデータ挿入操作をサポートするために増やすことができます。`ClickHouseClient` の `options` メソッドを使用し、`ClickHouseClientOption` で定義されたいくつかのオプションを組み合わせて使用します。 - -```java showLineNumbers -final int MS_12H = 12 * 60 * 60 * 1000; // 12 h in ms -final String sql = "insert into table_a (c1, c2, c3) select c1, c2, c3 from table_b;"; - -try (ClickHouseClient client = ClickHouseClient.newInstance(ClickHouseProtocol.HTTP)) { - client.read(servers).write() - .option(ClickHouseClientOption.SOCKET_TIMEOUT, MS_12H) - .option(ClickHouseClientOption.CONNECTION_TIMEOUT, MS_12H) - .query(sql) - .executeAndWait(); -} -``` diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/java/jdbc/_snippets/_v0_7.mdx.hash b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/java/jdbc/_snippets/_v0_7.mdx.hash deleted file mode 100644 index 9e2537ee4d4..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/java/jdbc/_snippets/_v0_7.mdx.hash +++ /dev/null @@ -1 +0,0 @@ -b89cad33b2325493 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/java/jdbc/_snippets/_v0_8.mdx b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/java/jdbc/_snippets/_v0_8.mdx deleted file mode 100644 index 27abe22af25..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/java/jdbc/_snippets/_v0_8.mdx +++ /dev/null @@ -1,225 +0,0 @@ ---- -{} ---- - -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; - -:::note -`clickhouse-jdbc`は、最新のJavaクライアントを使用して標準JDBCインターフェースを実装しています。パフォーマンスや直接アクセスが重要な場合は、最新のJavaクライアントを直接使用することをお勧めします。 -::: - -## 0.7.xからの変更 {#changes-from-07x} -0.8では、ドライバがJDBC仕様をより厳密に遵守するように試みたため、いくつかの機能が削除され、影響を受ける可能性があります: - -| 古い機能 | メモ | -|----------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| トランザクションサポート | ドライバの初期バージョンは、トランザクションサポートを**シミュレート**しているだけで、予期しない結果を引き起こす可能性がありました。 | -| レスポンスカラムのリネーム | `ResultSet`は変更可能でしたが、効率のために現在は読み取り専用です。 | -| マルチステートメントSQL | マルチステートメントサポートは**シミュレート**されていましたが、現在は厳密に1:1に従います。 -| 名前付きパラメータ | JDBC仕様の一部ではありません。 | -| ストリームベースの`PreparedStatement` | ドライバの初期バージョンは、`PreparedStatement`のJDBC以外での使用を許可していました。そのようなオプションを希望される場合は、[Java Client](/integrations/language-clients/java/client/client.mdx)およびその[例](https://github.com/ClickHouse/clickhouse-java/tree/main/examples/client-v2)を参照することをお勧めします。 | - -:::note -`Date`はタイムゾーンなしで保存され、`DateTime`はタイムゾーン付きで保存されます。注意しないと予期しない結果が生じる可能性があります。 -::: - -## 環境要件 {#environment-requirements} - -- [OpenJDK](https://openjdk.java.net) バージョン >= 8 - -### セットアップ {#setup} - - - - - ```xml - - - com.clickhouse - clickhouse-jdbc - 0.8.2 - shaded-all - - ``` - - - - - 
```kotlin - // https://mvnrepository.com/artifact/com.clickhouse/clickhouse-jdbc - implementation("com.clickhouse:clickhouse-jdbc:0.8.2:shaded-all") - ``` - - - - ```groovy - // https://mvnrepository.com/artifact/com.clickhouse/clickhouse-jdbc - implementation 'com.clickhouse:clickhouse-jdbc:0.8.2:shaded-all' - ``` - - - - -## 設定 {#configuration} - -**ドライバクラス**: `com.clickhouse.jdbc.ClickHouseDriver` - -**URL構文**: `jdbc:(ch|clickhouse)[:]://endpoint1[,endpoint2,...][/][?param1=value1¶m2=value2][#tag1,tag2,...]`。例えば: - -- `jdbc:clickhouse:http://localhost:8123` -- `jdbc:clickhouse:https://localhost:8443?ssl=true` - -**接続プロパティ**: - -標準JDBCプロパティに加えて、ドライバは基盤となる[Java Client](/integrations/language-clients/java/client/client.mdx)が提供するClickHouse特有のプロパティをサポートしています。 -可能な限り、メソッドはその機能がサポートされていない場合、`SQLFeatureNotSupportedException`を返します。他のカスタムプロパティには次のものが含まれます: - -| プロパティ | デフォルト | 説明 | -|----------------------------------|---------|-----------------------------------------------------------| -| `disable_frameworks_detection` | `true` | User-Agentのためのフレームワーク検出を無効にします | -| `jdbc_ignore_unsupported_values` | `false` | `SQLFeatureNotSupportedException`を抑止します | -| `clickhouse.jdbc.v1` | `false` | 新しいJDBCの代わりに古いJDBC実装を使用します | -| `default_query_settings` | `null` | クエリ操作にデフォルトのクエリ設定を渡すことを許可します | - -## サポートされているデータ型 {#supported-data-types} - -JDBCドライバは、基盤となる[Java Client](/integrations/language-clients/java/client/client.mdx)と同じデータフォーマットをサポートしています。 - -### 日付、時刻、およびタイムゾーンの処理 {#handling-dates-times-and-timezones} -`java.sql.Date`、`java.sql.Time`、および`java.sql.Timestamp`は、タイムゾーンの計算を複雑にする可能性がありますが、もちろんサポートされています。 -`ZonedDateTime`および`OffsetDateTime`は、java.sql.Timestamp、java.sql.Date、java.sql.Timeの優れた代替品です。 - -## 接続の作成 {#creating-connection} - -```java -String url = "jdbc:ch://my-server:8123/system"; - -Properties properties = new Properties(); -DataSource dataSource = new DataSource(url, properties);//DataSourceまたはDriverManagerが主なエントリーポイントです -try (Connection conn = dataSource.getConnection()) { -... // 接続に対して何らかの処理を行う -``` - -## 資格情報と設定の提供 {#supplying-credentials-and-settings} - -```java showLineNumbers -String url = "jdbc:ch://localhost:8123?jdbc_ignore_unsupported_values=true&socket_timeout=10"; - -Properties info = new Properties(); -info.put("user", "default"); -info.put("password", "password"); -info.put("database", "some_db"); - -//DataSourceを使って接続を作成する -DataSource dataSource = new DataSource(url, info); -try (Connection conn = dataSource.getConnection()) { -... // 接続に対して何らかの処理を行う -} - -//DriverManagerを使用した別のアプローチ -try (Connection conn = DriverManager.getConnection(url, info)) { -... // 接続に対して何らかの処理を行う -} -``` - -## シンプルステートメント {#simple-statement} - -```java showLineNumbers - -try (Connection conn = dataSource.getConnection(...); - Statement stmt = conn.createStatement()) { - ResultSet rs = stmt.executeQuery("select * from numbers(50000)"); - while(rs.next()) { - // ... - } -} -``` - -## 挿入 {#insert} - -```java showLineNumbers -try (PreparedStatement ps = conn.prepareStatement("INSERT INTO mytable VALUES (?, ?)")) { - ps.setString(1, "test"); // id - ps.setObject(2, LocalDateTime.now()); // タイムスタンプ - ps.addBatch(); - ... 
- ps.executeBatch(); // すべてのデータをClickHouseにストリームする -} -``` - -## `HikariCP` {#hikaricp} - -```java showLineNumbers -// 接続プーリングはパフォーマンスにあまり寄与しません、 -// なぜなら基盤となる実装自体がプールを持っているからです。 -// 例えば: HttpURLConnection はソケット用にプールを持っています -HikariConfig poolConfig = new HikariConfig(); -poolConfig.setConnectionTimeout(5000L); -poolConfig.setMaximumPoolSize(20); -poolConfig.setMaxLifetime(300_000L); -poolConfig.setDataSource(new ClickHouseDataSource(url, properties)); - -try (HikariDataSource ds = new HikariDataSource(poolConfig); - Connection conn = ds.getConnection(); - Statement s = conn.createStatement(); - ResultSet rs = s.executeQuery("SELECT * FROM system.numbers LIMIT 3")) { - while (rs.next()) { - // 行の処理 - log.info("整数: {}, 文字列: {}", rs.getInt(1), rs.getString(1));//同じカラムだが異なる型 - } -} -``` - -## さらなる情報 {#more-information} -さらなる情報については、当社の[GitHubリポジトリ](https://github.com/ClickHouse/clickhouse-java)および[Java Clientのドキュメント](/integrations/language-clients/java/client/client.mdx)を参照してください。 - -## トラブルシューティング {#troubleshooting} -### ロギング {#logging} -ドライバは[slf4j](https://www.slf4j.org/)を使用してロギングを行い、クラスパス上で最初に利用可能な実装を使用します。 - -### 大規模な挿入時のJDBCタイムアウトの解決 {#resolving-jdbc-timeout-on-large-inserts} - -ClickHouseで長時間の実行時間を伴う大規模な挿入を行う際に、次のようなJDBCタイムアウトエラーに直面することがあります: - -```plaintext -Caused by: java.sql.SQLException: Read timed out, server myHostname [uri=https://hostname.aws.clickhouse.cloud:8443] -``` -これらのエラーはデータ挿入プロセスを中断し、システムの安定性に影響を与える可能性があります。この問題に対処するには、クライアントのOSでいくつかのタイムアウト設定を調整する必要があります。 - -#### Mac OS {#mac-os} - -Mac OSでは、次の設定を調整して問題を解決できます: - -- `net.inet.tcp.keepidle`: 60000 -- `net.inet.tcp.keepintvl`: 45000 -- `net.inet.tcp.keepinit`: 45000 -- `net.inet.tcp.keepcnt`: 8 -- `net.inet.tcp.always_keepalive`: 1 - -#### Linux {#linux} - -Linuxでは、同等の設定のみでは問題を解決できない場合があります。Linuxがソケットのキープアライブ設定を処理する方法の違いから、追加の手順が必要です。次の手順に従ってください: - -1. `/etc/sysctl.conf`または関連する設定ファイルで次のLinuxカーネルパラメータを調整します: - - - `net.inet.tcp.keepidle`: 60000 - - `net.inet.tcp.keepintvl`: 45000 - - `net.inet.tcp.keepinit`: 45000 - - `net.inet.tcp.keepcnt`: 8 - - `net.inet.tcp.always_keepalive`: 1 - - `net.ipv4.tcp_keepalive_intvl`: 75 - - `net.ipv4.tcp_keepalive_probes`: 9 - - `net.ipv4.tcp_keepalive_time`: 60 (デフォルトの300秒からこの値を下げることを考慮することができます) - -2. 
カーネルパラメータを変更した後、次のコマンドを実行して変更を適用します: - -```shell -sudo sysctl -p -``` - -これらの設定を行った後、クライアントがソケットでKeep Aliveオプションを有効にすることを確認する必要があります: - -```java -properties.setProperty("socket_keepalive", "true"); -``` diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/java/jdbc/_snippets/_v0_8.mdx.hash b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/java/jdbc/_snippets/_v0_8.mdx.hash deleted file mode 100644 index 3b2596cfa49..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/java/jdbc/_snippets/_v0_8.mdx.hash +++ /dev/null @@ -1 +0,0 @@ -8b1df2d368192ece diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/java/jdbc/jdbc.mdx b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/java/jdbc/jdbc.mdx deleted file mode 100644 index 8110fe11c36..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/java/jdbc/jdbc.mdx +++ /dev/null @@ -1,28 +0,0 @@ ---- -'sidebar_label': 'JDBC' -'sidebar_position': 4 -'keywords': -- 'clickhouse' -- 'java' -- 'jdbc' -- 'driver' -- 'integrate' -'description': 'ClickHouse JDBC ドライバー' -'slug': '/integrations/language-clients/java/jdbc' -'title': 'JDBC Driver' ---- - -import ClientVersionDropdown from '@theme/ClientVersionDropdown/ClientVersionDropdown'; -import v07 from './_snippets/_v0_7.mdx' -import v08 from './_snippets/_v0_8.mdx' - - diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/java/jdbc/jdbc.mdx.hash b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/java/jdbc/jdbc.mdx.hash deleted file mode 100644 index ef22650ea8c..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/java/jdbc/jdbc.mdx.hash +++ /dev/null @@ -1 +0,0 @@ -8f48cf9035fe847b diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/java/r2dbc.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/java/r2dbc.md deleted file mode 100644 index 97bb7318d87..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/java/r2dbc.md +++ /dev/null @@ -1,85 +0,0 @@ ---- -sidebar_label: 'R2DBC ドライバ' -sidebar_position: 5 -keywords: -- 'clickhouse' -- 'java' -- 'driver' -- 'integrate' -- 'r2dbc' -description: 'ClickHouse R2DBC ドライバ' -slug: '/integrations/java/r2dbc' -title: 'R2DBC driver' ---- - -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; -import CodeBlock from '@theme/CodeBlock'; - - -# R2DBCドライバ - -## R2DBCドライバ {#r2dbc-driver} - -[R2DBC](https://r2dbc.io/) は、ClickHouse用の非同期Javaクライアントのラッパーです。 - -### 環境要件 {#environment-requirements} - -- [OpenJDK](https://openjdk.java.net) バージョン >= 8 - -### セットアップ {#setup} - -```xml - - com.clickhouse - - clickhouse-r2dbc - 0.7.1 - - all - - - * - * - - - -``` - -### ClickHouseに接続する {#connect-to-clickhouse} - -```java showLineNumbers -ConnectionFactory connectionFactory = ConnectionFactories - .get("r2dbc:clickhouse:http://{username}:{password}@{host}:{port}/{database}"); - - Mono.from(connectionFactory.create()) - .flatMapMany(connection -> connection -``` - -### クエリ {#query} - -```java showLineNumbers -connection - .createStatement("select domain, path, toDate(cdate) as d, count(1) as count from clickdb.clicks where domain = :domain group by domain, path, d") - .bind("domain", domain) - .execute() - .flatMap(result -> result - .map((row, 
rowMetadata) -> String.format("%s%s[%s]:%d", row.get("domain", String.class), - row.get("path", String.class), - row.get("d", LocalDate.class), - row.get("count", Long.class))) - ) - .doOnNext(System.out::println) - .subscribe(); -``` - -### 挿入 {#insert} - -```java showLineNumbers -connection - .createStatement("insert into clickdb.clicks values (:domain, :path, :cdate, :count)") - .bind("domain", click.getDomain()) - .bind("path", click.getPath()) - .bind("cdate", LocalDateTime.now()) - .bind("count", 1) - .execute(); diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/java/r2dbc.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/java/r2dbc.md.hash deleted file mode 100644 index 7f1e7b59709..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/java/r2dbc.md.hash +++ /dev/null @@ -1 +0,0 @@ -88af24afdd3f7814 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/js.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/js.md deleted file mode 100644 index 9ffc2d5954a..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/js.md +++ /dev/null @@ -1,1265 +0,0 @@ ---- -sidebar_label: 'JavaScript' -sidebar_position: 4 -keywords: -- 'clickhouse' -- 'js' -- 'JavaScript' -- 'NodeJS' -- 'web' -- 'browser' -- 'Cloudflare' -- 'workers' -- 'client' -- 'connect' -- 'integrate' -slug: '/integrations/javascript' -description: 'The official JS client for connecting to ClickHouse.' -title: 'ClickHouse JS' ---- - -import ConnectionDetails from '@site/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_gather_your_details_http.mdx'; -import ExperimentalBadge from '@theme/badges/ExperimentalBadge'; - - -# ClickHouse JS - -ClickHouseに接続するための公式JSクライアントです。 -クライアントはTypeScriptで書かれており、クライアントの公開APIの型定義を提供します。 - -依存関係はゼロで、最大のパフォーマンスを最適化しており、さまざまなClickHouseのバージョンや構成(オンプレミスの単一ノード、オンプレミスクラスター、ClickHouse Cloud)でテストされています。 - -異なる環境用に2つの異なるバージョンのクライアントが利用可能です: -- `@clickhouse/client` - Node.jsのみ -- `@clickhouse/client-web` - ブラウザ(Chrome/Firefox)、Cloudflareワーカー - -TypeScriptを使用する場合は、少なくとも [version 4.5](https://www.typescriptlang.org/docs/handbook/release-notes/typescript-4-5.html) が必要で、これにより [インラインインポートおよびエクスポート構文](https://www.typescriptlang.org/docs/handbook/release-notes/typescript-4-5.html#type-modifiers-on-import-names) が有効になります。 - -クライアントのソースコードは [ClickHouse-JS GitHubリポジトリ](https://github.com/ClickHouse/clickhouse-js) で入手できます。 - -## 環境要件 (Node.js) {#environment-requirements-nodejs} - -Node.jsは、クライアントを実行するために環境に利用可能である必要があります。 -クライアントは、すべての [メンテナンスされている](https://github.com/nodejs/release#readme) Node.jsリリースと互換性があります。 - -Node.jsのバージョンがEnd-Of-Lifeに近づくと、クライアントはそれへのサポートを終了します。これは過去のものと見なされ、安全ではないためです。 - -現在のNode.jsバージョンのサポート: - -| Node.jsバージョン | サポートされている? 
| -|------------------|------------------| -| 22.x | ✔ | -| 20.x | ✔ | -| 18.x | ✔ | -| 16.x | ベストエフォート | - -## 環境要件 (Web) {#environment-requirements-web} - -クライアントのWebバージョンは、最新のChrome/Firefoxブラウザで公式にテストされており、React/Vue/AngularアプリケーションやCloudflareワーカーの依存関係として使用できます。 - -## インストール {#installation} - -最新の安定したNode.jsクライアントバージョンをインストールするには、次のコマンドを実行します: - -```sh -npm i @clickhouse/client -``` - -Webバージョンのインストール: - -```sh -npm i @clickhouse/client-web -``` - -## ClickHouseとの互換性 {#compatibility-with-clickhouse} - -| クライアントバージョン | ClickHouse | -|------------------|------------| -| 1.8.0 | 23.3+ | - -クライアントは古いバージョンでも機能する可能性がありますが、これはベストエフォートのサポートであり、保証はされていません。もしClickHouseのバージョンが23.3よりも古い場合は、[ClickHouseのセキュリティポリシー](https://github.com/ClickHouse/ClickHouse/blob/master/SECURITY.md)を参照し、アップグレードを検討してください。 - -## 例 {#examples} - -当社は、クライアントの使用シナリオのさまざまなケースを [examples](https://github.com/ClickHouse/clickhouse-js/blob/main/examples) の中で取り上げることを目指しています。 - -概要は [examples README](https://github.com/ClickHouse/clickhouse-js/blob/main/examples/README.md#overview) で入手できます。 - -もし例や以下の文書に不明点や不足があれば、自由に [ご連絡ください](./js.md#contact-us)。 - -### クライアントAPI {#client-api} - -明示的に異なると記載されていない限り、ほとんどの例はNode.jsおよびWebバージョンのクライアントの両方で互換性があります。 - -#### クライアントインスタンスの作成 {#creating-a-client-instance} - -必要に応じて、`createClient`ファクトリーを使ってクライアントインスタンスを作成できます: - -```ts -const client = createClient({ - /* configuration */ -}) -``` - -環境がESMモジュールをサポートしていない場合は、CJS構文を代わりに使用できます: - -```ts -const { createClient } = require('@clickhouse/client'); - -const client = createClient({ - /* configuration */ -}) -``` - -クライアントインスタンスは、インスタンス化時に [事前設定](./js.md#configuration) できます。 - -#### 設定 {#configuration} - -クライアントインスタンスを作成する際に、次の接続設定を調整できます: - -| 設定 | 説明 | デフォルト値 | 詳細情報 | -|----------------------------------------------------------------------|--------------------------------------------------------------------------------------|-------------------------|----------------------------------------------------------------------------------------------------------------------------| -| **url**?: string | ClickHouseインスタンスのURL。 | `http://localhost:8123` | [URL構成に関するドキュメント](./js.md#url-configuration) | -| **pathname**?: string | クライアントによって解析されたClickHouse URLに追加する任意のパス名。 | `''` | [パス名付きプロキシに関するドキュメント](./js.md#proxy-with-a-pathname) | -| **request_timeout**?: number | リクエストタイムアウト(ミリ秒単位)。 | `30_000` | - | -| **compression**?: `{ **response**?: boolean; **request**?: boolean }` | 圧縮を有効にします。 | - | [圧縮に関するドキュメント](./js.md#compression) | -| **username**?: string | リクエストを行うユーザーの名前。 | `default` | - | -| **password**?: string | ユーザーパスワード。 | `''` | - | -| **application**?: string | Node.jsクライアントを使用しているアプリケーションの名前。 | `clickhouse-js` | - | -| **database**?: string | 使用するデータベース名。 | `default` | - | -| **clickhouse_settings**?: ClickHouseSettings | すべてのリクエストに適用するClickHouseの設定。 | `{}` | - | -| **log**?: `{ **LoggerClass**?: Logger, **level**?: ClickHouseLogLevel }` | 内部クライアントログの設定。 | - | [ログに関するドキュメント](./js.md#logging-nodejs-only) | -| **session_id**?: string | 各リクエストに送信するオプションのClickHouseセッションID。 | - | - | -| **keep_alive**?: `{ **enabled**?: boolean }` | Node.jsとWebバージョンの両方でデフォルトで有効です。 | - | - | -| **http_headers**?: `Record` | ClickHouseリクエストのための追加のHTTPヘッダー。 | - | [認証付きリバースプロキシに関するドキュメント](./js.md#reverse-proxy-with-authentication) | -| **roles**?: string \| string[] | アウトゴーイングリクエストにアタッチするClickHouseのロール名。 | - | [HTTPインターフェースでのロールの使用](/interfaces/http#setting-role-with-query-parameters) | - -#### Node.js特有の設定パラメータ {#nodejs-specific-configuration-parameters} - -| 設定 | 説明 | デフォルト値 
| 詳細情報 | -|------------------------------------------------------------------------|-----------------------------------------------------|-----------------|---------------------------------------------------------------------------------------------------| -| **max_open_connections**?: number | ホストごとに許可する接続ソケットの最大数。 | `10` | - | -| **tls**?: `{ **ca_cert**: Buffer, **cert**?: Buffer, **key**?: Buffer }` | TLS証明書の構成。 | - | [TLSに関するドキュメント](./js.md#tls-certificates-nodejs-only) | -| **keep_alive**?: `{ **enabled**?: boolean, **idle_socket_ttl**?: number }` | - | - | [Keep Aliveに関するドキュメント](./js.md#keep-alive-configuration-nodejs-only) | -| **http_agent**?: http.Agent \| https.Agent
    | クライアント用のカスタムHTTPエージェント。 | - | [HTTPエージェントに関するドキュメント](./js.md#custom-httphttps-agent-experimental-nodejs-only) | -| **set_basic_auth_header**?: boolean
    | ベーシック認証資格情報で`Authorization`ヘッダーを設定します。 | `true` | [HTTPエージェントドキュメントでのこの設定の使用](./js.md#custom-httphttps-agent-experimental-nodejs-only) | - -### URL構成 {#url-configuration} - -:::important -URL構成は、常にハードコーディングされた値をオーバーライドし、この場合には警告がログに記録されます。 -::: - -クライアントインスタンスのほとんどのパラメータをURLで構成することができます。URL形式は `http[s]://[username:password@]hostname:port[/database][?param1=value1¶m2=value2]` です。ほとんどのケースで、特定のパラメータの名前は、設定オプションインターフェース内のそのパスを反映していますが、いくつかの例外があります。サポートされるパラメータは以下の通りです: - -| パラメータ | 型 | -|--------------------------------------------|-----------------------------------------------| -| `pathname` | 任意の文字列。 | -| `application_id` | 任意の文字列。 | -| `session_id` | 任意の文字列。 | -| `request_timeout` | 非負の数。 | -| `max_open_connections` | 非負の数、ゼロより大きい。 | -| `compression_request` | ブール値。下記参照 (1) | -| `compression_response` | ブール値。 | -| `log_level` | 許可される値: `OFF`, `TRACE`, `DEBUG`, `INFO`, `WARN`, `ERROR`。 | -| `keep_alive_enabled` | ブール値。 | -| `clickhouse_setting_*` または `ch_*` | 下記参照 (2) | -| `http_header_*` | 下記参照 (3) | -| (Node.jsのみ) `keep_alive_idle_socket_ttl` | 非負の数。 | - -- (1) ブール値の場合、有効な値は `true`/`1` と `false`/`0` です。 -- (2) `clickhouse_setting_` または `ch_` で始まる任意のパラメータは、このプレフィックスが削除され、残りがクライアントの `clickhouse_settings` に追加されます。たとえば、 `?ch_async_insert=1&ch_wait_for_async_insert=1` は次のように同じになります: - -```ts -createClient({ - clickhouse_settings: { - async_insert: 1, - wait_for_async_insert: 1, - }, -}) -``` - -注意:`clickhouse_settings` のブール値は、URL内で `1`/`0` として渡す必要があります。 - -- (3) (2) と同様ですが、 `http_header` 構成用です。たとえば、 `?http_header_x-clickhouse-auth=foobar` は次のように相当します: - -```ts -createClient({ - http_headers: { - 'x-clickhouse-auth': 'foobar', - }, -}) -``` - -### 接続 {#connecting} -#### 接続詳細を収集する {#gather-your-connection-details} - - -#### 接続の概要 {#connection-overview} - -クライアントは、HTTP(S)プロトコルを介して接続を実装しています。RowBinaryのサポートは進行中であり、[関連の問題](https://github.com/ClickHouse/clickhouse-js/issues/216)を参照してください。 - -次の例は、ClickHouse Cloudに対する接続の設定方法を示しています。`url`(プロトコルとポートを含む)および `password` の値が環境変数を介して指定されていると仮定し、`default` ユーザーが使用されます。 - -**例:** 環境変数を使用してNode.jsクライアントインスタンスを作成します。 - -```ts -const client = createClient({ - url: process.env.CLICKHOUSE_HOST ?? 'http://localhost:8123', - username: process.env.CLICKHOUSE_USER ?? 'default', - password: process.env.CLICKHOUSE_PASSWORD ?? 
'', -}) -``` - -クライアントリポジトリには、環境変数を使用した複数の例が含まれています。たとえば、[ClickHouse Cloudでのテーブルの作成](https://github.com/ClickHouse/clickhouse-js/blob/main/examples/create_table_cloud.ts)、[非同期挿入の使用](https://github.com/ClickHouse/clickhouse-js/blob/main/examples/async_insert.ts)、その他多数があります。 - -#### 接続プール (Node.jsのみ) {#connection-pool-nodejs-only} - -毎回リクエストごとに接続を確立するオーバーヘッドを避けるために、クライアントはClickHouseへの接続のプールを作成し、再利用します。Keep-Aliveメカニズムを利用しています。デフォルトでは、Keep-Aliveは有効で、接続プールのサイズは `10` に設定されていますが、`max_open_connections` [設定オプション](./js.md#configuration) を使って変更できます。 - -プール内の同じ接続が後続のクエリに使用される保証はありませんが、ユーザーが `max_open_connections: 1` を設定した場合は、必要に応じて使用されることがあります。これは稀に必要ですが、ユーザーが一時テーブルを使用している場合には必要になることがあります。 - -さらに、[Keep-Aliveの構成](./js.md#keep-alive-configuration-nodejs-only)も参照してください。 - -### クエリID {#query-id} - -クエリやステートメントを送信するすべてのメソッド(`command`、`exec`、`insert`、`select`)は、結果に `query_id` を提供します。このユニーク識別子は、クエリごとにクライアントによって割り当てられ、`system.query_log` からデータを取得する際に役立つ可能性があります。これは、[サーバー設定](/operations/server-configuration-parameters/settings)で有効になっている場合、または長時間実行されているクエリをキャンセルする際に役立ちます([例を参照](https://github.com/ClickHouse/clickhouse-js/blob/main/examples/cancel_query.ts))。必要に応じて、`query_id` は `command`、`query`、`exec`、`insert` メソッドのパラメータでユーザーによって上書きすることができます。 - -:::tip -`query_id` パラメータを上書きしている場合は、各呼び出しに対してその一意性を確保する必要があります。ランダムUUIDは良い選択です。 -::: - -### すべてのクライアントメソッドの基本パラメータ {#base-parameters-for-all-client-methods} - -すべてのクライアントメソッドに適用できるいくつかのパラメータがあります([query](./js.md#query-method)/[command](./js.md#command-method)/[insert](./js.md#insert-method)/[exec](./js.md#exec-method))。 - -```ts -interface BaseQueryParams { - // クエリレベルで適用できるClickHouse設定。 - clickhouse_settings?: ClickHouseSettings - // クエリバインディングのためのパラメータ。 - query_params?: Record - // 実行中のクエリをキャンセルするためのAbortSignalインスタンス。 - abort_signal?: AbortSignal - // query_idの上書き; 指定されていない場合、ランダム識別子が自動的に生成されます。 - query_id?: string - // session_idの上書き; 指定されていない場合、セッションIDはクライアント設定から取得します。 - session_id?: string - // credentialsの上書き; 指定されていない場合、クライアントの資格情報が使用されます。 - auth?: { username: string, password: string } - // このクエリに使用する特定のロールのリスト。クライアント設定で設定されたロールを上書きします。 - role?: string | Array -} -``` - -### クエリメソッド {#query-method} - -これは、`SELECT`などの応答を持つ可能性のあるほとんどのステートメントや、`CREATE TABLE`のようなDDLを送信するために使用され、待機する必要があります。戻り値の結果セットはアプリケーションで消費されることが期待されます。 - -:::note -データ挿入用には専用のメソッド [insert](./js.md#insert-method) があり、DDL用には [command](./js.md#command-method) があります。 -::: - -```ts -interface QueryParams extends BaseQueryParams { - // 実行するクエリ(データを返す可能性があります)。 - query: string - // 結果データセットのフォーマット。デフォルト: JSON。 - format?: DataFormat -} - -interface ClickHouseClient { - query(params: QueryParams): Promise -} -``` - -さらに情報: [すべてのクライアントメソッドの基本パラメータ](./js.md#base-parameters-for-all-client-methods)。 - -:::tip -`query` にはFORMAT句を指定しないでください。 `format` パラメータを使用してください。 -::: - -#### 結果セットと行の抽象化 {#result-set-and-row-abstractions} - -`ResultSet` は、アプリケーション内のデータ処理のためにいくつかの便利なメソッドを提供します。 - -Node.jsの `ResultSet` 実装は内部で `Stream.Readable` を使っていますが、WebバージョンはWeb APIの `ReadableStream` を使用しています。 - -`ResultSet` を消費するには、 `text` または `json` メソッドを呼び出して、クエリによって返されたすべての行のセットをメモリにロードできます。 - -`ResultSet` はできるだけ早く消費し始めるべきです。これはレスポンスストリームをオープンに保ち、結果として基礎となる接続をビジー状態にします。クライアントはアプリケーションが潜在的に過剰なメモリ使用量を避けるために、受信データをバッファリングしません。 - -一方、大きすぎて一度にメモリに収まらない場合は、 `stream` メソッドを呼び出し、ストリーミングモードでデータを処理できます。レスポンスチャンクのそれぞれは、各チャンクのサイズによって異なるおおよそ小さな行の配列に変換され、サーバーから受け取ります(一度に一つのチャンク)。チャンクサイズは特定のチャンク、個別の行のサイズに依存します。 - 
-ストリーミングに適したデータフォーマットのリストについては、[サポートされるデータフォーマット](./js.md#supported-data-formats)を参照して、あなたのケースに最適なフォーマットを決定してください。たとえば、JSONオブジェクトをストリーミングしたい場合は、[JSONEachRow](/sql-reference/formats#jsoneachrow)を選択すると、各行がJSオブジェクトとして解析されます。また、各行が値のコンパクトな配列になるよりコンパクトな[JSONCompactColumns](/sql-reference/formats#jsoncompactcolumns)フォーマットも選択できます。ストリーミングファイルも参照してください。[streaming files](./js.md#streaming-files-nodejs-only)。 - -:::important -`ResultSet` またはそのストリームが完全に消費されない場合、非活動期間の `request_timeout` の後に破棄されます。 -::: - -```ts -interface BaseResultSet { - // 上記の「クエリID」セクションを参照してください。 - query_id: string - - // ストリーム全体を消費し、内容を文字列として取得します。 - // これは任意のDataFormatで使用できます。 - // 一度だけ呼び出す必要があります。 - text(): Promise - - // ストリーム全体を消費し、内容をJSオブジェクトとして解析します。 - // JSONフォーマットでのみ使用できます。 - // 一度だけ呼び出す必要があります。 - json(): Promise - - // ストリーム可能なレスポンスのための読み取りストリームを返します。 - // ストリームの各反復は、選択したDataFormatの行の配列を提供します。 - // 一度だけ呼び出す必要があります。 - stream(): Stream -} - -interface Row { - // 行の内容をプレーンな文字列として取得します。 - text: string - - // 行の内容をJSオブジェクトとして解析します。 - json(): T -} -``` - -**例:** (Node.js/Web) `JSONEachRow`フォーマットでの結果データセットを持つクエリで、ストリーム全体を消費し、内容をJSオブジェクトとして解析する。 -[ソースコード](https://github.com/ClickHouse/clickhouse-js/blob/main/examples/array_json_each_row.ts)。 - -```ts -const resultSet = await client.query({ - query: 'SELECT * FROM my_table', - format: 'JSONEachRow', -}) -const dataset = await resultSet.json() // または `row.text` でJSONの解析を避ける -``` - -**例:** (Node.jsのみ) `JSONEachRow`フォーマットでのクエリ結果をストリーミングする、古典的な `on('data')` アプローチを使用。これは `for await const` 構文と交換可能です。 -[ソースコード](https://github.com/ClickHouse/clickhouse-js/blob/main/examples/node/select_streaming_json_each_row.ts)。 - -```ts -const rows = await client.query({ - query: 'SELECT number FROM system.numbers_mt LIMIT 5', - format: 'JSONEachRow', // または JSONCompactEachRow, JSONStringsEachRow など。 -}) -const stream = rows.stream() -stream.on('data', (rows: Row[]) => { - rows.forEach((row: Row) => { - console.log(row.json()) // または `row.text` でJSONの解析を避ける - }) -}) -await new Promise((resolve, reject) => { - stream.on('end', () => { - console.log('完了しました!') - resolve(0) - }) - stream.on('error', reject) -}) -``` - -**例:** (Node.jsのみ) `CSV`フォーマットでのクエリ結果をストリーミングする、古典的な `on('data')` アプローチを使用。これは `for await const` 構文と交換可能です。 -[ソースコード](https://github.com/ClickHouse/clickhouse-js/blob/main/examples/node/select_streaming_text_line_by_line.ts)。 - -```ts -const resultSet = await client.query({ - query: 'SELECT number FROM system.numbers_mt LIMIT 5', - format: 'CSV', // または TabSeparated, CustomSeparated など。 -}) -const stream = resultSet.stream() -stream.on('data', (rows: Row[]) => { - rows.forEach((row: Row) => { - console.log(row.text) - }) -}) -await new Promise((resolve, reject) => { - stream.on('end', () => { - console.log('完了しました!') - resolve(0) - }) - stream.on('error', reject) -}) -``` - -**例:** (Node.jsのみ) `JSONEachRow`フォーマットでJSオブジェクトとしてストリーミングクエリ結果を消費する、 `for await const` 構文を使用。これは古典的な `on('data')` アプローチと交換可能です。 -[ソースコード](https://github.com/ClickHouse/clickhouse-js/blob/main/examples/node/select_streaming_json_each_row_for_await.ts)。 - -```ts -const resultSet = await client.query({ - query: 'SELECT number FROM system.numbers LIMIT 10', - format: 'JSONEachRow', // または JSONCompactEachRow, JSONStringsEachRow など。 -}) -for await (const rows of resultSet.stream()) { - rows.forEach(row => { - console.log(row.json()) - }) -} -``` - -:::note -`for await const` 構文は、 `on('data')` アプローチよりもコードが少なくなりますが、パフォーマンスに悪影響を与える可能性があります。 -詳細は [Node.jsリポジトリのこの問題](https://github.com/nodejs/node/issues/31979) を参照してください。 -::: - 
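**例:** 実行中のクエリのキャンセル。前述の基本パラメータ `abort_signal` と標準の `AbortController` を組み合わせた、前提を簡略化したスケッチです(クエリ内容やタイムアウト値は例示用)。完全な例は [cancel_query.ts](https://github.com/ClickHouse/clickhouse-js/blob/main/examples/cancel_query.ts) を参照してください。

```ts
const controller = new AbortController()

// 任意の契機(タイムアウトやユーザー操作など)でクエリを打ち切ります
setTimeout(() => controller.abort(), 1000)

try {
  const resultSet = await client.query({
    query: 'SELECT number FROM system.numbers LIMIT 1e9', // 例示用の長時間クエリ
    format: 'JSONEachRow',
    abort_signal: controller.signal,
  })
  // ... resultSet.stream() などで通常どおり消費します
} catch (err) {
  // キャンセルされた場合はここでエラーを処理します
  console.error(err)
}
```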
-**例:** (Webのみ) オブジェクトの `ReadableStream` を反復処理します。 - -```ts -const resultSet = await client.query({ - query: 'SELECT * FROM system.numbers LIMIT 10', - format: 'JSONEachRow' -}) - -const reader = resultSet.stream().getReader() -while (true) { - const { done, value: rows } = await reader.read() - if (done) { break } - rows.forEach(row => { - console.log(row.json()) - }) -} -``` - -### 挿入メソッド {#insert-method} - -これはデータ挿入のための主要なメソッドです。 - -```ts -export interface InsertResult { - query_id: string - executed: boolean -} - -interface ClickHouseClient { - insert(params: InsertParams): Promise -} -``` - -戻り値の型は最小限です。サーバーからデータが返されないことを期待しており、レスポンスストリームは即座に排出されます。 - -挿入メソッドに空の配列が提供された場合、INSERT文はサーバーに送信されません。その代わり、メソッドは即座に `{ query_id: '...', executed: false }` で解決されます。この場合、メソッドのパラメータに `query_id` が指定されていなければ、結果の中で空の文字列になります。クライアントによって生成されたランダムUUIDを返すと、そんな `query_id` のクエリは `system.query_log` テーブルに存在しないため、混乱を避けるためです。 - -もし挿入文がサーバーに送信された場合、 `executed` フラグは `true` になります。 - -#### 挿入メソッドとNode.jsでのストリーミング {#insert-method-and-streaming-in-nodejs} - -これは、指定された [データフォーマット](./js.md#supported-data-formats) に応じて `Stream.Readable` またはプレーンな `Array` のいずれかとして動作することができます。ファイルストリーミングに関するこのセクションも参照してください。[file streaming](./js.md#streaming-files-nodejs-only)。 - -挿入メソッドは待機されるべきですが、入力ストリームを指定し、ストリームが完成したときに `insert` 操作を待機することも可能です(これにより、`insert` プロミスが解決されます)。これは、イベントリスナーや類似のシナリオで有用である可能性がありますが、エラー処理はクライアント側で多くのエッジケースがあるため、重要でない場合があります。その代わりに、[非同期挿入](/optimize/asynchronous-inserts)の使用を検討してください。これについては [この例](https://github.com/ClickHouse/clickhouse-js/blob/main/examples/async_insert_without_waiting.ts)が示されています。 - -:::tip -挿入文がこのメソッドでモデル化するのが難しい場合は、[commandメソッド](./js.md#command-method)の使用を検討してください。 - -[INSERT INTO ... VALUES](https://github.com/ClickHouse/clickhouse-js/blob/main/examples/insert_values_and_functions.ts) や [INSERT INTO ... 
SELECT](https://github.com/ClickHouse/clickhouse-js/blob/main/examples/insert_from_select.ts) の例での使用方法も面白いと思います。 -::: - -```ts -interface InsertParams extends BaseQueryParams { - // データを挿入するテーブル名 - table: string - // 挿入するデータセット。 - values: ReadonlyArray | Stream.Readable - // 挿入するデータセットのフォーマット。 - format?: DataFormat - // データが挿入されるカラムを指定できます。 - // - `['a', 'b']`のような配列は、`INSERT INTO table (a, b) FORMAT DataFormat`を生成します。 - // - `{ except: ['a', 'b'] }`のようなオブジェクトは、`INSERT INTO table (* EXCEPT (a, b)) FORMAT DataFormat`を生成します。 - // デフォルトでは、すべてのカラムにデータが挿入され、生成されるステートメントは `INSERT INTO table FORMAT DataFormat` になります。 - columns?: NonEmptyArray | { except: NonEmptyArray } -} -``` - -さらに情報: [すべてのクライアントメソッドの基本パラメータ](./js.md#base-parameters-for-all-client-methods)。 - -:::important -`abort_signal` でキャンセルされたリクエストは、挿入が行われなかったことを保証するものではありません。サーバーはキャンセルの前にストリーミングされたデータの一部を受け取っている可能性があるためです。 -::: - -**例:** (Node.js/Web) 値の配列を挿入します。 -[ソースコード](https://github.com/ClickHouse/clickhouse-js/blob/main/examples/array_json_each_row.ts)。 - -```ts -await client.insert({ - table: 'my_table', - // 構造は、今回の例において希望するフォーマットに一致する必要があります、JSONEachRow - values: [ - { id: 42, name: 'foo' }, - { id: 42, name: 'bar' }, - ], - format: 'JSONEachRow', -}) -``` - -**例:** (Node.jsのみ) CSVファイルからのストリームを挿入します。 -[ソースコード](https://github.com/ClickHouse/clickhouse-js/blob/main/examples/node/insert_file_stream_csv.ts)。また、[ファイルストリーミング](./js.md#streaming-files-nodejs-only)も参照してください。 - -```ts -await client.insert({ - table: 'my_table', - values: fs.createReadStream('./path/to/a/file.csv'), - format: 'CSV', -}) -``` - -**例:** 挿入文から特定のカラムを除外します。 - -次のようなテーブル定義があるとします: - -```sql -CREATE OR REPLACE TABLE mytable -(id UInt32, message String) -ENGINE MergeTree() -ORDER BY (id) -``` - -特定のカラムのみを挿入します: - -```ts -// 生成されるステートメント: INSERT INTO mytable (message) FORMAT JSONEachRow -await client.insert({ - table: 'mytable', - values: [{ message: 'foo' }], - format: 'JSONEachRow', - // この行の `id` カラムの値は0になります(UInt32のデフォルト) - columns: ['message'], -}) -``` - -特定のカラムを除外します: - -```ts -// 生成されるステートメント: INSERT INTO mytable (* EXCEPT (message)) FORMAT JSONEachRow -await client.insert({ - table: tableName, - values: [{ id: 144 }], - format: 'JSONEachRow', - // この行の `message` カラムの値は空の文字列になります - columns: { - except: ['message'], - }, -}) -``` - -詳細については [ソースコード](https://github.com/ClickHouse/clickhouse-js/blob/main/examples/insert_exclude_columns.ts) を参照してください。 - -**例:** クライアントインスタンスに提供されたデータベースとは異なるデータベースに挿入します。 -[ソースコード](https://github.com/ClickHouse/clickhouse-js/blob/main/examples/insert_into_different_db.ts)。 - -```ts -await client.insert({ - table: 'mydb.mytable', // データベースを含む完全修飾名 - values: [{ id: 42, message: 'foo' }], - format: 'JSONEachRow', -}) -``` -#### Webバージョンの制限 {#web-version-limitations} - -現在、`@clickhouse/client-web`での挿入は`Array`および`JSON*`形式のみがサポートされています。 -ストリームの挿入は、ブラウザの互換性が低いため、まだウェブバージョンではサポートされていません。 - -その結果、ウェブバージョンの`InsertParams`インターフェースはNode.jsバージョンとは少し異なり、`values`は`ReadonlyArray`型のみに制限されています: - -```ts -interface InsertParams extends BaseQueryParams { - // データを挿入するテーブル名 - table: string - // 挿入するデータセット - values: ReadonlyArray - // 挿入するデータセットの形式 - format?: DataFormat - // データを挿入するカラムを指定できます。 - // - `['a', 'b']`のような配列は次のように生成します: `INSERT INTO table (a, b) FORMAT DataFormat` - // - `{ except: ['a', 'b'] }`のようなオブジェクトは次のように生成します: `INSERT INTO table (* EXCEPT (a, b)) FORMAT DataFormat` - // デフォルトでは、データはテーブルのすべてのカラムに挿入され、 - // 生成されるステートメントは次のようになります: `INSERT INTO table FORMAT DataFormat`。 - columns?: NonEmptyArray | { except: NonEmptyArray } -} -``` - 
-これは将来的に変更される可能性があります。詳細については、[すべてのクライアントメソッドの基本パラメーター](./js.md#base-parameters-for-all-client-methods)を参照してください。 -### コマンドメソッド {#command-method} - -出力がないステートメントや、形式句が適用できない場合、またはレスポンスにまったく興味がない場合に使用できます。このようなステートメントの例として、`CREATE TABLE`や`ALTER TABLE`があります。 - -awaitが必要です。 - -レスポンスストリームは即座に破棄され、基盤となるソケットは解放されます。 - -```ts -interface CommandParams extends BaseQueryParams { - // 実行するステートメント - query: string -} - -interface CommandResult { - query_id: string -} - -interface ClickHouseClient { - command(params: CommandParams): Promise -} -``` - -詳細については、[すべてのクライアントメソッドの基本パラメーター](./js.md#base-parameters-for-all-client-methods)を参照してください。 - -**例:** (Node.js/Web) ClickHouse Cloudでテーブルを作成します。 -[ソースコード](https://github.com/ClickHouse/clickhouse-js/blob/main/examples/create_table_cloud.ts)。 - -```ts -await client.command({ - query: ` - CREATE TABLE IF NOT EXISTS my_cloud_table - (id UInt64, name String) - ORDER BY (id) - `, - // レスポンスコードの後にクエリ処理エラーが発生した場合、クライアントにHTTPヘッダーがすでに送信されている事態を避けるために、クラスターの使用には推奨されます。 - // https://clickhouse.com/docs/interfaces/http/#response-buffering参照 - clickhouse_settings: { - wait_end_of_query: 1, - }, -}) -``` - -**例:** (Node.js/Web) セルフホストのClickHouseインスタンスでテーブルを作成します。 -[ソースコード](https://github.com/ClickHouse/clickhouse-js/blob/main/examples/create_table_single_node.ts)。 - -```ts -await client.command({ - query: ` - CREATE TABLE IF NOT EXISTS my_table - (id UInt64, name String) - ENGINE MergeTree() - ORDER BY (id) - `, -}) -``` - -**例:** (Node.js/Web) INSERT FROM SELECT - -```ts -await client.command({ - query: `INSERT INTO my_table SELECT '42'`, -}) -``` - -:::important -`abort_signal`でキャンセルされたリクエストは、ステートメントがサーバーによって実行されなかったことを保証しません。 -::: -### Execメソッド {#exec-method} - -`query`/`insert`に適合しないカスタムクエリがあり、その結果に興味がある場合、`command`の代わりに`exec`を使用できます。 - -`exec`は、アプリケーション側で消費するか、破棄する必要があるリーダブルストリームを返します。 - -```ts -interface ExecParams extends BaseQueryParams { - // 実行するステートメント - query: string -} - -interface ClickHouseClient { - exec(params: ExecParams): Promise -} -``` - -詳細については、[すべてのクライアントメソッドの基本パラメーター](./js.md#base-parameters-for-all-client-methods)を参照してください。 - -ストリームの戻り値の型はNode.jsとWebバージョンで異なります。 - -Node.js: - -```ts -export interface QueryResult { - stream: Stream.Readable - query_id: string -} -``` - -Web: - -```ts -export interface QueryResult { - stream: ReadableStream - query_id: string -} -``` -### Ping {#ping} - -接続状態を確認するために提供される`ping`メソッドは、サーバーに到達可能であれば`true`を返します。 - -サーバーに到達できない場合、基盤となるエラーも結果に含まれます。 - -```ts -type PingResult = - | { success: true } - | { success: false; error: Error } - -interface ClickHouseClient { - ping(): Promise -} -``` - -Pingは、アプリケーションのスタート時にサーバーが利用可能かどうかを確認するのに役立つツールです。特にClickHouse Cloudでは、インスタンスがアイドル状態でping後に起動する可能性があります。 - -**例:** (Node.js/Web) ClickHouseサーバーインスタンスにpingを送信します。注意: Webバージョンでは、キャプチャされたエラーは異なります。 -[ソースコード](https://github.com/ClickHouse/clickhouse-js/blob/main/examples/ping.ts)。 - -```ts -const result = await client.ping(); -if (!result.success) { - // 結果エラーを処理する -} -``` - -注意: `/ping`エンドポイントはCORSを実装していないため、Webバージョンでは同様の結果を得るために`SELECT 1`を使用します。 -### Close (Node.jsのみ) {#close-nodejs-only} - -すべてのオープン接続を閉じ、リソースを解放します。Webバージョンでは効果がありません。 - -```ts -await client.close() -``` -## ストリーミングファイル (Node.jsのみ) {#streaming-files-nodejs-only} - -クライアントリポジトリには、一般的なデータ形式(NDJSON、CSV、Parquet)のいくつかのファイルストリーミングの例があります。 - -- [NDJSONファイルからストリーミング](https://github.com/ClickHouse/clickhouse-js/blob/main/examples/node/insert_file_stream_ndjson.ts) -- 
[CSVファイルからストリーミング](https://github.com/ClickHouse/clickhouse-js/blob/main/examples/node/insert_file_stream_csv.ts) -- [Parquetファイルからストリーミング](https://github.com/ClickHouse/clickhouse-js/blob/main/examples/node/insert_file_stream_parquet.ts) -- [Parquetファイルにストリーミング](https://github.com/ClickHouse/clickhouse-js/blob/main/examples/node/select_parquet_as_file.ts) - -他の形式をファイルにストリーミングするのはParquetと似たようなもので、唯一の違いは`query`呼び出しで使用される形式(`JSONEachRow`、`CSV`など)と出力ファイル名のみです。 -## サポートされているデータ形式 {#supported-data-formats} - -クライアントはデータ形式をJSONまたはテキストとして扱います。 - -`format`をJSONファミリーのいずれか(`JSONEachRow`、`JSONCompactEachRow`など)として指定すると、クライアントはワイヤー経由の通信中にデータをシリアライズおよびデシリアライズします。 - -「生」のテキスト形式(`CSV`、`TabSeparated`および`CustomSeparated`ファミリー)で提供されるデータは、追加の変換なしでワイヤーを介して送信されます。 - -:::tip -JSONを一般的な形式として扱うことと、[ClickHouse JSON形式](/sql-reference/formats#json)との間で混乱が生じる可能性があります。 - -クライアントは、[JSONEachRow](/sql-reference/formats#jsoneachrow)などの形式でストリーミングJSONオブジェクトをサポートしています(他のストリーミングフレンドリーな形式のテーブル概要も参照;クライアントリポジトリの`select_streaming_` [例も参照](https://github.com/ClickHouse/clickhouse-js/tree/main/examples/node))。 - -[ClickHouse JSON](/sql-reference/formats#json)やその他のいくつかの形式は、レスポンス内で単一オブジェクトとして表され、クライアントによってストリーミングできないことに注意してください。 -::: - -| フォーマット | 入力 (配列) | 入力 (オブジェクト) | 入力/出力 (ストリーム) | 出力 (JSON) | 出力 (テキスト) | -|--------------------------------------------|---------------|----------------|-----------------------|---------------|----------------| -| JSON | ❌ | ✔️ | ❌ | ✔️ | ✔️ | -| JSONCompact | ❌ | ✔️ | ❌ | ✔️ | ✔️ | -| JSONObjectEachRow | ❌ | ✔️ | ❌ | ✔️ | ✔️ | -| JSONColumnsWithMetadata | ❌ | ✔️ | ❌ | ✔️ | ✔️ | -| JSONStrings | ❌ | ❌️ | ❌ | ✔️ | ✔️ | -| JSONCompactStrings | ❌ | ❌ | ❌ | ✔️ | ✔️ | -| JSONEachRow | ✔️ | ❌ | ✔️ | ✔️ | ✔️ | -| JSONEachRowWithProgress | ❌️ | ❌ | ✔️ ❗- 詳細は以下を参照 | ✔️ | ✔️ | -| JSONStringsEachRow | ✔️ | ❌ | ✔️ | ✔️ | ✔️ | -| JSONCompactEachRow | ✔️ | ❌ | ✔️ | ✔️ | ✔️ | -| JSONCompactStringsEachRow | ✔️ | ❌ | ✔️ | ✔️ | ✔️ | -| JSONCompactEachRowWithNames | ✔️ | ❌ | ✔️ | ✔️ | ✔️ | -| JSONCompactEachRowWithNamesAndTypes | ✔️ | ❌ | ✔️ | ✔️ | ✔️ | -| JSONCompactStringsEachRowWithNames | ✔️ | ❌ | ✔️ | ✔️ | ✔️ | -| JSONCompactStringsEachRowWithNamesAndTypes | ✔️ | ❌ | ✔️ | ✔️ | ✔️ | -| CSV | ❌ | ❌ | ✔️ | ❌ | ✔️ | -| CSVWithNames | ❌ | ❌ | ✔️ | ❌ | ✔️ | -| CSVWithNamesAndTypes | ❌ | ❌ | ✔️ | ❌ | ✔️ | -| TabSeparated | ❌ | ❌ | ✔️ | ❌ | ✔️ | -| TabSeparatedRaw | ❌ | ❌ | ✔️ | ❌ | ✔️ | -| TabSeparatedWithNames | ❌ | ❌ | ✔️ | ❌ | ✔️ | -| TabSeparatedWithNamesAndTypes | ❌ | ❌ | ✔️ | ❌ | ✔️ | -| CustomSeparated | ❌ | ❌ | ✔️ | ❌ | ✔️ | -| CustomSeparatedWithNames | ❌ | ❌ | ✔️ | ❌ | ✔️ | -| CustomSeparatedWithNamesAndTypes | ❌ | ❌ | ✔️ | ❌ | ✔️ | -| Parquet | ❌ | ❌ | ✔️ | ❌ | ✔️❗- 詳細は以下を参照 | - -Parquetでは、SELECTの主な使用ケースは、結果のストリームをファイルに書き込むことになるでしょう。クライアントリポジトリの[例](https://github.com/ClickHouse/clickhouse-js/blob/main/examples/node/select_parquet_as_file.ts)を参照してください。 - -`JSONEachRowWithProgress`は、ストリーム内で進行状況を報告することをサポートする出力専用形式です。詳細については[この例](https://github.com/ClickHouse/clickhouse-js/blob/main/examples/node/select_json_each_row_with_progress.ts)を参照してください。 - -ClickHouseの入力および出力形式の完全なリストは [ここ](https://interfaces/formats)で入手できます。 -## サポートされているClickHouseデータ型 {#supported-clickhouse-data-types} - -:::note -関連するJS型は、すべての`JSON*`形式に関連していますが、すべてを文字列として表す形式(例:`JSONStringEachRow`)を除きます。 -::: - -| 型 | ステータス | JS型 | -|--------------------|-----------------|----------------------------| -| UInt8/16/32 | ✔️ | number | -| UInt64/128/256 | ✔️ ❗- 以下を参照 | string | -| Int8/16/32 | ✔️ | number | -| Int64/128/256 | ✔️ ❗- 以下を参照 | string | -| 
Float32/64 | ✔️ | number | -| Decimal | ✔️ ❗- 以下を参照 | number | -| Boolean | ✔️ | boolean | -| String | ✔️ | string | -| FixedString | ✔️ | string | -| UUID | ✔️ | string | -| Date32/64 | ✔️ | string | -| DateTime32/64 | ✔️ ❗- 以下を参照 | string | -| Enum | ✔️ | string | -| LowCardinality | ✔️ | string | -| Array(T) | ✔️ | T[] | -| (新) JSON | ✔️ | object | -| Variant(T1, T2...) | ✔️ | T (バリアントによって異なる) | -| Dynamic | ✔️ | T (バリアントによって異なる) | -| Nested | ✔️ | T[] | -| Tuple | ✔️ | Tuple | -| Nullable(T) | ✔️ | TまたはnullのJS型 | -| IPv4 | ✔️ | string | -| IPv6 | ✔️ | string | -| Point | ✔️ | [ number, number ] | -| Ring | ✔️ | Array<Point\> | -| Polygon | ✔️ | Array<Ring\> | -| MultiPolygon | ✔️ | Array<Polygon\> | -| Map(K, V) | ✔️ | Record<K, V\> | - -ClickHouseのサポートされている形式の完全なリストは [ここ](https://sql-reference/data-types/)で入手できます。 -### Date/Date32型の注意点 {#datedate32-types-caveats} - -クライアントは追加の型変換なしで値を挿入するため、`Date`/`Date32`型のカラムには文字列としてのみ挿入できます。 - -**例:** `Date`型の値を挿入します。 -[ソースコード](https://github.com/ClickHouse/clickhouse-js/blob/ba387d7f4ce375a60982ac2d99cb47391cf76cec/__tests__/integration/date_time.test.ts)。 - -```ts -await client.insert({ - table: 'my_table', - values: [ { date: '2022-09-05' } ], - format: 'JSONEachRow', -}) -``` - -ただし、`DateTime`または`DateTime64`型のカラムを使用している場合、文字列とJS日付オブジェクトの両方を使用できます。JS日付オブジェクトは、そのまま`insert`に渡すことができ、`date_time_input_format`が`best_effort`に設定されています。詳細については[この例](https://github.com/ClickHouse/clickhouse-js/blob/main/examples/insert_js_dates.ts)を参照してください。 -### Decimal*型の注意点 {#decimal-types-caveats} - -`JSON*`ファミリー形式を使用してDecimalを挿入することが可能です。次のように定義されたテーブルがあるとします: - -```sql -CREATE TABLE my_table -( - id UInt32, - dec32 Decimal(9, 2), - dec64 Decimal(18, 3), - dec128 Decimal(38, 10), - dec256 Decimal(76, 20) -) -ENGINE MergeTree() -ORDER BY (id) -``` - -値を文字列表現を使用して精度損失なく挿入できます: - -```ts -await client.insert({ - table: 'my_table', - values: [{ - id: 1, - dec32: '1234567.89', - dec64: '123456789123456.789', - dec128: '1234567891234567891234567891.1234567891', - dec256: '12345678912345678912345678911234567891234567891234567891.12345678911234567891', - }], - format: 'JSONEachRow', -}) -``` - -ただし、`JSON*`形式でデータをクエリすると、ClickHouseはデフォルトでDecimalsを_数字_として返すため、精度が損なわれる可能性があります。これを避けるために、クエリでDecimalsを文字列にキャストできます: - -```ts -await client.query({ - query: ` - SELECT toString(dec32) AS decimal32, - toString(dec64) AS decimal64, - toString(dec128) AS decimal128, - toString(dec256) AS decimal256 - FROM my_table - `, - format: 'JSONEachRow', -}) -``` - -詳細については[この例](https://github.com/ClickHouse/clickhouse-js/blob/main/examples/insert_decimals.ts)を参照してください。 -### 整数型: Int64, Int128, Int256, UInt64, UInt128, UInt256 {#integral-types-int64-int128-int256-uint64-uint128-uint256} - -サーバーは数値として受け入れることができますが、`JSON*`ファミリー出力形式ではオーバーフローを避けるために文字列として返されます。これらの型の最大値は`Number.MAX_SAFE_INTEGER`よりも大きいためです。 - -ただし、この動作は[`output_format_json_quote_64bit_integers`設定](/operations/settings/formats#output_format_json_quote_64bit_integers)で変更できます。 - -**例:** 64ビット数のJSON出力形式を調整します。 - -```ts -const resultSet = await client.query({ - query: 'SELECT * from system.numbers LIMIT 1', - format: 'JSONEachRow', -}) - -expect(await resultSet.json()).toEqual([ { number: '0' } ]) -``` - -```ts -const resultSet = await client.query({ - query: 'SELECT * from system.numbers LIMIT 1', - format: 'JSONEachRow', - clickhouse_settings: { output_format_json_quote_64bit_integers: 0 }, -}) - -expect(await resultSet.json()).toEqual([ { number: 0 } ]) -``` -## ClickHouseの設定 {#clickhouse-settings} - 
-クライアントは[設定](/operations/settings/settings/)メカニズムを介してClickHouseの動作を調整できます。 -設定はクライアントインスタンスレベルで設定でき、すべてのリクエストに対して適用されます。 - -```ts -const client = createClient({ - clickhouse_settings: {} -}) -``` - -または、リクエストレベルで設定できます: - -```ts -client.query({ - clickhouse_settings: {} -}) -``` - -サポートされているClickHouse設定のすべての型宣言ファイルは [こちら](https://github.com/ClickHouse/clickhouse-js/blob/main/packages/client-common/src/settings.ts)で見つけることができます。 - -:::important -クエリが行われるユーザーが設定を変更するための十分な権限を持っていることを確認してください。 -::: -## 高度なトピック {#advanced-topics} -### パラメーター付きクエリ {#queries-with-parameters} - -パラメーター付きのクエリを作成し、クライアントアプリケーションからその値を渡すことができます。これにより、クライアント側で特定の動的値でクエリをフォーマットすることを避けることができます。 - -クエリを通常通りフォーマットし、アプリパラメーターからクエリに渡す値を波括弧内に以下の形式で置きます: - -```text -{: } -``` - -ここで: - -- `name` — プレースホルダー識別子。 -- `data_type` - アプリパラメーター値の[データ型](/sql-reference/data-types/)。 - -**例:** パラメーター付きクエリ。 -[ソースコード](https://github.com/ClickHouse/clickhouse-js/blob/main/examples/query_with_parameter_binding.ts)。 - -```ts -await client.query({ - query: 'SELECT plus({val1: Int32}, {val2: Int32})', - format: 'CSV', - query_params: { - val1: 10, - val2: 20, - }, -}) -``` - -詳細については https://clickhouse.com/docs/interfaces/cli#cli-queries-with-parameters-syntax を確認してください。 -### 圧縮 {#compression} - -注意: リクエストの圧縮は現在Webバージョンで利用できません。レスポンスの圧縮は通常通り機能します。Node.jsバージョンは両方をサポートしています。 - -大規模データセットをワイヤー経由で処理するアプリケーションは、圧縮を有効にすることで利点を得ることができます。現在、サポートされているのは`GZIP`のみで、[zlib](https://nodejs.org/docs/latest-v14.x/api/zlib.html)を使用します。 - -```typescript -createClient({ - compression: { - response: true, - request: true - } -}) -``` - -構成パラメーターは次の通りです: - -- `response: true`は、ClickHouseサーバーに圧縮されたレスポンスボディで応答するように指示します。デフォルト値: `response: false` -- `request: true`は、クライアントリクエストボディの圧縮を有効にします。デフォルト値: `request: false` -### ロギング (Node.jsのみ) {#logging-nodejs-only} - -:::important -ロギングは実験的機能であり、将来的に変更される可能性があります。 -::: - -デフォルトのロガー実装は、`stdout`に`console.debug/info/warn/error`メソッドを介してログレコードを出力します。 -`LoggerClass`を提供することでロギングロジックをカスタマイズでき、`level`パラメーター(デフォルトは`OFF`)を介して希望のログレベルを選択できます。 - -```typescript -// 3つのLogParams型がすべてクライアントによってエクスポートされています -interface LogParams { - module: string - message: string - args?: Record -} -type ErrorLogParams = LogParams & { err: Error } -type WarnLogParams = LogParams & { err?: Error } - -class MyLogger implements Logger { - trace({ module, message, args }: LogParams) { - // ... - } - debug({ module, message, args }: LogParams) { - // ... - } - info({ module, message, args }: LogParams) { - // ... - } - warn({ module, message, args }: WarnLogParams) { - // ... - } - error({ module, message, args, err }: ErrorLogParams) { - // ... 
- } -} - -const client = createClient({ - log: { - LoggerClass: MyLogger, - level: ClickHouseLogLevel - } -}) -``` - -現在、クライアントは以下のイベントをログに記録します: - -- `TRACE` - Keep-Aliveソケットのライフサイクルに関する低レベルの情報 -- `DEBUG` - レスポンス情報(認証ヘッダーとホスト情報は除く) -- `INFO` - 主に未使用で、クライアントが初期化されると現在のログレベルが表示されます -- `WARN` - 非致命的なエラー;pingリクエストの失敗が警告としてログに記録され、基盤となるエラーが返された結果に含まれます -- `ERROR` - `query`/`insert`/`exec`/`command`メソッドからの致命的なエラー、例えばリクエストの失敗など - -デフォルトのロガー実装は[こちら](https://github.com/ClickHouse/clickhouse-js/blob/main/packages/client-common/src/logger.ts)で見つけることができます。 -### TLS証明書 (Node.jsのみ) {#tls-certificates-nodejs-only} - -Node.jsクライアントは、オプションで基本(証明書機関のみ)および相互(証明書機関およびクライアント証明書)TLSをサポートします。 - -基本TLSの構成例。証明書が`certs`フォルダーにあり、CAファイル名が`CA.pem`であると仮定します: - -```ts -const client = createClient({ - url: 'https://:', - username: '', - password: '', // 必要に応じて - tls: { - ca_cert: fs.readFileSync('certs/CA.pem'), - }, -}) -``` - -クライアント証明書を使用した相互TLS構成の例: - -```ts -const client = createClient({ - url: 'https://:', - username: '', - tls: { - ca_cert: fs.readFileSync('certs/CA.pem'), - cert: fs.readFileSync(`certs/client.crt`), - key: fs.readFileSync(`certs/client.key`), - }, -}) -``` - -基本的なTLSと相互TLSの完全な例については[基本](https://github.com/ClickHouse/clickhouse-js/blob/main/examples/node/basic_tls.ts)と[相互](https://github.com/ClickHouse/clickhouse-js/blob/main/examples/node/mutual_tls.ts)をリポジトリで参照してください。 -### Keep-Aliveの構成 (Node.jsのみ) {#keep-alive-configuration-nodejs-only} - -クライアントはデフォルトで基盤となるHTTPエージェントのKeep-Aliveを有効にしているため、接続されたソケットはその後のリクエストに再利用され、`Connection: keep-alive`ヘッダーが送信されます。アイドル状態のソケットはデフォルトで2500ミリ秒接続プールに保持されます(このオプションの調整に関する[ノート](./js.md#adjusting-idle_socket_ttl)を参照)。 - -`keep_alive.idle_socket_ttl`はサーバー/LBの設定よりもかなり低い値にする必要があります。主な理由は、HTTP/1.1がサーバーにソケットをクライアントに通知せずに閉じることを許可するためです。サーバーまたはロードバランサーがクライアントの前に接続を閉じる場合、クライアントが閉じたソケットを再利用しようとして`socket hang up`エラーが発生する可能性があります。 - -`keep_alive.idle_socket_ttl`を変更する場合、サーバー/LBのKeep-Alive設定と常に同期させ、常にそれよりも**低く**設定して、サーバーがオープン接続を先に閉じないようにする必要があります。 -#### `idle_socket_ttl`の調整 {#adjusting-idle_socket_ttl} - -クライアントは`keep_alive.idle_socket_ttl`を2500ミリ秒に設定しています。これは安全なデフォルトとみなされます; サーバー側では、`keep_alive_timeout`がClickHouseのバージョン23.11以前で[3秒以下に設定されている場合があります](https://github.com/ClickHouse/ClickHouse/commit/1685cdcb89fe110b45497c7ff27ce73cc03e82d1)が、`config.xml`の変更なしで行われます。 - -:::warning -パフォーマンスに満足していて、問題が発生しない場合は、`keep_alive.idle_socket_ttl`設定の値を**増やさないこと**をお勧めします。この設定を増やすと、潜在的な「Socket hang-up」エラーが発生する可能性があります。さらに、アプリケーションが多くのクエリを送信し、クエリ間のダウンタイムがあまりない場合、デフォルト値は十分です。ソケットが長時間アイドル状態になることはなく、クライアントはそれらをプール内に保持します。 -::: - -サーバーレスポンスヘッダーで正しいKeep-Aliveタイムアウト値を確認するには、以下のコマンドを実行します。 - -```sh -curl -v --data-binary "SELECT 1" -``` - -レスポンスで`Connection`と`Keep-Alive`ヘッダーの値を確認してください。例えば: - -```text -< Connection: Keep-Alive -< Keep-Alive: timeout=10 -``` - -この場合、`keep_alive_timeout`は10秒であり、`keep_alive.idle_socket_ttl`を9000ミリ秒または9500ミリ秒に増加させて、アイドル状態のソケットをデフォルトよりも少し長く開いたままにすることができます。サーバーがクライアントよりも先に接続を閉じる場合に発生する可能性のある「Socket hang-up」エラーに注意し、エラーが消えるまで値を下げてください。 -#### Keep-Aliveのトラブルシューティング {#keep-alive-troubleshooting} - -Keep-Aliveを使用しているときに`socket hang up`エラーが発生する場合は、次のオプションでこの問題を解決できます: - -* ClickHouseサーバー設定で`keep_alive.idle_socket_ttl`設定をわずかに減らします。クライアントとサーバーの間に高いネットワーク遅延がある場合、サーバーが閉じようとしているソケットを取得した場合に発生する可能性があります。この場合、`keep_alive.idle_socket_ttl`を200〜500ミリ秒減らすことが有効な場合があります。 - -* このエラーが、データが出入りしないまま長時間実行されているクエリ(例えば、長時間の`INSERT FROM SELECT`)中に発生する場合、ロードバランサーがアイドル接続を閉じている可能性があります。この場合、長時間実行されるクエリの間にデータを強制的に送信することをお勧めします。これを次のClickHouse設定の組み合わせを使用して行うことができます: - - ```ts - const client = createClient({ 
- // ここでは、5分以上実行時間のあるクエリがあることを前提としています - request_timeout: 400_000, - /** これは、データが出入りしない長時間実行されるクエリ(たとえば、`INSERT FROM SELECT`など)の場合に - * LBタイムアウトの問題を回避するための設定です。LBがアイドル接続タイムアウトを120秒持っていると仮定し、 - * 110秒を「安全な」値として設定します。 */ - clickhouse_settings: { - send_progress_in_http_headers: 1, - http_headers_progress_interval_ms: '110000', // UInt64、文字列として渡す必要があります - }, - }) - ``` - ただし、最近のNode.jsバージョンで受信するヘッダーの合計サイズには制限があり、特定の進捗ヘッダーを受信後、約70〜80回のテストまで例外が発生します。 - - また、ワイヤ上の待機時間を完全に回避するまったく異なるアプローチを利用することも可能です。この機能により、接続が失われた場合に変異がキャンセルされることはありません。詳細については[この例(パート2)](https://github.com/ClickHouse/clickhouse-js/blob/main/examples/long_running_queries_timeouts.ts)を参照してください。 - -* Keep-Alive機能を完全に無効にすることも可能です。この場合、クライアントはすべてのリクエストに`Connection: close`ヘッダーを追加し、基盤となるHTTPエージェントは接続を再利用しません。`keep_alive.idle_socket_ttl`設定は無視され、アイドル状態のソケットは存在しなくなります。これにより、すべてのリクエストに新しい接続を確立する追加のオーバーヘッドが発生します。 - - ```ts - const client = createClient({ - keep_alive: { - enabled: false, - }, - }) - ``` -### 読み取り専用ユーザー {#read-only-users} - -[readonly=1 ユーザー](/operations/settings/permissions-for-queries#readonly)を使用してクライアントを使用すると、レスポンス圧縮は有効にできません。これは、`enable_http_compression`設定が必要です。この構成はエラーになります: - -```ts -const client = createClient({ - compression: { - response: true, // readonly=1 ユーザーでは機能しません - }, -}) -``` - -詳細なreadonly=1ユーザーの制限事項については[この例](https://github.com/ClickHouse/clickhouse-js/blob/main/examples/read_only_user.ts)を参照してください。 -### パス名を持つプロキシ {#proxy-with-a-pathname} - -ClickHouseインスタンスがプロキシの背後にあり、URLにパス名がある場合(例えば、http://proxy:8123/clickhouse_serverのように)、`clickhouse_server`を`pathname`構成オプションとして指定します(先頭スラッシュがあってもなくても可)。そうでなければ、`url`に直接指定すると、`database`オプションとみなされます。複数のセグメントがサポートされています。例:`/my_proxy/db`。 - -```ts -const client = createClient({ - url: 'http://proxy:8123', - pathname: '/clickhouse_server', -}) -``` -### 認証のあるリバースプロキシ {#reverse-proxy-with-authentication} - -ClickHouseデプロイの前に認証を持つリバースプロキシがある場合、`http_headers`設定を使用して、そこに必要なヘッダーを提供できます。 - -```ts -const client = createClient({ - http_headers: { - 'My-Auth-Header': '...', - }, -}) -``` -### カスタム HTTP/HTTPS エージェント (実験的, Node.js のみ) {#custom-httphttps-agent-experimental-nodejs-only} - -:::warning -これは実験的な機能であり、将来のリリースで後方互換性のない方法で変更される可能性があります。クライアントが提供するデフォルトの実装と設定は、ほとんどのユースケースに対して十分であるはずです。この機能は、本当に必要な場合にのみ使用してください。 -::: - -デフォルトでは、クライアントはクライアント設定(`max_open_connections`, `keep_alive.enabled`, `tls` など)で提供された設定を使用して、基礎となる HTTP(s) エージェントを構成し、ClickHouse サーバーへの接続を処理します。さらに、TLS 証明書が使用される場合、基礎となるエージェントは必要な証明書で構成され、正しい TLS 認証ヘッダーが適用されます。 - -1.2.0以降、カスタム HTTP(s) エージェントをクライアントに提供して、デフォルトの基礎エージェントを置き換えることが可能です。これは、複雑なネットワーク構成の場合に便利です。カスタムエージェントが提供された場合、次の条件が適用されます: -- `max_open_connections` および `tls` オプションは _無効_ となり、クライアントによって無視されます。これは基礎エージェントの構成の一部だからです。 -- `keep_alive.enabled` は `Connection` ヘッダーのデフォルト値だけを調整します(`true` -> `Connection: keep-alive`, `false` -> `Connection: close`)。 -- アイドルキープアライブソケット管理はまだ機能します(これはエージェントに結びついているのではなく、特定のソケットに結びついているため)が、`keep_alive.idle_socket_ttl` の値を `0` に設定することで、完全に無効にすることが可能になりました。 -#### カスタムエージェント使用例 {#custom-agent-usage-examples} - -証明書なしでカスタム HTTP(s) エージェントを使用する: - -```ts -const agent = new http.Agent({ // または https.Agent - keepAlive: true, - keepAliveMsecs: 2500, - maxSockets: 10, - maxFreeSockets: 10, -}) -const client = createClient({ - http_agent: agent, -}) -``` - -基本的な TLS と CA 証明書を使用したカスタム HTTPS エージェント: - -```ts -const agent = new https.Agent({ - keepAlive: true, - keepAliveMsecs: 2500, - maxSockets: 10, - maxFreeSockets: 10, - ca: fs.readFileSync('./ca.crt'), -}) -const client = createClient({ - url: 'https://myserver:8443', - http_agent: 
agent, - // カスタム HTTPS エージェントを使用すると、クライアントはデフォルトの HTTPS 接続実装を使用せず; ヘッダーは手動で提供する必要があります - http_headers: { - 'X-ClickHouse-User': 'username', - 'X-ClickHouse-Key': 'password', - }, - // 重要: 認証ヘッダーは TLS ヘッダーと競合するため、無効にします。 - set_basic_auth_header: false, -}) -``` - -相互 TLS を使用したカスタム HTTPS エージェント: - -```ts -const agent = new https.Agent({ - keepAlive: true, - keepAliveMsecs: 2500, - maxSockets: 10, - maxFreeSockets: 10, - ca: fs.readFileSync('./ca.crt'), - cert: fs.readFileSync('./client.crt'), - key: fs.readFileSync('./client.key'), -}) -const client = createClient({ - url: 'https://myserver:8443', - http_agent: agent, - // カスタム HTTPS エージェントを使用すると、クライアントはデフォルトの HTTPS 接続実装を使用せず; ヘッダーは手動で提供する必要があります - http_headers: { - 'X-ClickHouse-User': 'username', - 'X-ClickHouse-Key': 'password', - 'X-ClickHouse-SSL-Certificate-Auth': 'on', - }, - // 重要: 認証ヘッダーは TLS ヘッダーと競合するため、無効にします。 - set_basic_auth_header: false, -}) -``` - -証明書 _および_ カスタム _HTTPS_ エージェントを使用する場合、TLS ヘッダーと競合するため、デフォルトの認証ヘッダーを `set_basic_auth_header` 設定(1.2.0で導入)で無効にする必要があるでしょう。すべての TLS ヘッダーは手動で提供する必要があります。 -## 既知の制限 (Node.js/Web) {#known-limitations-nodejsweb} - -- 結果セットのデータマッパーはないため、言語のプリミティブのみが使用されます。特定のデータ型マッパーは、[RowBinary 形式のサポート](https://github.com/ClickHouse/clickhouse-js/issues/216)で計画されています。 -- 一部の [Decimal* と Date* / DateTime* データ型に関する注意事項](./js.md#datedate32-types-caveats) があります。 -- JSON* 系フォーマットを使用している場合、Int32 よりも大きい数は文字列として表現されます。これは、Int64+ 型の最大値が `Number.MAX_SAFE_INTEGER` より大きいためです。詳細は [整数型](./js.md#integral-types-int64-int128-int256-uint64-uint128-uint256) セクションをご覧ください。 -## 既知の制限 (Web) {#known-limitations-web} - -- SELECT クエリのストリーミングは機能しますが、INSERT では無効になっています(タイプレベルでも)。 -- リクエスト圧縮は無効で、構成は無視されます。レスポンス圧縮は機能します。 -- ロギングサポートはまだありません。 -## パフォーマンス最適化のためのヒント {#tips-for-performance-optimizations} - -- アプリケーションのメモリ消費を減らすためには、大きな INSERT (例えばファイルから) や SELECT の際にストリームを使用することを検討してください。イベントリスナーやそれに類するユースケースでは、[非同期 INSERT](/optimize/asynchronous-inserts) がもう一つの良い選択肢となり、クライアント側のバッチ処理を最小限に抑えるか、完全に回避することができます。非同期 INSERT の例は、[クライアントリポジトリ](https://github.com/ClickHouse/clickhouse-js/tree/main/examples) に、ファイル名のプレフィックスが `async_insert_` として提供されています。 -- クライアントはデフォルトでリクエストまたはレスポンス圧縮を有効にしません。ただし、大規模なデータセットを選択または挿入する際に、`ClickHouseClientConfigOptions.compression` を介して有効にすることを検討できます(リクエストまたはレスポンスのいずれか、または両方のために)。 -- 圧縮は重大なパフォーマンスペナルティを伴います。リクエストまたはレスポンスで圧縮を有効にすると、それぞれの SELECT または INSERT の速度に悪影響を与えるが、アプリケーションによって転送されるネットワークトラフィックの量を減少させます。 -## お問い合わせ {#contact-us} - -ご質問がある場合や支援が必要な場合は、[コミュニティ Slack](https://clickhouse.com/slack) (`#clickhouse-js` チャンネル)や [GitHub Issues](https://github.com/ClickHouse/clickhouse-js/issues) を通じてお気軽にお問い合わせください。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/js.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/js.md.hash deleted file mode 100644 index 7a6e3b441b5..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/js.md.hash +++ /dev/null @@ -1 +0,0 @@ -5f63d0f65cf1da0f diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/python/index.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/python/index.md deleted file mode 100644 index d942571595d..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/python/index.md +++ /dev/null @@ -1,904 +0,0 @@ ---- -sidebar_label: 'Python' -sidebar_position: 10 -keywords: -- 'clickhouse' -- 'python' -- 'client' -- 'connect' -- 'integrate' -slug: 
'/integrations/python' -description: 'The ClickHouse Connect project suite for connecting Python to ClickHouse' -title: 'Python Integration with ClickHouse Connect' ---- - -import ConnectionDetails from '@site/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_gather_your_details_http.mdx'; - - -# Python Integration with ClickHouse Connect -## Introduction {#introduction} - -ClickHouse Connectは、広範囲なPythonアプリケーションとの相互運用性を提供するコアデータベースドライバです。 - -- 主なインターフェースは、パッケージ `clickhouse_connect.driver` にある `Client` オブジェクトです。このコアパッケージには、ClickHouseサーバと通信するためのさまざまなヘルパークラスやユーティリティ関数、そして挿入および選択クエリの高度な管理のための「context」実装が含まれています。 -- `clickhouse_connect.datatypes` パッケージは、すべての非実験的なClickHouseデータ型の基本実装とサブクラスを提供します。その主な機能は、ClickHouseのデータをClickHouseの「ネイティブ」バイナリ列形式にシリアル化および逆シリアル化することです。この形式は、ClickHouseとクライアントアプリケーション間での効率的なデータ転送を実現します。 -- `clickhouse_connect.cdriver` パッケージのCython/Cクラスは、最も一般的なシリアル化および逆シリアル化を最適化し、純粋なPythonよりも大幅にパフォーマンスを向上させます。 -- パッケージ `clickhouse_connect.cc_sqlalchemy` には、`datatypes` および `dbi` パッケージに基づいて構築された制限付きの [SQLAlchemy](https://www.sqlalchemy.org/) ダイアレクトがあります。この制限された実装は、クエリ/カーソル機能に焦点を当てており、一般的にはSQLAlchemy DDLおよびORM操作をサポートしません。(SQLAlchemyはOLTPデータベースを対象としており、ClickHouseのOLAP指向データベースを管理するためには、より専門的なツールとフレームワークを推奨します。) -- コアドライバとClickHouse Connect SQLAlchemy実装は、ClickHouseをApache Supersetに接続するための推奨方法です。`ClickHouse Connect` データベース接続または `clickhousedb` SQLAlchemyダイアレクト接続文字列を使用してください。 - -このドキュメントは、ベータリリース0.8.2の時点において最新のものです。 - -:::note -公式のClickHouse Connect Pythonドライバは、ClickHouseサーバーとの通信にHTTPプロトコルを使用しています。これには、柔軟性の向上、HTTPバランサのサポート、JDBCベースのツールとの互換性向上などの利点があり、一方で、圧縮やパフォーマンスがわずかに低下することや、ネイティブTCPプロトコルの一部の複雑な機能がサポートされていないなどの欠点があります。一部のユースケースでは、ネイティブTCPプロトコルを使用する[Community Python drivers](/interfaces/third-party/client-libraries.md)の使用を検討することができます。 -::: -### Requirements and Compatibility {#requirements-and-compatibility} - -| Python | | Platform¹ | | ClickHouse | | SQLAlchemy² | | Apache Superset | | -|----------:|:--|----------------:|:--|-----------:|:---|------------:|:--|----------------:|:--| -| 2.x, <3.8 | ❌ | Linux (x86) | ✅ | <24.3³ | 🟡 | <1.3 | ❌ | <1.4 | ❌ | -| 3.8.x | ✅ | Linux (Aarch64) | ✅ | 24.3.x | ✅ | 1.3.x | ✅ | 1.4.x | ✅ | -| 3.9.x | ✅ | macOS (x86) | ✅ | 24.4-24.6³ | 🟡 | 1.4.x | ✅ | 1.5.x | ✅ | -| 3.10.x | ✅ | macOS (ARM) | ✅ | 24.7.x | ✅ | >=2.x | ❌ | 2.0.x | ✅ | -| 3.11.x | ✅ | Windows | ✅ | 24.8.x | ✅ | | | 2.1.x | ✅ | -| 3.12.x | ✅ | | | 24.9.x | ✅ | | | 3.0.x | ✅ | - -¹ClickHouse Connectは、リストされたプラットフォームに対して明示的にテストされています。さらに、優れた [`cibuildwheel`](https://cibuildwheel.readthedocs.io/en/stable/) プロジェクトに対して、すべてのアーキテクチャに対する未テストのバイナリホイール(C最適化)も構築されています。最後に、ClickHouse Connectは純粋なPythonとしても動作できるため、ソースインストールは最近のPythonインストールでも機能するはずです。 - -²SQLAlchemyのサポートは主にクエリ機能に限られています。完全なSQLAlchemy APIはサポートされていません。 - -³ClickHouse Connectは、現在サポートされているすべてのClickHouseバージョンに対してテストされています。HTTPプロトコルを使用しているため、ほとんどのその他のClickHouseバージョンでも正常に動作するはずですが、特定の高度なデータ型に対していくつかの不整合があるかもしれません。 -### Installation {#installation} - -PyPIからpipを使用してClickHouse Connectをインストールします: - -`pip install clickhouse-connect` - -ClickHouse Connectはソースからもインストールできます: -* [GitHubリポジトリ](https://github.com/ClickHouse/clickhouse-connect)を`git clone`します。 -* (オプション)C/Cythonの最適化をビルドして有効にするために、`pip install cython`を実行します。 -* プロジェクトのルートディレクトリに`cd`し、`pip install .`を実行します。 -### Support Policy {#support-policy} - -ClickHouse Connectは現在ベータ版であり、現在のベータリリースのみが積極的にサポートされています。問題を報告する前に、最新のバージョンに更新してください。問題は[GitHubプロジェクト](https://github.com/ClickHouse/clickhouse-connect/issues)に提出してください。ClickHouse 
Connectの将来のリリースは、リリース時点でアクティブにサポートされているClickHouseバージョンと互換性があることが保証されています(通常、最新の3つの `stable` と2つの最新の `lts` リリース)。 -### Basic Usage {#basic-usage} -### Gather your connection details {#gather-your-connection-details} - - -#### Establish a connection {#establish-a-connection} - -ClickHouseへの接続には2つの例が示されています: -- localhostでのClickHouseサーバーへの接続。 -- ClickHouse Cloudサービスへの接続。 -##### Use a ClickHouse Connect client instance to connect to a ClickHouse server on localhost: {#use-a-clickhouse-connect-client-instance-to-connect-to-a-clickhouse-server-on-localhost} - -```python -import clickhouse_connect - -client = clickhouse_connect.get_client(host='localhost', username='default', password='password') -``` -##### Use a ClickHouse Connect client instance to connect to a ClickHouse Cloud service: {#use-a-clickhouse-connect-client-instance-to-connect-to-a-clickhouse-cloud-service} - -:::tip -前に収集した接続詳細を使用してください。ClickHouse CloudサービスではTLSが必要なため、ポート8443を使用します。 -::: - -```python -import clickhouse_connect - -client = clickhouse_connect.get_client(host='HOSTNAME.clickhouse.cloud', port=8443, username='default', password='your password') -``` -#### Interact with your database {#interact-with-your-database} - -ClickHouse SQLコマンドを実行するには、クライアントの`command` メソッドを使用します: - -```python -client.command('CREATE TABLE new_table (key UInt32, value String, metric Float64) ENGINE MergeTree ORDER BY key') -``` - -バッチデータを挿入するには、クライアントの`insert` メソッドを使用して、行と値の二次元配列を指定します: - -```python -row1 = [1000, 'String Value 1000', 5.233] -row2 = [2000, 'String Value 2000', -107.04] -data = [row1, row2] -client.insert('new_table', data, column_names=['key', 'value', 'metric']) -``` - -ClickHouse SQLを使用してデータを取得するには、クライアントの`query` メソッドを使用します: - -```python -result = client.query('SELECT max(key), avg(metric) FROM new_table') -result.result_rows -Out[13]: [(2000, -50.9035)] -``` -## ClickHouse Connect Driver API {#clickhouse-connect-driver-api} - -***Note:*** ほとんどのAPIメソッドはオプション引数が多くあるため、キーワード引数を渡すことを推奨します。 - -*ここに文書化されていないメソッドはAPIの一部とは見なされず、削除または変更される可能性があります。* -### Client Initialization {#client-initialization} - -`clickhouse_connect.driver.client` クラスは、PythonアプリケーションとClickHouseデータベースサーバー間の主なインターフェースを提供します。`clickhouse_connect.get_client` 関数を使用してClientインスタンスを取得し、以下の引数を受け取ります: -#### Connection Arguments {#connection-arguments} - -| Parameter | Type | Default | Description | -|-----------------------|-------------|-------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| interface | str | http | httpまたはhttpsである必要があります。 | -| host | str | localhost | ClickHouseサーバーのホスト名またはIPアドレス。設定しない場合、`localhost` が使用されます。 | -| port | int | 8123または8443 | ClickHouseのHTTPまたはHTTPSポート。設定しない場合は8123にデフォルト設定され、*secure*=*True*または*interface*=*https*の場合は8443に設定されます。 | -| username | str | default | ClickHouseユーザー名。設定しない場合、`default`のClickHouseユーザーが使用されます。 | -| password | str | *<空文字列>* | *username* のパスワード。 | -| database | str | *None* | 接続のデフォルトデータベース。設定しない場合、ClickHouse Connectは*username* に対してデフォルトのデータベースを使用します。 | -| secure | bool | False | https/TLSを使用します。これにより、インターフェースまたはポート引数からの推測値が上書きされます。 | -| dsn | str | *None* | 標準DSN(データソース名)形式の文字列。この文字列から他の接続値(ホストやユーザーなど)が抽出されます。 | -| compress | bool or str | True | ClickHouseのHTTP挿入およびクエリ結果に対して圧縮を有効にします。[追加オプション(圧縮)](#compression)を参照してください。 | -| query_limit | int | 0(無制限) | 
任意の`query`応答に対して返される最大行数。この値をゼロに設定すると無制限の行が返されます。大きなクエリ制限は、結果がストリームされない場合、すべての結果が一度にメモリに読み込まれるため、メモリエラーが発生する可能性があります。 | -| query_retries | int | 2 | `query`リクエストの最大再試行回数。再試行可能なHTTP応答のみが再試行されます。`command`または`insert`リクエストは、意図しない重複リクエストを防ぐために自動的には再試行されません。 | -| connect_timeout | int | 10 | HTTP接続のタイムアウト(秒)。 | -| send_receive_timeout | int | 300 | HTTP接続の送受信タイムアウト(秒)。 | -| client_name | str | *None* | HTTPユーザーエージェントヘッダーに付加されるclient_name。この設定を使用してClickHouseのsystem.query_logでクライアントクエリを追跡します。 | -| pool_mgr | obj | *<デフォルトのPoolManager>* | 使用する`urllib3`ライブラリのPoolManager。異なるホストに対する複数の接続プールが必要な高度なユースケース向け。 | -| http_proxy | str | *None* | HTTPプロキシアドレス(HTTP_PROXY環境変数を設定するのと同等)。 | -| https_proxy | str | *None* | HTTPSプロキシアドレス(HTTPS_PROXY環境変数を設定するのと同等)。 | -| apply_server_timezone | bool | True | タイムゾーンに対応したクエリ結果にサーバーのタイムゾーンを使用します。[タイムゾーンの優先順位](#time-zones)を参照してください。 | -#### HTTPS/TLS Arguments {#httpstls-arguments} - -| Parameter | Type | Default | Description | -|------------------|------|---------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| verify | bool | True | HTTPS/TLSを使用している場合にClickHouseサーバーTLS/SSL証明書を検証します(ホスト名、有効期限など)。 | -| ca_cert | str | *None* | *verify*=*True*の場合、ClickHouseサーバー証明書を検証するための認証局のルートを含むファイルパス(.pem形式)。verifyがFalseの場合は無視されます。これが不要なのはClickHouseサーバー証明書がオペレーティングシステムによって確認されたグローバルに信頼されたルートである場合です。 | -| client_cert | str | *None* | TLSクライアント証明書のファイルパス(.pem形式、相互TLS認証用)。ファイルには、中間証明書を含む完全な証明書チェーンが含まれている必要があります。 | -| client_cert_key | str | *None* | クライアント証明書のプライベートキーのファイルパス。プライベートキーがクライアント証明書のキー・ファイルに含まれていない場合は必要です。 | -| server_host_name | str | *None* | TLS証明書のCNまたはSNIで識別されたClickHouseサーバーのホスト名。これを設定すると、異なるホスト名でプロキシやトンネルを介して接続する際のSSLエラーを回避できます。 | -| tls_mode | str | *None* | 高度なTLS動作を制御します。`proxy`および `strict` はClickHouseの相互TLS接続を呼び出しませんが、クライアント証明書とキーを送信します。`mutual`はClickHouse相互TLS認証をクライアント証明書で前提とします。 *None* /デフォルト動作は`mutual`です。 | -#### Settings Argument {#settings-argument} - -最後に、`get_client`への`settings`引数は、各クライアントリクエストのために追加のClickHouse設定をサーバーに渡すために使用されます。一般的に、*readonly*=*1* アクセスを持つユーザーはクエリと共に送信された設定を変更できないため、ClickHouse Connectはそのような設定を最終リクエストで削除し、警告をログに記録します。以下の設定は、ClickHouse Connectによって使用されるHTTPクエリ/セッションにのみ適用され、一般的なClickHouse設定として文書化されていません。 - -| Setting | Description | -|-------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| buffer_size | HTTPチャネルに書き込む前にClickHouseサーバーによって使用されるバッファサイズ(バイト)。 | -| session_id | サーバー上で関連するクエリを関連付けるための一意のセッションID。一時テーブルには必須です。 | -| compress | ClickHouseサーバーがPOSTレスポンスデータを圧縮するかどうか。これは「生」クエリに対してのみ使用する必要があります。 | -| decompress | ClickHouseサーバーに送信されたデータが逆シリアル化されるべきかどうか。この設定は「生」挿入に対してのみ使用する必要があります。 | -| quota_key | このリクエストに関連付けられているクォータキー。クォータに関するClickHouseサーバーのドキュメントを参照してください。 | -| session_check | セッションの状態を確認するために使用されます。 | -| session_timeout | セッションIDで識別されたセッションがタイムアウトし、もはや有効と見なされなくなるまでの非アクティブ状態の秒数。デフォルトは60秒です。 | -| wait_end_of_query | ClickHouseサーバー上の応答全体をバッファする。この設定はサマリー情報を返すために必要であり、非ストリーミングクエリに対して自動的に設定されます。 | - -各クエリと共に送信される他のClickHouse設定については、[ClickHouseのドキュメント](/operations/settings/settings.md)を参照してください。 -#### Client Creation Examples {#client-creation-examples} - -- 引数なしで、ClickHouse Connectクライアントは`localhost`のデフォルトHTTPポートにデフォルトユーザー(パスワードなし)で接続します: - -```python -import clickhouse_connect - -client 
= clickhouse_connect.get_client() -client.server_version -Out[2]: '22.10.1.98' -``` - -- セキュア(https)な外部ClickHouseサーバーへの接続 - -```python -import clickhouse_connect - -client = clickhouse_connect.get_client(host='play.clickhouse.com', secure=True, port=443, user='play', password='clickhouse') -client.command('SELECT timezone()') -Out[2]: 'Etc/UTC' -``` - -- セッションIDやその他のカスタム接続パラメータ、ClickHouse設定を使用した接続。 - -```python -import clickhouse_connect - -client = clickhouse_connect.get_client(host='play.clickhouse.com', - user='play', - password='clickhouse', - port=443, - session_id='example_session_1', - connect_timeout=15, - database='github', - settings={'distributed_ddl_task_timeout':300}) -client.database -Out[2]: 'github' -``` -### Common Method Arguments {#common-method-arguments} - -いくつかのクライアントメソッドは、共通の`parameters`および`settings`引数のいずれかまたは両方を使用します。これらのキーワード引数は以下に説明します。 -#### Parameters Argument {#parameters-argument} - -ClickHouse Connect Clientの`query*`および`command`メソッドは、ClickHouseの値式にPython式をバインドするためにオプションの`parameters`キーワード引数を受け取ります。バインディングには2種類あります。 -##### Server Side Binding {#server-side-binding} - -ClickHouseはほとんどのクエリ値に対して[サーバー側のバインディング](/interfaces/cli.md#cli-queries-with-parameters)をサポートしており、バインドされた値はクエリとは別にHTTPクエリパラメータとして送信されます。ClickHouse Connectは、`{<name>:<datatype>}`の形式のバインディング式を検出した場合、適切なクエリパラメータを追加します。サーバー側のバインディングでは、`parameters`引数はPythonの辞書である必要があります。 - -- Python辞書、DateTime値、文字列値を用いたサーバー側のバインディング - -```python -import datetime - -my_date = datetime.datetime(2022, 10, 1, 15, 20, 5) - -parameters = {'table': 'my_table', 'v1': my_date, 'v2': "a string with a single quote'"} -client.query('SELECT * FROM {table:Identifier} WHERE date >= {v1:DateTime} AND string ILIKE {v2:String}', parameters=parameters) - - -# サーバー上で以下のクエリが生成されます - -# SELECT * FROM my_table WHERE date >= '2022-10-01 15:20:05' AND string ILIKE 'a string with a single quote\'' -``` - -**重要** -- サーバー側のバインディングはClickHouseサーバーによって`SELECT`クエリのみにサポートされています。`ALTER`、`DELETE`、`INSERT`、または他の種類のクエリには機能しません。将来的に変更される可能性がありますので、https://github.com/ClickHouse/ClickHouse/issues/42092を参照してください。 -##### Client Side Binding {#client-side-binding} - -ClickHouse Connectはまた、クライアント側のパラメータバインディングもサポートし、テンプレート化されたSQLクエリ生成での柔軟性を高めることができます。クライアント側のバインディングでは、`parameters`引数は辞書またはシーケンスである必要があります。クライアント側のバインディングは、パラメータの置き換えにPythonの["printf"スタイル](https://docs.python.org/3/library/stdtypes.html#old-string-formatting)の文字列整形を使用します。 - -サーバー側のバインディングとは異なり、クライアント側のバインディングは、データベース識別子(データベース、テーブル、カラム名など)には機能しません。Python式整形は異なるタイプの文字列を区別できないため、これらは異なる形で整形する必要があります(データベース識別子にはバックティックまたは二重引用符、データ値には単一引用符を使用します)。 - -- Python辞書、DateTime値、および文字列エスケープの例 - -```python -import datetime - -my_date = datetime.datetime(2022, 10, 1, 15, 20, 5) - -parameters = {'v1': my_date, 'v2': "a string with a single quote'"} -client.query('SELECT * FROM some_table WHERE date >= %(v1)s AND string ILIKE %(v2)s', parameters=parameters) - - -# 以下のクエリが生成されます: - -# SELECT * FROM some_table WHERE date >= '2022-10-01 15:20:05' AND string ILIKE 'a string with a single quote\'' -``` - -- Pythonシーケンス(タプル)、Float64、およびIPv4アドレスの例 - -```python -import ipaddress - -parameters = (35200.44, ipaddress.IPv4Address(0x443d04fe)) -client.query('SELECT * FROM some_table WHERE metric >= %s AND ip_address = %s', parameters=parameters) - - -# 以下のクエリが生成されます: - -# SELECT * FROM some_table WHERE metric >= 35200.44 AND ip_address = '68.61.4.254'' -``` - -:::note -DateTime64引数をバインドする(サブ秒精度を持つClickHouseタイプ)には、次の2つのカスタムアプローチのいずれかが必要です: -- Pythonの`datetime.datetime`値を新しいDT64Paramクラスでラップする。 - ```python - query = 'SELECT 
{p1:DateTime64(3)}' # 辞書を使用したサーバー側のバインディング - parameters={'p1': DT64Param(dt_value)} - - query = 'SELECT %s as string, toDateTime64(%s,6) as dateTime' # リストを使用したクライアント側のバインディング - parameters=['a string', DT64Param(datetime.now())] - ``` - - パラメータ名の末尾に`_64`を付加して辞書のパラメータ値を使用する場合 - ```python - query = 'SELECT {p1:DateTime64(3)}, {a1:Array(DateTime(3))}' # 辞書を使用したサーバー側のバインディング - - parameters={'p1_64': dt_value, 'a1_64': [dt_value1, dt_value2]} - ``` -::: -#### Settings Argument {#settings-argument-1} - -すべての主要なClickHouse Connect Clientの「insert」と「select」メソッドは、含まれるSQLステートメントに対してClickHouseサーバーの[ユーザー設定](/operations/settings/settings.md)を渡すためのオプションの`settings`キーワード引数を受け取ります。`settings`引数は辞書でなければなりません。各アイテムはClickHouse設定名とその関連値でなければなりません。値は、クエリパラメータとしてサーバーに送信される際に文字列に変換されます。 - -クライアントレベルの設定と同様に、ClickHouse Connectはサーバーによって*readonly*=*1*とマークされた設定を削除し、関連するログメッセージを記録します。ClickHouse HTTPインターフェースを介ってのクエリにのみ適用される設定は常に有効です。これらの設定は、`get_client` [API](#settings-argument)の下で説明されています。 - -ClickHouse設定を使用する例: - -```python -settings = {'merge_tree_min_rows_for_concurrent_read': 65535, - 'session_id': 'session_1234', - 'use_skip_indexes': False} -client.query("SELECT event_type, sum(timeout) FROM event_errors WHERE event_time > '2022-08-01'", settings=settings) -``` -### Client _command_ Method {#client-_command_-method} - -`Client.command`メソッドを使用して、通常はデータを返さないか、単一のプリミティブまたは配列値を返すClickHouseサーバーにSQLクエリを送信します。このメソッドは以下のパラメータを取ります: - -| Parameter | Type | Default | Description | -|---------------|------------------|------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------| -| cmd | str | *必須* | 単一の値または単一行の値を返すClickHouse SQLステートメント。 | | -| parameters | dict or iterable | *None* | [parametersの説明](#parameters-argument)を参照してください。 | -| data | str or bytes | *None* | POSTボディとしてコマンドに含めるオプションデータ。 | -| settings | dict | *None* | [settingsの説明](#settings-argument)を参照してください。 | -| use_database | bool | True | クライアントデータベースを使用します(クライアント作成時に指定)。Falseの場合、接続ユーザーのためにデフォルトのClickHouseサーバーデータベースが使用されます。 | -| external_data | ExternalData | *None* | クエリで使用するファイルまたはバイナリデータを含むExternalDataオブジェクト。詳細は[高度なクエリ(外部データ)](#external-data)を参照してください。 | - -- `_command_`はDDL文にも使用できます。SQL "コマンド" がデータを返さない場合、"query summary" 辞書が代わりに返されます。この辞書は、ClickHouseのX-ClickHouse-SummaryおよびX-ClickHouse-Query-Idヘッダーをカプセル化しており、`written_rows`、`written_bytes`、`query_id`のキー/値ペアを含みます。 - -```python -client.command('CREATE TABLE test_command (col_1 String, col_2 DateTime) Engine MergeTree ORDER BY tuple()') -client.command('SHOW CREATE TABLE test_command') -Out[6]: 'CREATE TABLE default.test_command\\n(\\n `col_1` String,\\n `col_2` DateTime\\n)\\nENGINE = MergeTree\\nORDER BY tuple()\\nSETTINGS index_granularity = 8192' -``` - -- `_command_`は、単一行のみを返す簡単なクエリにも使用できます。 - -```python -result = client.command('SELECT count() FROM system.tables') -result -Out[7]: 110 -``` -### Client _query_ メソッド {#client-_query_-method} - -`Client.query` メソッドは、ClickHouse Server から単一の「バッチ」データセットを取得する主な方法です。このメソッドは、HTTP 経由でネイティブ ClickHouse フォーマットを利用して大規模なデータセット(約 100 万行まで)を効率的に送信します。このメソッドは次のパラメータを受け取ります。 - -| パラメータ | 型 | デフォルト | 説明 | -|---------------------|------------------|---------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| query | str | *必須* | ClickHouse SQL SELECT または DESCRIBE クエリ。 | -| parameters | dict or iterable | *なし* | 
[パラメータの説明](#parameters-argument)を参照してください。 | -| settings | dict | *なし* | [設定の説明](#settings-argument)を参照してください。 | -| query_formats | dict | *なし* | 結果値のデータ型フォーマット仕様。詳細については、Advanced Usage (Read Formats) を参照してください。 | -| column_formats | dict | *なし* | カラムごとのデータ型フォーマット。詳細については、Advanced Usage (Read Formats) を参照してください。 | -| encoding | str | *なし* | ClickHouse String カラムを Python 文字列にエンコードするために使用されるエンコーディング。設定しない場合、Python は `UTF-8` をデフォルトで使用します。 | -| use_none | bool | True | ClickHouse の null に対して Python の *None* 型を使用します。False の場合、ClickHouse の null に対してデフォルトのデータ型(例えば 0)を使用します。NumPy/Pandas の場合、性能の理由からデフォルトは False に設定されています。 | -| column_oriented | bool | False | 結果を行のシーケンスではなく、カラムのシーケンスとして返します。Python データを他の列指向データ形式に変換するのに役立ちます。 | -| query_tz | str | *なし* | `zoneinfo` データベースのタイムゾーン名。このタイムゾーンは、クエリによって返されるすべての datetime または Pandas Timestamp オブジェクトに適用されます。 | -| column_tzs | dict | *なし* | カラム名とタイムゾーン名の辞書。`query_tz` のように、異なるカラムに異なるタイムゾーンを指定することができます。 | -| use_extended_dtypes | bool | True | Pandas の拡張データ型(例えば StringArray)や ClickHouse の NULL 値に対して pandas.NA および pandas.NaT を使用します。これは `query_df` および `query_df_stream` メソッドにのみ適用されます。 | -| external_data | ExternalData | *なし* | クエリで使用するファイルまたはバイナリデータを含む ExternalData オブジェクト。詳細については、[Advanced Queries (External Data)](#external-data)を参照してください。 | -| context | QueryContext | *なし* | 上記のメソッド引数をカプセル化するために使用できる再利用可能な QueryContext オブジェクト。[Advanced Queries (QueryContexts)](#querycontexts)を参照してください。 | - -#### QueryResult オブジェクト {#the-queryresult-object} - -基本の `query` メソッドは、次の公開プロパティを持つ QueryResult オブジェクトを返します。 - -- `result_rows` -- 行のシーケンスの形式で返されたデータのマトリクス。各行要素はカラム値のシーケンスです。 -- `result_columns` -- カラムのシーケンスの形式で返されたデータのマトリクス。各カラム要素は、そのカラムの行値のシーケンスです。 -- `column_names` -- `result_set` 内のカラム名を表す文字列のタプル。 -- `column_types` -- `result_columns` 内の各カラムに対する ClickHouse 型を表す ClickHouseType インスタンスのタプル。 -- `query_id` -- ClickHouseの query_id(`system.query_log` テーブルでクエリを調査するのに便利)。 -- `summary` -- `X-ClickHouse-Summary` HTTP レスポンスヘッダーによって返されたデータ。 -- `first_item` -- レスポンスの最初の行を辞書として取得するための便利なプロパティ(キーはカラム名です)。 -- `first_row` -- 結果の最初の行を返す便利なプロパティ。 -- `column_block_stream` -- カラム指向形式でのクエリ結果のジェネレーター。このプロパティは直接参照するべきではありません(下記参照)。 -- `row_block_stream` -- 行指向形式でのクエリ結果のジェネレーター。このプロパティは直接参照するべきではありません(下記参照)。 -- `rows_stream` -- 呼び出しごとに単一の行を生成するクエリ結果のジェネレーター。このプロパティは直接参照するべきではありません(下記参照)。 -- `summary` -- `command` メソッドの下で説明される ClickHouse によって返されたサマリー情報の辞書。 - -`*_stream` プロパティは、返されたデータのイテレータとして使用できる Python コンテキストを返します。これらは、Client の `*_stream` メソッドを通じて間接的にアクセスすべきです。 - -クエリ結果のストリーミングの完全な詳細(StreamContext オブジェクトを使用)は、[Advanced Queries (Streaming Queries)](#streaming-queries) に詳述されています。 - -### NumPy、Pandas または Arrow でのクエリ結果の消費 {#consuming-query-results-with-numpy-pandas-or-arrow} - -メインの `query` メソッドには、3つの特殊化バージョンがあります。 - -- `query_np` -- このバージョンは、ClickHouse Connect QueryResult の代わりに NumPy Array を返します。 -- `query_df` -- このバージョンは、ClickHouse Connect QueryResult の代わりに Pandas Dataframe を返します。 -- `query_arrow` -- このバージョンは、PyArrow テーブルを返します。ClickHouse の `Arrow` フォーマットを直接利用しているため、メインの `query` メソッドと共通して受け付ける引数は `query`、`parameters`、`settings` の3つです。さらに、Arrow テーブルが ClickHouse の String 型を文字列(True の場合)またはバイト(False の場合)としてレンダリングするかどうかを決定する追加の引数 `use_strings` があります。 - -### Client ストリーミングクエリメソッド {#client-streaming-query-methods} - -ClickHouse Connect Client は、データをストリームとして取得するための複数のメソッドを提供します(Python ジェネレーターとして実装されています)。 - -- `query_column_block_stream` -- ネイティブ Python オブジェクトを使用したカラムのシーケンスとしてクエリデータをブロックで返します。 -- `query_row_block_stream` -- ネイティブ Python オブジェクトを使用した行のブロックとしてクエリデータを返します。 -- `query_rows_stream` -- ネイティブ 
Python オブジェクトを使用した行のシーケンスとしてクエリデータを返します。 -- `query_np_stream` -- 各 ClickHouse ブロックのクエリデータを NumPy 配列として返します。 -- `query_df_stream` -- 各 ClickHouse ブロックのクエリデータを Pandas Dataframe として返します。 -- `query_arrow_stream` -- PyArrow RecordBlocks でクエリデータを返します。 - -これらの各メソッドは、ストリームの消費を開始するために `with` ステートメントを使用して開く必要のある `ContextStream` オブジェクトを返します。詳細と例については、[Advanced Queries (Streaming Queries)](#streaming-queries) を参照してください。 - -### Client _insert_ メソッド {#client-_insert_-method} - -ClickHouse に複数のレコードを挿入する一般的なユースケースのために `Client.insert` メソッドがあります。このメソッドは次のパラメータを受け取ります。 - -| パラメータ | 型 | デフォルト | 説明 | -|-------------------|-----------------------------------|---------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| table | str | *必須* | 挿入先の ClickHouse テーブル。完全なテーブル名(データベースを含む)を指定できます。 | -| data | Sequence of Sequences | *必須* | 挿入するデータのマトリクス。各行がカラム値のシーケンスである行のシーケンス、または各カラムが行値のシーケンスであるカラムのシーケンスのいずれかです。 | -| column_names | Sequence of str, or str | '*' | データマトリクスのカラム名のリスト。`'*'` を使用すると、ClickHouse Connect がテーブルのすべてのカラム名を取得する「プレクエリ」を実行します。 | -| database | str | '' | 挿入の対象データベース。指定しない場合、クライアントのデータベースが使用されます。 | -| column_types | Sequence of ClickHouseType | *なし* | ClickHouseType インスタンスのリスト。`column_types` または `column_type_names` のいずれも指定しない場合、ClickHouse Connect がテーブルのすべてのカラム型を取得する「プレクエリ」を実行します。 | -| column_type_names | Sequence of ClickHouse type names | *なし* | ClickHouse データ型名のリスト。`column_types` または `column_type_names` のいずれも指定しない場合、ClickHouse Connect がテーブルのすべてのカラム型を取得する「プレクエリ」を実行します。 | -| column_oriented | bool | False | True の場合、`data` 引数はカラムのシーケンスであると見なされ(データを挿入するために「ピボット」を行う必要はありません)、それ以外は `data` は行のシーケンスとして解釈されます。 | -| settings | dict | *なし* | [設定の説明](#settings-argument)を参照してください。 | -| insert_context | InsertContext | *なし* | 上記のメソッド引数をカプセル化するために使用できる再利用可能な InsertContext オブジェクト。[Advanced Inserts (InsertContexts)](#insertcontexts)を参照してください。 | - -このメソッドは、「クエリサマリー」辞書を返します。挿入が何らかの理由で失敗した場合、例外が発生します。 - -メインの `insert` メソッドの特殊化バージョンが2つあります。 - -- `insert_df` -- Python の Sequences of Sequences `data` 引数の代わりに、このメソッドの2番目のパラメータは Pandas Dataframe インスタンスである必要がある `df` 引数を要求します。ClickHouse Connect は Dataframe をカラム指向のデータソースとして自動的に処理するため、`column_oriented` パラメータは必要ありませんし、利用可能でもありません。 -- `insert_arrow` -- Python の Sequences of Sequences `data` 引数の代わりに、このメソッドは `arrow_table` を要求します。ClickHouse Connect は Arrow テーブルを変更せずに ClickHouse サーバーに渡して処理するため、`database` と `settings` 引数は `table` と `arrow_table` に加えて利用可能です。 - -*注意:* NumPy 配列は有効な Sequences of Sequences であり、メインの `insert` メソッドの `data` 引数として使用できるため、特殊なメソッドは必要ありません。 - -### ファイル挿入 {#file-inserts} - -`clickhouse_connect.driver.tools` には、ファイルシステムから既存の ClickHouse テーブルに直接データを挿入する `insert_file` メソッドが含まれています。解析は ClickHouse サーバーに委任されます。`insert_file` は次のパラメータを受け取ります。 - -| パラメータ | 型 | デフォルト | 説明 | -|--------------|----------------|---------------------|-------------------------------------------------------------------------------------------------------------------------------------------| -| client | Client | *必須* | 挿入を実行するために使用する `driver.Client` | -| table | str | *必須* | 挿入先の ClickHouse テーブル。完全なテーブル名(データベースを含む)も指定できます。 | -| file_path | str | *必須* | データファイルへのネイティブファイルシステムパス | -| fmt | str | CSV, CSVWithNames | ファイルの ClickHouse 入力フォーマット。`column_names` が指定されていない場合は CSVWithNames が暗黙的に仮定されます。 | -| column_names | Sequence of str | *なし* | データファイル内のカラム名のリスト。カラム名を含むフォーマットには必要ありません。 | -| database | str | *なし* | テーブルのデータベース。テーブルが完全に指定されている場合は無視されます。指定されていない場合、挿入はクライアントデータベースを使用します。 
| -| settings | dict | *なし* | [設定の説明](#settings-argument)を参照してください。 | -| compression | str | *なし* | Content-Encoding HTTP ヘッダーに使用される認識された ClickHouse 圧縮タイプ(zstd、lz4、gzip)。 | - -不一致なデータや異常なフォーマットの日付/時間の値を持つファイルの場合、データインポートに適用される設定(`input_format_allow_errors_num` や `input_format_allow_errors_num` など)がこのメソッドで認識されます。 - -```python -import clickhouse_connect -from clickhouse_connect.driver.tools import insert_file - -client = clickhouse_connect.get_client() -insert_file(client, 'example_table', 'my_data.csv', - settings={'input_format_allow_errors_ratio': .2, - 'input_format_allow_errors_num': 5}) -``` - -### クエリ結果をファイルとして保存する {#saving-query-results-as-files} - -クエリの結果を CSV ファイルに保存したい場合、`raw_stream` メソッドを使用して ClickHouse からローカルファイルシステムにファイルをストリーミングすることができます。例えば、以下のコードスニペットを使用できます。 - -```python -import clickhouse_connect - -if __name__ == '__main__': - client = clickhouse_connect.get_client() - query = 'SELECT number, toString(number) AS number_as_str FROM system.numbers LIMIT 5' - fmt = 'CSVWithNames' # または CSV、CSVWithNamesAndTypes、TabSeparated など。 - stream = client.raw_stream(query=query, fmt=fmt) - with open("output.csv", "wb") as f: - for chunk in stream: - f.write(chunk) -``` - -上記のコードは、次の内容を持つ `output.csv` ファイルを生成します。 - -```csv -"number","number_as_str" -0,"0" -1,"1" -2,"2" -3,"3" -4,"4" -``` - -同様に、[TabSeparated](/interfaces/formats#tabseparated) や他のフォーマットでデータを保存することもできます。利用可能なすべてのフォーマットオプションの概要については、[Input and Output Data のフォーマット](/interfaces/formats) を参照してください。 - -### Raw API {#raw-api} - -ClickHouse データとネイティブまたはサードパーティデータ型および構造の間での変換を必要としないユースケース向けに、ClickHouse Connect クライアントは ClickHouse 接続の直接使用のための 2 つのメソッドを提供します。 - -#### Client _raw_query_ メソッド {#client-_raw_query_-method} - -`Client.raw_query` メソッドは、クライアント接続を使用して ClickHouse HTTP クエリインターフェイスを直接使用することを可能にします。返り値は未処理の `bytes` オブジェクトです。パラメータバインディング、エラーハンドリング、リトライ、および設定管理を最小限のインターフェースで行うための便利なラッパーを提供します。 - -| パラメータ | 型 | デフォルト | 説明 | -|---------------|------------------|------------|------------------------------------------------------------------------------------------------------------------------------------------| -| query | str | *必須* | 有効な ClickHouse クエリ | -| parameters | dict or iterable | *なし* | [パラメータの説明](#parameters-argument)を参照してください。 | -| settings | dict | *なし* | [設定の説明](#settings-argument)を参照してください。 | | -| fmt | str | *なし* | 結果のバイト用の ClickHouse 出力フォーマット。(指定しない場合、ClickHouse は TSV を使用します) | -| use_database | bool | True | クエリコンテキストのために clickhouse-connect クライアントで割り当てられたデータベースを使用します。 | -| external_data | ExternalData | *なし* | クエリで使用するファイルまたはバイナリデータを含む ExternalData オブジェクト。詳細については、[Advanced Queries (External Data)](#external-data)を参照してください。 | - -結果の `bytes` オブジェクトの管理は呼び出し元の責任です。`Client.query_arrow` は、このメソッドをクリックハウスの `Arrow` 出力フォーマットを使用してラップしたものであることに注意してください。 - -#### Client _raw_stream_ メソッド {#client-_raw_stream_-method} - -`Client.raw_stream` メソッドは `raw_query` メソッドと同じ API を持っていますが、`bytes` オブジェクトのジェネレーター/ストリームソースとして使用できる `io.IOBase` オブジェクトを返します。これは現在 `query_arrow_stream` メソッドによって利用されています。 - -#### Client _raw_insert_ メソッド {#client-_raw_insert_-method} - -`Client.raw_insert` メソッドは、クライアント接続を使用して `bytes` オブジェクトまたは `bytes` オブジェクトジェネレーターを直接挿入できるようにします。挿入ペイロードを処理しないため、非常に高いパフォーマンスを実現しています。このメソッドは、設定と挿入形式を指定するためのオプションを提供します。 - -| パラメータ | 型 | デフォルト | 説明 | -|--------------|----------------------------------------|------------|---------------------------------------------------------------------------------------------------------------------------------| -| table | str | *必須* | シンプルまたはデータベース修飾されたテーブル名 | -| column_names | Sequence[str] | *なし* 
| 挿入ブロックのためのカラム名。`fmt` パラメータに名前が含まれていない場合、必須です | -| insert_block | str, bytes, Generator[bytes], BinaryIO | *必須* | 挿入するデータ。文字列はクライアントエンコーディングでエンコードされます。 | -| settings | dict | *なし* | [設定の説明](#settings-argument)を参照してください。 | | -| fmt | str | *なし* | `insert_block` バイトの ClickHouse 入力フォーマット。(指定しない場合、ClickHouse は TSV を使用します) | - -`insert_block` が指定されたフォーマットと圧縮方式を使用していることは、呼び出し元の責任です。ClickHouse Connect はファイルアップロードや PyArrow テーブルのためにこれらの生挿入を使用し、解析は ClickHouse サーバーに委任しています。 - -### ユーティリティクラスと関数 {#utility-classes-and-functions} - -次のクラスと関数も「公開」`clickhouse-connect` APIの一部と見なされ、上記に文書化されたクラスとメソッドと同様にマイナーリリース間で安定しています。これらのクラスと関数への破壊的変更は、マイナーリリース(パッチリリースではない)でのみ発生し、少なくとも 1 回のマイナーリリースで非推奨の状態で利用可能です。 - -#### 例外 {#exceptions} - -すべてのカスタム例外(DB API 2.0 仕様で定義されたものを含む)は、`clickhouse_connect.driver.exceptions` モジュールで定義されています。実際にドライバーによって検出された例外は、これらの型のいずれかを使用します。 - -#### Clickhouse SQL ユーティリティ {#clickhouse-sql-utilities} - -`clickhouse_connect.driver.binding` モジュール内の関数および DT64Param クラスを使用して、ClickHouse SQL クエリを適切に構築しエスケープできます。同様に、`clickhouse_connect.driver.parser` モジュール内の関数を使用して ClickHouse データ型名を解析できます。 - -### マルチスレッド、マルチプロセス、非同期/イベント駆動型ユースケース {#multithreaded-multiprocess-and-asyncevent-driven-use-cases} - -ClickHouse Connect はマルチスレッド、マルチプロセス、イベントループ駆動型/非同期アプリケーションでうまく機能します。すべてのクエリおよび挿入処理は単一のスレッド内で行われるため、オペレーションは一般的にスレッドセーフです。 (低レベルでの一部のオペレーションの並列処理は、単一スレッドのパフォーマンスペナルティを克服するための将来の改善の可能性がありますが、その場合でもスレッドの安全性は維持されます。) - -各クエリまたは挿入がそれぞれ独自の QueryContext または InsertContext オブジェクト内に状態を維持するため、これらのヘルパーオブジェクトはスレッドセーフではなく、複数の処理ストリーム間で共有するべきではありません。コンテキストオブジェクトに関する追加の議論は、次のセクションで行います。 - -さらに、同時に「進行中」の2つ以上のクエリおよび/または挿入があるアプリケーションの場合、以下の2つの考慮事項があります。最初は、クエリ/挿入に関連付けられた ClickHouse「セッション」であり、2つ目は、ClickHouse Connect Client インスタンスによって使用される HTTP 接続プールです。 - -### AsyncClient ラッパー {#asyncclient-wrapper} - -0.7.16 以降、ClickHouse Connect は通常の `Client` の非同期ラッパーを提供し、`asyncio` 環境でクライアントを使用できるようにしました。 - -`AsyncClient` のインスタンスを取得するには、標準の `get_client` と同じパラメータを受け付ける `get_async_client` ファクトリ関数を使用できます。 - -```python -import asyncio - -import clickhouse_connect - - -async def main(): - client = await clickhouse_connect.get_async_client() - result = await client.query("SELECT name FROM system.databases LIMIT 1") - print(result.result_rows) - - -asyncio.run(main()) -``` - -`AsyncClient` は、標準の `Client` と同じメソッドとパラメータを持っていますが、適用可能な場合はコルーチンです。内部的には、I/O 操作を実行する `Client` のこれらのメソッドは、[run_in_executor](https://docs.python.org/3/library/asyncio-eventloop.html#asyncio.loop.run_in_executor) 呼び出しでラップされています。 - -I/O 操作が完了するのを待っている間に実行スレッドと GIL が解放されるため、`AsyncClient` ラッパーを使用することでマルチスレッドパフォーマンスが向上します。 - -注意: 通常の `Client` とは異なり、`AsyncClient` はデフォルトで `autogenerate_session_id` を `False` に設定します。 - -詳しくは: [run_async の例](https://github.com/ClickHouse/clickhouse-connect/blob/main/examples/run_async.py)を参照してください。 - -### ClickHouse セッション ID の管理 {#managing-clickhouse-session-ids} - -各 ClickHouse クエリは ClickHouse の「セッション」のコンテキスト内で発生します。セッションは現在、2つの目的に使用されています。 - -- 複数のクエリに特定の ClickHouse 設定を関連付けるため([ユーザー設定](/operations/settings/settings.md)を参照)。ClickHouse の `SET` コマンドを使用して、ユーザーセッションのスコープに設定を変更できます。 -- [一時テーブル](/sql-reference/statements/create/table#temporary-tables)を追跡します。 - -デフォルトでは、ClickHouse Connect クライアントインスタンスで実行される各クエリは、セッション機能を有効にするために同じセッション ID を使用します。つまり、`SET` 文や一時テーブルは、単一の ClickHouse クライアントを使用している場合に予想どおりに機能します。ただし、設計上、ClickHouse サーバーは同じセッション内での同時クエリを許可していません。この結果、同時にクエリを実行する ClickHouse Connect アプリケーションには 2 つのオプションがあります。 - -- 各実行スレッド(スレッド、プロセス、またはイベントハンドラー)ごとに別の `Client` インスタンスを作成し、それぞれが独自のセッション ID を持つようにします。これが一般に最良のアプローチであり、各クライアントのセッション状態を保持します。 -- 各クエリにユニークなセッション ID 
を使用します。これは、一時テーブルや共有セッション設定が必要ない状況で同時セッションの問題を回避します。(共有設定はクライアント作成時に提供することもできますが、これらは各リクエストと共に送信され、セッションに関連付けられません)。ユニークな session_id は、各リクエストの `settings` 辞書に追加できるか、`autogenerate_session_id` 共通設定を無効にすることができます。 - -```python -from clickhouse_connect import common - -common.set_setting('autogenerate_session_id', False) # クライアントを作成する前に常に設定してください -client = clickhouse_connect.get_client(host='somehost.com', user='dbuser', password=1234) -``` - -この場合、ClickHouse Connect はセッション ID を送信せず、ClickHouse サーバーによってランダムなセッション ID が生成されます。再度、一時テーブルとセッションレベル設定は利用できません。 - -### HTTP 接続プールのカスタマイズ {#customizing-the-http-connection-pool} - -ClickHouse Connect は、サーバーへの基礎となる HTTP 接続を処理するために `urllib3` 接続プールを使用します。デフォルトでは、すべてのクライアントインスタンスは同じ接続プールを共有し、これはほとんどのユースケースに十分です。このデフォルトプールは、アプリケーションによって使用される各 ClickHouse サーバーに最大 8 の HTTP Keep Alive 接続を維持します。 - -大規模なマルチスレッドアプリケーションには、別々の接続プールが適切な場合があります。カスタマイズされた接続プールは、メインの `clickhouse_connect.get_client` 関数の `pool_mgr` キーワード引数として提供できます。 - -```python -import clickhouse_connect -from clickhouse_connect.driver import httputil - -big_pool_mgr = httputil.get_pool_manager(maxsize=16, num_pools=12) - -client1 = clickhouse_connect.get_client(pool_mgr=big_pool_mgr) -client2 = clickhouse_connect.get_client(pool_mgr=big_pool_mgr) -``` - -上記の例のように、クライアントはプールマネージャを共有することも、各クライアントのために別々のプールマネージャを作成することもできます。プールマネージャの作成時に利用可能なオプションの詳細については、[`urllib3` ドキュメント](https://urllib3.readthedocs.io/en/stable/advanced-usage.html#customizing-pool-behavior)を参照してください。 - -## ClickHouse Connect でのデータクエリ: 高度な使用法 {#querying-data-with-clickhouse-connect--advanced-usage} - -### QueryContexts {#querycontexts} - -ClickHouse Connect は標準クエリを QueryContext 内で実行します。QueryContext には、ClickHouse データベースに対してクエリを構築するために使用される主要な構造体と、クエリ結果を QueryResult またはその他の応答データ構造に変換するために使用される設定が含まれています。これには、クエリ自体、パラメータ、設定、読み取りフォーマット、およびその他の属性が含まれます。 - -QueryContext は、クライアントの `create_query_context` メソッドを使用して取得できます。このメソッドは、コアクエリメソッドと同じパラメータを受け取ります。このクエリコンテキストは、他のすべての引数の代わりに`context` キーワード引数として `query`、`query_df`、または `query_np` メソッドに渡すことができます。メソッド呼び出しに指定された追加の引数は、QueryContext のプロパティをオーバーライドします。 - -QueryContext の明確なユースケースは、異なるバインディングパラメータ値で同じクエリを送信することです。すべてのパラメータ値は、辞書を持って `QueryContext.set_parameters` メソッドを呼び出すことで更新できます。または、`key` と `value` のペアで `QueryContext.set_parameter` を呼び出すことで、単一の値を更新できます。 - -```python -client.create_query_context(query='SELECT value1, value2 FROM data_table WHERE key = {k:Int32}', - parameters={'k': 2}, - column_oriented=True) -result = client.query(context=qc) -assert result.result_set[1][0] == 'second_value2' -qc.set_parameter('k', 1) -result = test_client.query(context=qc) -assert result.result_set[1][0] == 'first_value2' -``` - -QueryContexts はスレッドセーフではないことに注意してください。ただし、`QueryContext.updated_copy` メソッドを呼び出すことで、マルチスレッド環境でコピーを取得できます。 - -### ストリーミングクエリ {#streaming-queries} -#### Data Blocks {#data-blocks} -ClickHouse Connect は、すべてのデータを ClickHouse サーバーから受け取るブロックのストリームとして、主な `query` メソッドから処理します。これらのブロックは、ClickHouse との間でカスタム「ネイティブ」形式で送信されます。「ブロック」とは、バイナリデータのカラムのシーケンスに過ぎず、各カラムには指定されたデータ型のデータ値が同数含まれています。(列指向データベースである ClickHouse は、このデータを同様の形で保存します。)クエリから返されるブロックのサイズは、いくつかのレベル(ユーザープロファイル、ユーザー、セッション、またはクエリ)で設定できる 2 つのユーザー設定によって制御されます。それらは次のとおりです: - -- [max_block_size](/operations/settings/settings#max_block_size) -- 行数におけるブロックサイズの制限。デフォルトは 65536。 -- [preferred_block_size_bytes](/operations/settings/settings#preferred_block_size_bytes) -- バイト単位のソフトリミット。デフォルトは 1,000,0000。 - -`preferred_block_size_setting` に関係なく、各ブロックは `max_block_size` 
行を超えることはありません。クエリのタイプに応じて、実際に返されるブロックは任意のサイズになる可能性があります。例えば、多くのシャードをカバーする分散テーブルへのクエリは、各シャードから直接取得された小さなブロックを含む場合があります。 - -Client `query_*_stream` メソッドのいずれかを使用するとき、結果はブロックごとに返されます。ClickHouse Connect は一度に1つのブロックだけを読み込みます。これにより、大きな結果セットのすべてをメモリに読み込むことなく、大量のデータを処理できます。アプリケーションは、任意の数のブロックを処理する準備が必要であり、各ブロックの正確なサイズを制御することはできません。 - -#### HTTP Data Buffer for Slow Processing {#http-data-buffer-for-slow-processing} - -HTTP プロトコルの制限により、ブロックが ClickHouse サーバーがデータをストリーミングしている速度よりもかなり遅い速度で処理されると、ClickHouse サーバーは接続を閉じ、結果として処理スレッドで例外がスローされます。これの一部は、一般的な `http_buffer_size` 設定を使用して、HTTP ストリーミングバッファのバッファサイズを増やすことで軽減できます(デフォルトは 10 メガバイト)。十分なメモリがアプリケーションに割り当てられている場合、大きな `http_buffer_size` 値は問題ありません。バッファ内のデータは、`lz4` または `zstd` 圧縮を使用して圧縮されているため、これらの圧縮形式を使用することで、全体のバッファが増加します。 - -#### StreamContexts {#streamcontexts} - -各 `query_*_stream` メソッド(例えば `query_row_block_stream`)は、ClickHouse の `StreamContext` オブジェクトを返します。これは、Python のコンテキスト/ジェネレーターを組み合わせたものです。基本的な使用法は次の通りです: - -```python -with client.query_row_block_stream('SELECT pickup, dropoff, pickup_longitude, pickup_latitude FROM taxi_trips') as stream: - for block in stream: - for row in block: - <データに対して何かをする> -``` - -`with` ステートメントなしで StreamContext を使用しようとするとエラーが発生することに注意してください。Python コンテキストの使用は、ストリーム(この場合、ストリーミング HTTP レスポンス)が完全に消費されない場合や処理中に例外が発生する場合でも、適切に閉じられることを保証します。また、StreamContexts は、ストリームを消費するために一度だけ使用することができます。Exit した後に StreamContext を使用しようとすると `StreamClosedError` が発生します。 - -StreamContext の `source` プロパティを使用すると、カラム名と型を含む親 `QueryResult` オブジェクトにアクセスできます。 - -#### Stream Types {#stream-types} - -`query_column_block_stream` メソッドは、ブロックをネイティブ Python データ型として保存されたカラムデータのシーケンスとして返します。上記の `taxi_trips` クエリを使用すると、返されるデータはリストで、それぞれのリストの要素は関連するカラムのすべてのデータを含む別のリスト(またはタプル)になります。したがって、`block[0]` は文字列のみを含むタプルになります。カラム指向の形式は、カラム内のすべての値に対して集計操作を行うために最もよく使用されます。 - -`query_row_block_stream` メソッドは、従来のリレーショナルデータベースのように、行のシーケンスとしてブロックを返します。タクシーの旅行の場合、返されるデータはリストであり、それぞれのリストの要素はデータの行を表す別のリストになります。したがって、`block[0]` には最初のタクシー旅行のすべてのフィールド(順番通り)が含まれ、`block[1]` には2番目のタクシー旅行のすべてのフィールドの行が含まれます。行指向の結果は通常、表示または変換プロセスに使用されます。 - -`query_row_stream` は、ストリームを反復処理する際に自動的に次のブロックに移動する便利なメソッドです。それ以外は、`query_row_block_stream` と同じです。 - -`query_np_stream` メソッドは、各ブロックを二次元 NumPy 配列として返します。内部的に NumPy 配列は(通常)カラムとして保存されるため、明示的な行またはカラムメソッドは必要ありません。NumPy 配列の「形状」は (カラム, 行) として表現されます。NumPy ライブラリは、NumPy 配列を操作するための多くのメソッドを提供します。すべてのカラムが同じ NumPy dtype を共有している場合、返された NumPy 配列も単一の dtype となり、内部構造を変更することなく再形成/回転が可能です。 - -`query_df_stream` メソッドは、各 ClickHouse ブロックを二次元 Pandas Dataframe として返します。次の例は、StreamContext オブジェクトが遅延的にコンテキストとして使用できることを示しています(ただし、一度だけ)。 - -最後に、`query_arrow_stream` メソッドは、ClickHouse の `ArrowStream` 形式の結果を pyarrow.ipc.RecordBatchStreamReader として StreamContext にラップして返します。ストリームの各反復は、PyArrow RecordBlock を返します。 - -```python -df_stream = client.query_df_stream('SELECT * FROM hits') -column_names = df_stream.source.column_names -with df_stream: - for df in df_stream: - -``` - -### Read Formats {#read-formats} - -読み取り形式は、クライアント `query`、`query_np`、および `query_df` メソッドから返される値のデータ型を制御します。(`raw_query` と `query_arrow` は、ClickHouse からの受信データを変更しないため、形式制御は適用されません。)例えば、UUID の読み取り形式がデフォルトの `native` 形式から代替の `string` 形式に変更されると、ClickHouse の `UUID` カラムのクエリが Python の UUID オブジェクトではなく、文字列値(標準の 8-4-4-4-12 RFC 1422 形式)として返されます。 - -任意のフォーマット関数の「データ型」引数にはワイルドカードを含めることができます。形式は単一の小文字の文字列です。 - -読み取り形式は、いくつかのレベルで設定できます: - -- グローバルに、`clickhouse_connect.datatypes.format` パッケージに定義されたメソッドを使用して。これにより、すべてのクエリに対して構成されたデータ型の形式が制御されます。 -```python -from clickhouse_connect.datatypes.format import set_read_format - - -# IPv6 と IPv4 の値を文字列として返す 
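-# (補足)データ型引数にはワイルドカードが使えるため、'IPv*' は IPv4 / IPv6 の両方の型にマッチします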
-set_read_format('IPv*', 'string') - - -# すべての Date 型を基礎となるエポック秒またはエポック日として返す -set_read_format('Date*', 'int') -``` -- 全体のクエリについて、オプションの `query_formats` 辞書引数を使用して。この場合、指定されたデータ型の任意のカラム(またはサブカラム)は、構成された形式を使用します。 -```python - -# すべての UUID カラムを文字列として返す -client.query('SELECT user_id, user_uuid, device_uuid from users', query_formats={'UUID': 'string'}) -``` -- 特定のカラムの値について、オプションの `column_formats` 辞書引数を使用することもできます。キーは ClickHouse によって返されるカラム名で、データカラムや ClickHouse の型名およびクエリ形式の値の二次元の「フォーマット」辞書の形式を指定します。この二次元辞書は、タプルやマップのようなネスト型にも使用できます。 -```python - -# `dev_address` カラムの IPv6 値を文字列として返す -client.query('SELECT device_id, dev_address, gw_address from devices', column_formats={'dev_address':'string'}) -``` - -#### Read Format Options (Python Types) {#read-format-options-python-types} - -| ClickHouse Type | Native Python Type | Read Formats | Comments | -|-----------------------|-----------------------|--------------|-------------------------------------------------------------------------------------------------------------------| -| Int[8-64], UInt[8-32] | int | - | | -| UInt64 | int | signed | Superset は現在、大きな符号なし UInt64 値を処理していません | -| [U]Int[128,256] | int | string | Pandas および NumPy の int 値は最大 64 ビットまでなので、これらは文字列として返される可能性があります | -| Float32 | float | - | すべての Python float は内部的に 64 ビットです | -| Float64 | float | - | | -| Decimal | decimal.Decimal | - | | -| String | string | bytes | ClickHouse の String カラムには固有のエンコーディングはないため、可変長のバイナリデータにも使用されます | -| FixedString | bytes | string | FixedStrings は固定サイズのバイト配列ですが、時々 Python の文字列として扱われます | -| Enum[8,16] | string | string, int | Python の enum は空文字列を受け付けないので、すべての enum は文字列または基になる int 値のいずれかとして表示されます。 | -| Date | datetime.date | int | ClickHouse は、日付を 1970 年 01 月 01 日からの日数として保存します。この値は int として利用可能です | -| Date32 | datetime.date | int | Date と同様ですが、より広範囲の日付用です | -| DateTime | datetime.datetime | int | ClickHouse は DateTime をエポック秒で保存します。この値は int として利用可能です | -| DateTime64 | datetime.datetime | int | Python の datetime.datetime はマイクロ秒の精度に限定されています。生の 64 ビット int 値が利用可能です | -| IPv4 | `ipaddress.IPv4Address` | string | IP アドレスは文字列として読み取り、適切にフォーマットされた文字列は IP アドレスとして挿入できます | -| IPv6 | `ipaddress.IPv6Address` | string | IP アドレスは文字列として読み取り、適切にフォーマットされた文字列は IP アドレスとして挿入できます | -| Tuple | dict or tuple | tuple, json | 指名付きタプルはデフォルトでは辞書として返されます。指名付きタプルは JSON 文字列として返すこともできます | -| Map | dict | - | | -| Nested | Sequence[dict] | - | | -| UUID | uuid.UUID | string | UUID は RFC 4122 に従ってフォーマットされた文字列として読み取ることができます。 | -| JSON | dict | string | デフォルトで Python 辞書が返されます。`string` 形式では JSON 文字列が返されます | -| Variant | object | - | 値に格納されている ClickHouse データ型に対する一致する Python 型を返します | -| Dynamic | object | - | 値に格納されている ClickHouse データ型に対する一致する Python 型を返します | - -### External Data {#external-data} - -ClickHouse のクエリは、任意の ClickHouse 形式の外部データを受け入れることができます。このバイナリデータは、データの処理に使用されるクエリ文字列とともに送信されます。外部データ機能の詳細は [こちら](/engines/table-engines/special/external-data.md) です。クライアント `query*` メソッドは、これらの機能を利用するためのオプションの `external_data` パラメータを受け入れます。`external_data` パラメータの値は、`clickhouse_connect.driver.external.ExternalData` オブジェクトである必要があります。そのオブジェクトのコンストラクタは、次の引数を受け付けます: - -| 名前 | 型 | 説明 | -|--------------|----------------------|------------------------------------------------------------------------------------------------------------------------------------------| -| file_path | str | 外部データを読むためのローカルシステムパスにあるファイルのパス。`file_path` か `data` のどちらかが必要です | -| file_name | str | 外部データ「ファイル」の名前。提供されない場合は、`file_path` から決定されます(拡張子なし) | -| data | bytes | ファイルからではなく、バイナリ形式での外部データ。この場合は、`data` か `file_path` のどちらかが必要です | -| fmt | str 
| データの ClickHouse [Input Format](/sql-reference/formats.mdx)。デフォルトは `TSV` | -| types | str または seq of str| 外部データのカラムデータ型のリスト。文字列の場合はコンマで区切ります。`types` または `structure` のどちらかが必要です | -| structure | str または seq of str| データ内のカラム名 + データ型のリスト(例を参照)。 `structure` または `types` のどちらかが必要です | -| mime_type | str | ファイルデータのオプション MIME タイプ。現在 ClickHouse はこの HTTP サブヘッダーを無視します | - -外部 CSV ファイルに「映画」データを含め、ClickHouse サーバーに既に存在する `directors` テーブルとそのデータを結合してクエリを送信するには、次のようにします: - -```python -import clickhouse_connect -from clickhouse_connect.driver.external import ExternalData - -client = clickhouse_connect.get_client() -ext_data = ExternalData(file_path='/data/movies.csv', - fmt='CSV', - structure=['movie String', 'year UInt16', 'rating Decimal32(3)', 'director String']) -result = client.query('SELECT name, avg(rating) FROM directors INNER JOIN movies ON directors.name = movies.director GROUP BY directors.name', - external_data=ext_data).result_rows -``` - -初期の ExternalData オブジェクトに追加の外部データファイルを追加するには、コンストラクタと同じパラメータを受け付ける `add_file` メソッドを使用します。HTTP の場合、すべての外部データは `multi-part/form-data` ファイルのアップロードの一部として送信されます。 - -### Time Zones {#time-zones} -ClickHouse DateTime および DateTime64 値にタイムゾーンを適用するための複数のメカニズムがあります。内部的に、ClickHouse サーバーは、任意の DateTime または DateTime64 オブジェクトをエポック(1970 年 01 月 01 日 00:00:00 UTC 時間)以降の秒を表すタイムゾーンナイーブな数として常に保存します。DateTime64 値の場合、表現は精度に応じてエポック以降のミリ秒、マイクロ秒、またはナノ秒になります。そのため、任意のタイムゾーン情報の適用は常にクライアント側で発生します。これは意味のある追加の計算を伴うため、パフォーマンスが重要なアプリケーションでは、ユーザー表示または変換(例えば、Pandas Timestamps は、パフォーマンスを向上させるために常にエポックナノ秒を表す 64 ビット整数として扱われる際を除いて)を除き、DateTime 型をエポック タイムスタンプとして扱うことが推奨されます。 - -クエリ中にタイムゾーンを意識したデータ型(特に Python `datetime.datetime` オブジェクト)を使用すると、`clickhouse-connect` は次の優先ルールを使用してクライアント側のタイムゾーンを適用します: - -1. クエリのために指定された `client_tzs` クエリメソッドパラメータが指定された場合は、特定のカラムのタイムゾーンが適用されます。 -2. ClickHouse カラムにタイムゾーンメタデータがある場合(つまり、`DateTime64(3, 'America/Denver')` のような型)、ClickHouse カラムのタイムゾーンが適用されます。(このクリックハウスのカラムのタイムゾーンメタデータは、ClickHouse バージョン 23.2 より前の DateTime カラムに対しては clickhouse-connect に対しては利用できません。) -3. クエリメソッドパラメータ `query_tz` が指定されたクエリには、「クエリタイムゾーン」が適用されます。 -4. クエリまたはセッションに適用されるタイムゾーン設定がある場合には、そのタイムゾーンが適用されます。(この機能はまだ ClickHouse サーバーにリリースされていません。) -5. 
最後に、クライアントの `apply_server_timezone` パラメータが True に設定されている場合(デフォルトの場合)、ClickHouse サーバーのタイムゾーンが適用されます。 - -これらのルールに基づいて適用されたタイムゾーンが UTC の場合、`clickhouse-connect` は常にタイムゾーンナイーブな Python `datetime.datetime` オブジェクトを返します。その後、このタイムゾーンナイーブなオブジェクトに任意で追加のタイムゾーン情報をアプリケーションコードによって追加することができます。 - -## Inserting Data with ClickHouse Connect: Advanced Usage {#inserting-data-with-clickhouse-connect--advanced-usage} -### InsertContexts {#insertcontexts} - -ClickHouse Connect は、すべての挿入を InsertContext 内で実行します。InsertContext には、クライアント `insert` メソッドに送信されたすべての値が引数として含まれます。さらに、InsertContext がもともと構築される際に、ClickHouse Connect はネイティブ形式の挿入を効率的に行うために必要な挿入カラムのデータ型を取得します。InsertContext を複数の挿入に再利用することで、この「事前クエリ」を回避し、より迅速かつ効率的に挿入が実行されます。 - -InsertContext は、クライアント `create_insert_context` メソッドを使用して取得できます。このメソッドは、`insert` 関数と同じ引数を取ります。再利用のために InsertContexts の `data` プロパティだけを変更することに注意してください。これは新しいデータの同じテーブルへの繰り返し挿入のために再利用可能なオブジェクトを提供する目的と一致しています。 - -```python -test_data = [[1, 'v1', 'v2'], [2, 'v3', 'v4']] -ic = test_client.create_insert_context(table='test_table', data='test_data') -client.insert(context=ic) -assert client.command('SELECT count() FROM test_table') == 2 -new_data = [[3, 'v5', 'v6'], [4, 'v7', 'v8']] -ic.data = new_data -client.insert(context=ic) -qr = test_client.query('SELECT * FROM test_table ORDER BY key DESC') -assert qr.row_count == 4 -assert qr[0][0] == 4 -``` - -InsertContexts には、挿入プロセス中に更新される可変状態が含まれているため、スレッドセーフではありません。 - -### Write Formats {#write-formats} -書き込み形式は、現在限られた数の型に対して実装されています。ほとんどの場合、ClickHouse Connect は、最初の(非 null)データ値の型を確認することで列に対する適切な書き込み形式を自動的に決定しようとします。たとえば、DateTime カラムに挿入する際に、カラムの最初の挿入値が Python の整数である場合、ClickHouse Connect は、その整数値がエポック秒であると仮定して直接整数値を挿入します。 - -ほとんどの場合、データ型の書き込み形式をオーバーライドする必要はありませんが、`clickhouse_connect.datatypes.format` パッケージの関連メソッドを使用してグローバルレベルで行うことができます。 - -#### Write Format Options {#write-format-options} - -| ClickHouse Type | Native Python Type | Write Formats | Comments | -|-----------------------|-----------------------|---------------|-------------------------------------------------------------------------------------------------------------| -| Int[8-64], UInt[8-32] | int | - | | -| UInt64 | int | | | -| [U]Int[128,256] | int | | | -| Float32 | float | | | -| Float64 | float | | | -| Decimal | decimal.Decimal | | | -| String | string | | | -| FixedString | bytes | string | 文字列として挿入される場合、追加のバイトはゼロに設定されます | -| Enum[8,16] | string | | | -| Date | datetime.date | int | ClickHouse は日付を 1970 年 01 月 01 日からの日数として保存します。int 型はこの「エポック日付」値であると見なされます| -| Date32 | datetime.date | int | Date と同様ですが、より広範囲の日付用です | -| DateTime | datetime.datetime | int | ClickHouse は DateTime をエポック秒で保存します。int 型はこの「エポック秒」の値であると見なされます | -| DateTime64 | datetime.datetime | int | Python の datetime.datetime はマイクロ秒の精度に限定されています。生の 64 ビット int 値が利用可能です | -| IPv4 | `ipaddress.IPv4Address` | string | 適切にフォーマットされた文字列は IPv4 アドレスとして挿入できます | -| IPv6 | `ipaddress.IPv6Address` | string | 適切にフォーマットされた文字列は IPv6 アドレスとして挿入できます | -| Tuple | dict or tuple | | | -| Map | dict | | | -| Nested | Sequence[dict] | | | -| UUID | uuid.UUID | string | 適切にフォーマットされた文字列は ClickHouse UUID として挿入できます | -| JSON/Object('json') | dict | string | 辞書または JSON 文字列が JSON カラムに挿入できます(注意:`Object('json')` は非推奨) | -| Variant | object | | 現在すべてのバリアントは文字列として挿入され、ClickHouse サーバーによって解析されます | -| Dynamic | object | | 警告:現在 Dynamic カラムへの挿入は ClickHouse String として保存されます | - -## Additional Options {#additional-options} - -ClickHouse Connect は、高度なユースケースに対する追加のオプションを多数提供します。 - -### Global Settings {#global-settings} - -ClickHouse Connect 
の動作をグローバルに制御する少しの設定があります。これらは、最上位 `common` パッケージからアクセスされます: - -```python -from clickhouse_connect import common - -common.set_setting('autogenerate_session_id', False) -common.get_setting('invalid_setting_action') -'drop' -``` - -:::note -これらの共通設定 `autogenerate_session_id`、`product_name`、および `readonly` は、`clickhouse_connect.get_client` メソッドを使ってクライアントを作成する前に必ず変更されるべきです。クライアント作成後にこれらの設定を変更しても、既存のクライアントの動作には影響しません。 -::: - -現在、10 のグローバル設定が定義されています: - -| Setting Name | Default | Options | Description | -|-------------------------|---------|-------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| autogenerate_session_id | True | True, False | 各クライアントセッションのための新しい UUID(1) セッション ID を自動生成します(提供されていない場合)。セッション ID が提供されない場合(クライアントまたはクエリレベルのいずれかで)、ClickHouse は各クエリのためのランダムな内部 ID を生成します。 | -| invalid_setting_action | 'error' | 'drop', 'send', 'error' | 無効または読み取り専用の設定が提供された場合(クライアントセッションまたはクエリのいずれかに対して)、どのアクションを取るか。`drop` の場合、その設定は無視され、`send` の場合は設定が ClickHouse に送信され、`error` の場合はクライアント側で ProgrammingError がスローされます。| -| dict_parameter_format | 'json' | 'json', 'map' | これは、パラメータ化されたクエリが Python 辞書を JSON 形式または ClickHouse マップ構文に変換するかを制御します。`json` は JSON カラムへの挿入のために使用され、`map` は ClickHouse マップカラムのために使用されます。 | -| product_name | | | ClickHouse にクエリを渡す際に、ClickHouse Connect を使用するアプリを追跡するために渡される文字列。形式は <product name;&gl/<product version> にするべきです。 | -| max_connection_age | 600 | | HTTP Keep Alive 接続が開かれて再利用される最大秒数。この設定により、負荷分散装置/プロキシの背後にある単一の ClickHouse ノードへの接続の束を防ぎます。デフォルトは 10 分です。 | -| readonly | 0 | 0, 1 | 19.17 より前のバージョンに対する暗黙の「read_only」ClickHouse 設定。 ClickHouse の「read_only」値に一致させるために設定でき、非常に古い ClickHouse バージョンとの操作を許可します。 | -| use_protocol_version | True | True, False | クライアントプロトコルバージョンを使用します。これは DateTime タイムゾーンカラムに必要ですが、現在の chproxy のバージョンでは壊れます。 | -| max_error_size | 1024 | | クライアントエラーメッセージで返される最大文字数。この設定を 0 にすると、完全な ClickHouse エラーメッセージが取得されます。デフォルトは 1024 文字です。 | -| send_os_user | True | True, False | ClickHouse に送信されるクライアント情報に検出されたオペレーティングシステムユーザーを含めます(HTTP User-Agent 文字列)。 | -| http_buffer_size | 10MB | | HTTP ストリーミングクエリ用の「メモリ内」バッファのサイズ(バイト単位)。 | -### 圧縮 {#compression} - -ClickHouse Connect は、クエリ結果とインサートの両方に対して lz4、zstd、brotli、gzip 圧縮をサポートしています。圧縮を使用することは、ネットワーク帯域幅/転送速度と CPU 使用率(クライアントおよびサーバーの両方)との間で通常トレードオフがあることを常に念頭に置いてください。 - -圧縮されたデータを受け取るには、ClickHouse サーバーの `enable_http_compression` を 1 に設定する必要があるか、ユーザーがクエリごとに設定を変更する権限を持っている必要があります。 - -圧縮は、`clickhouse_connect.get_client` ファクトリメソッドを呼び出す際の `compress` パラメータによって制御されます。デフォルトでは `compress` は `True` に設定されており、これによりデフォルトの圧縮設定がトリガーされます。`query`、`query_np`、`query_df` クライアントメソッドを使用して実行されたクエリに対して、ClickHouse Connect は `Accept-Encoding` ヘッダーに `lz4`、`zstd`、`br`(brotli、brotli ライブラリがインストールされている場合)、`gzip`、および `deflate` エンコーディングを追加します。 (ほとんどのリクエストに対して、ClickHouse サーバーは `zstd` 圧縮ペイロードで応答します。)インサートに関しては、デフォルトで ClickHouse Connect はインサートブロックを `lz4` 圧縮で圧縮し、`Content-Encoding: lz4` HTTP ヘッダーを送信します。 - -`get_client` の `compress` パラメータは、`lz4`、`zstd`、`br`、または `gzip` の特定の圧縮方法に設定することもできます。その方法は、インサートとクエリ結果の両方に使用されます(ClickHouse サーバーがサポートしている場合)。必要な `zstd` および `lz4` 圧縮ライブラリは、ClickHouse Connect のデフォルトでインストールされています。`br`/brotli が指定されている場合、brotli ライブラリは別途インストールする必要があります。 - -`raw*` クライアントメソッドは、クライアント設定によって指定された圧縮を使用しないことに注意してください。 - -また、データの圧縮と解凍の両方において、代替手段よりも大幅に遅くなるため、`gzip` 圧縮の使用は推奨されません。 - -### HTTP プロキシサポート {#http-proxy-support} - -ClickHouse Connect は、`urllib3` 
ライブラリを使用して基本的な HTTP プロキシサポートを追加します。標準の `HTTP_PROXY` および `HTTPS_PROXY` 環境変数を認識します。これらの環境変数を使用すると、`clickhouse_connect.get_client` メソッドで作成された任意のクライアントに適用されます。代わりに、クライアントごとに構成する場合は、get_client メソッドの `http_proxy` または `https_proxy` 引数を使用できます。HTTP プロキシサポートの実装の詳細については、[urllib3](https://urllib3.readthedocs.io/en/stable/advanced-usage.html#http-and-https-proxies) ドキュメントを参照してください。 - -Socks プロキシを使用する場合は、`urllib3` SOCKSProxyManager を `get_client` の `pool_mgr` 引数として送信できます。この場合、PySocks ライブラリを直接インストールするか、`urllib3` 依存関係の `[socks]` オプションを使用する必要があります。 - -### "古い" JSON データ型 {#old-json-data-type} - -実験的な `Object`(または `Object('json')`)データ型は非推奨であり、生産環境では避けるべきです。ClickHouse Connect は後方互換性のためにこのデータ型の制限されたサポートを提供し続けます。注意すべきことは、このサポートには、辞書やそれに相当するものとして「トップレベル」または「親」JSON 値を返すことが期待されるクエリが含まれておらず、そのようなクエリは例外を引き起こすことです。 - -### "新しい" Variant/Dynamic/JSON データ型(実験的機能) {#new-variantdynamicjson-datatypes-experimental-feature} - -0.8.0 リリースから、`clickhouse-connect` は新しい(非実験的でもある)ClickHouse 型の Variant、Dynamic、JSON に対する実験的なサポートを提供します。 - -#### 使用上の注意 {#usage-notes} -- JSON データは、Python 辞書または JSON オブジェクト `{}` を含む JSON 文字列として挿入できます。他の形式の JSON データはサポートされていません。 -- これらの型のサブカラム/パスを使用したクエリは、サブカラムの型を返します。 -- 他の使用上の注意については、主な ClickHouse ドキュメントを参照してください。 - -#### 既知の制限事項: {#known-limitations} -- これらの型のそれぞれは、使用する前に ClickHouse 設定で有効にする必要があります。 -- 「新しい」JSON 型は、ClickHouse 24.8 リリースから使用可能です。 -- 内部フォーマットの変更により、`clickhouse-connect` は ClickHouse 24.7 リリースから始まる Variant 型とのみ互換性があります。 -- 返された JSON オブジェクトは、`max_dynamic_paths` 数の要素のみを返します(デフォルトは 1024)。これは将来のリリースで修正されます。 -- `Dynamic` カラムへのインサートは、Python 値の文字列表現となります。これは将来のリリースで修正される予定で、https://github.com/ClickHouse/ClickHouse/issues/70395 が修正された後に行われます。 -- 新しい型の実装は C コードで最適化されていないため、従来のシンプルなデータ型よりも性能がわずかに遅くなる場合があります。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/python/index.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/python/index.md.hash deleted file mode 100644 index d896fd32b71..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/python/index.md.hash +++ /dev/null @@ -1 +0,0 @@ -bf5975c7e5eb807d diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/rust.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/rust.md deleted file mode 100644 index 6a320ad57f3..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/rust.md +++ /dev/null @@ -1,564 +0,0 @@ ---- -sidebar_label: 'Rust' -sidebar_position: 4 -keywords: -- 'clickhouse' -- 'rs' -- 'rust' -- 'cargo' -- 'crate' -- 'http' -- 'client' -- 'connect' -- 'integrate' -slug: '/integrations/rust' -description: 'The official Rust client for connecting to ClickHouse.' 
-title: 'ClickHouse Rust Client' ---- - - - - - -# ClickHouse Rust Client - -ClickHouseへの接続のための公式Rustクライアントで、元々は[Paul Loyd](https://github.com/loyd)によって開発されました。クライアントのソースコードは[GitHubリポジトリ](https://github.com/ClickHouse/clickhouse-rs)にあります。 - -## 概要 {#overview} - -* 行のエンコーディング/デコーディングに`serde`を使用。 -* `serde`属性をサポート:`skip_serializing`、`skip_deserializing`、`rename`。 -* [`RowBinary`](/interfaces/formats#rowbinary)形式をHTTPトランスポート上で使用。 - * TCP経由で[`Native`](/interfaces/formats#native)に切り替える計画があります。 -* TLS(`native-tls`および`rustls-tls`機能を通じて)をサポート。 -* 圧縮および解凍(LZ4)をサポート。 -* データの選択または挿入、DDLの実行、およびクライアントサイドのバッチ処理用のAPIを提供。 -* ユニットテスト用の便利なモックを提供。 - -## インストール {#installation} - -クレートを使用するには、`Cargo.toml`に以下を追加します。 - -```toml -[dependencies] -clickhouse = "0.12.2" - -[dev-dependencies] -clickhouse = { version = "0.12.2", features = ["test-util"] } -``` - -他の情報: [crates.ioページ](https://crates.io/crates/clickhouse)。 - -## Cargo機能 {#cargo-features} - -* `lz4`(デフォルトで有効) — `Compression::Lz4`と`Compression::Lz4Hc(_)`バリアントを有効にします。これが有効な場合、`WATCH`を除くすべてのクエリに対してデフォルトで`Compression::Lz4`が使用されます。 -* `native-tls` — OpenSSLにリンクして、`HTTPS`スキーマのURLをサポートします。 -* `rustls-tls` — OpenSSLにリンクせず、`hyper-rustls`を介して`HTTPS`スキーマのURLをサポートします。 -* `inserter` — `client.inserter()`を有効にします。 -* `test-util` — モックを追加します。[例](https://github.com/ClickHouse/clickhouse-rs/tree/main/examples/mock.rs)を参照。開発依存関係でのみ使用してください。 -* `watch` — `client.watch`機能を有効にします。詳細は該当するセクションを参照してください。 -* `uuid` — [uuid](https://docs.rs/uuid)クレートと連携するために`serde::uuid`を追加します。 -* `time` — [time](https://docs.rs/time)クレートと連携するために`serde::time`を追加します。 - -:::important -`HTTPS` URL経由でClickHouseに接続する際は、`native-tls`または`rustls-tls`機能のいずれかを有効にする必要があります。 -両方が有効な場合は、`rustls-tls`機能が優先されます。 -::: - -## ClickHouseバージョンの互換性 {#clickhouse-versions-compatibility} - -このクライアントは、LTSまたはそれ以降のClickHouseバージョン、ならびにClickHouse Cloudに対応しています。 - -ClickHouseサーバーがv22.6未満の場合、RowBinaryを[奇妙に処理](https://github.com/ClickHouse/ClickHouse/issues/37420)します。この問題を解決するには、v0.11以上を使用し、`wa-37420`機能を有効にすることができます。注:この機能は新しいClickHouseバージョンでは使用しないでください。 - -## 例 {#examples} - -クライアントの使用に関するさまざまなシナリオをカバーすることを目指して、クライアントリポジトリの[例](https://github.com/ClickHouse/clickhouse-rs/blob/main/examples)に提供しています。概要は[例のREADME](https://github.com/ClickHouse/clickhouse-rs/blob/main/examples/README.md#overview)にあります。 - -例や次のドキュメントに不明点や不足があれば、[お問い合わせ](./rust.md#contact-us)ください。 - -## 使用法 {#usage} - -:::note -[ch2rs](https://github.com/ClickHouse/ch2rs)クレートは、ClickHouseから行型を生成するのに便利です。 -::: - -### クライアントインスタンスの作成 {#creating-a-client-instance} - -:::tip -作成したクライアントを再利用するか、それらをクローンして、基盤となるhyper接続プールを再利用してください。 -::: - -```rust -use clickhouse::Client; - -let client = Client::default() - // プロトコルとポートの両方を含める必要があります - .with_url("http://localhost:8123") - .with_user("name") - .with_password("123") - .with_database("test"); -``` - -### HTTPSまたはClickHouse Cloud接続 {#https-or-clickhouse-cloud-connection} - -HTTPSは、`rustls-tls`または`native-tls`のCargo機能のいずれかで動作します。 - -その後、通常通りクライアントを作成します。この例では、環境変数を使用して接続の詳細を格納しています: - -:::important -URLには、プロトコルとポートの両方を含める必要があります。例:`https://instance.clickhouse.cloud:8443`。 -::: - -```rust -fn read_env_var(key: &str) -> String { - env::var(key).unwrap_or_else(|_| panic!("{key} env variable should be set")) -} - -let client = Client::default() - .with_url(read_env_var("CLICKHOUSE_URL")) - .with_user(read_env_var("CLICKHOUSE_USER")) - .with_password(read_env_var("CLICKHOUSE_PASSWORD")); -``` - -他にも: -- [ClickHouse 
CloudのHTTPS例](https://github.com/ClickHouse/clickhouse-rs/blob/main/examples/clickhouse_cloud.rs)。これはオンプレミスのHTTPS接続にも適用可能です。 - -### 行を選択 {#selecting-rows} - -```rust -use serde::Deserialize; -use clickhouse::Row; -use clickhouse::sql::Identifier; - -#[derive(Row, Deserialize)] -struct MyRow<'a> { - no: u32, - name: &'a str, -} - -let table_name = "some"; -let mut cursor = client - .query("SELECT ?fields FROM ? WHERE no BETWEEN ? AND ?") - .bind(Identifier(table_name)) - .bind(500) - .bind(504) - .fetch::>()?; - -while let Some(row) = cursor.next().await? { .. } -``` - -* プレースホルダ`?fields`は`no, name`(`Row`のフィールド)に置き換えられます。 -* プレースホルダ`?`は次の`bind()`呼び出しで値に置き換えられます。 -* 最初の行またはすべての行を取得するために、便利な`fetch_one::()`および`fetch_all::()`メソッドが使用できます。 -* `sql::Identifier`を使用してテーブル名をバインドできます。 - -注意:応答全体がストリーミングされるため、カーソルは数行を生成した後でもエラーを返す可能性があります。この場合、クエリを`query(...).with_option("wait_end_of_query", "1")`を使用して、サーバー側での応答バッファリングを有効にしてください。[詳細](/interfaces/http/#response-buffering)。`buffer_size`オプションも便利です。 - -:::warning -行を選択する際は`wait_end_of_query`を慎重に使用してください。サーバー側でのメモリ消費が増加し、全体的なパフォーマンスが低下する可能性があります。 -::: - -### 行の挿入 {#inserting-rows} - -```rust -use serde::Serialize; -use clickhouse::Row; - -#[derive(Row, Serialize)] -struct MyRow { - no: u32, - name: String, -} - -let mut insert = client.insert("some")?; -insert.write(&MyRow { no: 0, name: "foo".into() }).await?; -insert.write(&MyRow { no: 1, name: "bar".into() }).await?; -insert.end().await?; -``` - -* `end()`が呼び出されない場合、`INSERT`は中止されます。 -* 行はストリームとして段階的に送信され、ネットワーク負荷を分散します。 -* ClickHouseは、すべての行が同じパーティションに収まる場合にのみバッチを原子的に挿入します。また、その数は[`max_insert_block_size`](https://clickhouse.tech/docs/operations/settings/settings/#settings-max_insert_block_size)より少なくなければなりません。 - -### 非同期挿入(サーバーサイドバッチ処理) {#async-insert-server-side-batching} - -[ClickHouseの非同期挿入](/optimize/asynchronous-inserts)を使用して、クライアントサイドのバッチ処理を回避できます。これは、`insert`メソッド(または、`Client`インスタンス自体)に`async_insert`オプションを提供することで行えます。 - -```rust -let client = Client::default() - .with_url("http://localhost:8123") - .with_option("async_insert", "1") - .with_option("wait_for_async_insert", "0"); -``` - -他に: -- [非同期挿入の例](https://github.com/ClickHouse/clickhouse-rs/blob/main/examples/async_insert.rs)をクライアントリポジトリで確認してください。 - -### Inserter機能(クライアントサイドバッチ処理) {#inserter-feature-client-side-batching} - -`inserter` Cargo機能が必要です。 - -```rust -let mut inserter = client.inserter("some")? 
- .with_timeouts(Some(Duration::from_secs(5)), Some(Duration::from_secs(20))) - .with_max_bytes(50_000_000) - .with_max_rows(750_000) - .with_period(Some(Duration::from_secs(15))); - -inserter.write(&MyRow { no: 0, name: "foo".into() })?; -inserter.write(&MyRow { no: 1, name: "bar".into() })?; -let stats = inserter.commit().await?; -if stats.rows > 0 { - println!( - "{}バイト、{}行、{}トランザクションが挿入されました", - stats.bytes, stats.rows, stats.transactions, - ); -} - -// アプリケーションのシャットダウン時にInserterを最終化し、残りの行をコミットするのを忘れないでください。 -// `.end()`も統計を提供します。 -inserter.end().await?; -``` - -* `Inserter`は、任意の閾値(`max_bytes`、`max_rows`、`period`)に達した場合に`commit()`でアクティブな挿入を終了します。 -* アクティブな`INSERT`を終了させる間隔は、`with_period_bias`を使用してバイアスをかけることができ、並列挿入による負荷のスパイクを回避します。 -* `Inserter::time_left()`を使用して、現在の期間が終了するタイミングを検出できます。アイテムが稀にしか発生しない場合は、`Inserter::commit()`を再度呼び出して制限を確認します。 -* 時間閾値は、`inserter`を高速化するために[quanta](https://docs.rs/quanta)クレートを使用して実装されます。`test-util`が有効な場合は使用されません(したがって、カスタムテストでは`tokio::time::advance()`で時間を管理できます)。 -* `commit()`呼び出しの間のすべての行は、同じ`INSERT`ステートメントで挿入されます。 - -:::warning -挿入を終了/最終化したい場合は、フラッシュを忘れないでください: -```rust -inserter.end().await?; -``` -::: - -### DDLの実行 {#executing-ddls} - -単一ノードのデプロイメントでは、次のようにしてDDLを実行するだけで済みます: - -```rust -client.query("DROP TABLE IF EXISTS some").execute().await?; -``` - -しかし、ロードバランサーまたはClickHouse Cloudでのクラスターデプロイメントでは、すべてのレプリカにDDLが適用されるのを待つことをお勧めします。これには`wait_end_of_query`オプションを使用します。次のように行うことができます: - -```rust -client - .query("DROP TABLE IF EXISTS some") - .with_option("wait_end_of_query", "1") - .execute() - .await?; -``` - -### ClickHouse設定 {#clickhouse-settings} - -`with_option`メソッドを使用して、さまざまな[ClickHouse設定](/operations/settings/settings)を適用できます。例えば: - -```rust -let numbers = client - .query("SELECT number FROM system.numbers") - // この設定はこの特定のクエリにのみ適用されます。 - // グローバルクライアント設定を上書きします。 - .with_option("limit", "3") - .fetch_all::() - .await?; -``` - -`query`に加え、`insert`および`inserter`メソッドでも同様に機能します。さらに、同じメソッドを`Client`インスタンスで呼び出して、すべてのクエリに対するグローバル設定を設定できます。 - -### クエリID {#query-id} - -`.with_option`を使用して、ClickHouseのクエリログでクエリを識別するために`query_id`オプションを設定できます。 - -```rust -let numbers = client - .query("SELECT number FROM system.numbers LIMIT 1") - .with_option("query_id", "some-query-id") - .fetch_all::() - .await?; -``` - -`query`に加え、`insert`および`inserter`メソッドでも同様に機能します。 - -:::danger -`query_id`を手動で設定する場合は、それがユニークであることを確認してください。UUIDが良い選択です。 -::: - -他にも:[query_idの例](https://github.com/ClickHouse/clickhouse-rs/blob/main/examples/query_id.rs)をクライアントリポジトリで確認してください。 - -### セッションID {#session-id} - -`query_id`と同様に、同じセッション内でステートメントを実行するために`session_id`を設定できます。`session_id`は、クライアントレベルでグローバルに設定するか、`query`、`insert`、または`inserter`呼び出しごとに設定することができます。 - -```rust -let client = Client::default() - .with_url("http://localhost:8123") - .with_option("session_id", "my-session"); -``` - -:::danger -クラスターデプロイメントでは、"sticky sessions"がないため、この機能を適切に利用するには、_特定のクラスターノード_に接続する必要があります。例えば、ラウンドロビンロードバランサーは、次のリクエストが同じClickHouseノードによって処理されることを保証しません。 -::: - -他にも:[session_idの例](https://github.com/ClickHouse/clickhouse-rs/blob/main/examples/session_id.rs)をクライアントリポジトリで確認してください。 - -### カスタムHTTPヘッダー {#custom-http-headers} - -プロキシ認証を使用している場合やカスタムヘッダーを渡す必要がある場合、次のように行うことができます。 - -```rust -let client = Client::default() - .with_url("http://localhost:8123") - .with_header("X-My-Header", "hello"); -``` - -他にも:[カスタムHTTPヘッダーの例](https://github.com/ClickHouse/clickhouse-rs/blob/main/examples/custom_http_headers.rs)をクライアントリポジトリで確認してください。 - -### カスタムHTTPクライアント {#custom-http-client} - -これは、基盤となるHTTP接続プールの設定を微調整するのに便利です。 - -```rust 
-use hyper_util::client::legacy::connect::HttpConnector; -use hyper_util::client::legacy::Client as HyperClient; -use hyper_util::rt::TokioExecutor; - -let connector = HttpConnector::new(); // またはHttpsConnectorBuilder -let hyper_client = HyperClient::builder(TokioExecutor::new()) - // クライアント側で特定のアイドルソケットをどれくらい保持するか(ミリ秒)。 - // ClickHouseサーバーのKeepAliveタイムアウトよりかなり小さいことが想定されています。 - // これは、前の23.11バージョンではデフォルトで3秒、以降は10秒でした。 - .pool_idle_timeout(Duration::from_millis(2_500)) - // プール内で保持される最大アイドルKeep-Alive接続を設定します。 - .pool_max_idle_per_host(4) - .build(connector); - -let client = Client::with_http_client(hyper_client).with_url("http://localhost:8123"); -``` - -:::warning -この例は古いHyper APIに依存しており、今後変更される可能性があります。 -::: - -他にも:[カスタムHTTPクライアントの例](https://github.com/ClickHouse/clickhouse-rs/blob/main/examples/custom_http_client.rs)をクライアントリポジトリで確認してください。 - -## データ型 {#data-types} - -:::info -追加の例も見てください: -* [簡単なClickHouseデータ型](https://github.com/ClickHouse/clickhouse-rs/blob/main/examples/data_types_derive_simple.rs) -* [コンテナのようなClickHouseデータ型](https://github.com/ClickHouse/clickhouse-rs/blob/main/examples/data_types_derive_containers.rs) -::: - -* `(U)Int(8|16|32|64|128)`は、対応する`(u|i)(8|16|32|64|128)`型またはそれに基づく新しい型にマッピングされます。 -* `(U)Int256`は直接サポートされていませんが、[回避策があります](https://github.com/ClickHouse/clickhouse-rs/issues/48)。 -* `Float(32|64)`は、対応する`f(32|64)`型またはそれに基づく新しい型にマッピングされます。 -* `Decimal(32|64|128)`は、対応する`i(32|64|128)`型またはそれに基づく新しい型にマッピングされます。符号付き固定小数点数の[`fixnum`](https://github.com/loyd/fixnum)や他の実装を使用する方が便利です。 -* `Boolean`は、`bool`型またはその周りの新しい型にマッピングされます。 -* `String`は、任意の文字列またはバイト型にマッピングされます。例えば、`&str`、`&[u8]`、`String`、`Vec`、または[`SmartString`](https://docs.rs/smartstring/latest/smartstring/struct.SmartString.html)。新しい型もサポートされています。バイトを格納する場合は、より効率的な[`serde_bytes`](https://docs.rs/serde_bytes/latest/serde_bytes/)を使用することを検討してください。 - -```rust -#[derive(Row, Debug, Serialize, Deserialize)] -struct MyRow<'a> { - str: &'a str, - string: String, - #[serde(with = "serde_bytes")] - bytes: Vec, - #[serde(with = "serde_bytes")] - byte_slice: &'a [u8], -} -``` - -* `FixedString(N)`は、バイトの配列としてサポートされています。例えば、`[u8; N]`。 - -```rust -#[derive(Row, Debug, Serialize, Deserialize)] -struct MyRow { - fixed_str: [u8; 16], // FixedString(16) -} -``` -* `Enum(8|16)`は、[`serde_repr`](https://docs.rs/serde_repr/latest/serde_repr/)を使用してサポートされます。 - -```rust -use serde_repr::{Deserialize_repr, Serialize_repr}; - -#[derive(Row, Serialize, Deserialize)] -struct MyRow { - level: Level, -} - -#[derive(Debug, Serialize_repr, Deserialize_repr)] -#[repr(u8)] -enum Level { - Debug = 1, - Info = 2, - Warn = 3, - Error = 4, -} -``` -* `UUID`は、[`uuid::Uuid`](https://docs.rs/uuid/latest/uuid/struct.Uuid.html)にマッピングされ、`serde::uuid`を使用します。`uuid`機能が必要です。 - -```rust -#[derive(Row, Serialize, Deserialize)] -struct MyRow { - #[serde(with = "clickhouse::serde::uuid")] - uuid: uuid::Uuid, -} -``` -* `IPv6`は、[`std::net::Ipv6Addr`](https://doc.rust-lang.org/stable/std/net/struct.Ipv6Addr.html)にマッピングされます。 -* `IPv4`は、[`std::net::Ipv4Addr`](https://doc.rust-lang.org/stable/std/net/struct.Ipv4Addr.html)にマッピングされ、`serde::ipv4`を使用します。 - -```rust -#[derive(Row, Serialize, Deserialize)] -struct MyRow { - #[serde(with = "clickhouse::serde::ipv4")] - ipv4: std::net::Ipv4Addr, -} -``` -* `Date`は、`u16`またはそれに基づく新しい型にマッピングされ、`1970-01-01`以来の経過日数を表します。また、`serde::time::date`を使用して、[`time::Date`](https://docs.rs/time/latest/time/struct.Date.html)もサポートされています。これには`time`機能が必要です。 - -```rust -#[derive(Row, Serialize, Deserialize)] -struct MyRow { - days: u16, - #[serde(with = 
"clickhouse::serde::time::date")] - date: Date, -} -``` -* `Date32`は、`i32`またはそれに基づく新しい型にマッピングされ、`1970-01-01`以来の経過日数を表します。また、`serde::time::date32`を使用して、[`time::Date`](https://docs.rs/time/latest/time/struct.Date.html)もサポートされています。これには`time`機能が必要です。 - -```rust -#[derive(Row, Serialize, Deserialize)] -struct MyRow { - days: i32, - #[serde(with = "clickhouse::serde::time::date32")] - date: Date, -} -``` -* `DateTime`は、`u32`またはそれに基づく新しい型にマッピングされ、UNIXエポックからの経過秒数を表します。また、`serde::time::datetime`を使用して、[`time::OffsetDateTime`](https://docs.rs/time/latest/time/struct.OffsetDateTime.html)もサポートされています。これには`time`機能が必要です。 - -```rust -#[derive(Row, Serialize, Deserialize)] -struct MyRow { - ts: u32, - #[serde(with = "clickhouse::serde::time::datetime")] - dt: OffsetDateTime, -} -``` - -* `DateTime64(_)`は、`i32`またはそれに基づく新しい型にマッピングされ、UNIXエポックからの経過時間を表します。また、`serde::time::datetime64::*`を使用して、[`time::OffsetDateTime`](https://docs.rs/time/latest/time/struct.OffsetDateTime.html)もサポートされています。これには`time`機能が必要です。 - -```rust -#[derive(Row, Serialize, Deserialize)] -struct MyRow { - ts: i64, // `DateTime64(X)`に応じて秒/µs/ms/nsの経過時間 - #[serde(with = "clickhouse::serde::time::datetime64::secs")] - dt64s: OffsetDateTime, // `DateTime64(0)` - #[serde(with = "clickhouse::serde::time::datetime64::millis")] - dt64ms: OffsetDateTime, // `DateTime64(3)` - #[serde(with = "clickhouse::serde::time::datetime64::micros")] - dt64us: OffsetDateTime, // `DateTime64(6)` - #[serde(with = "clickhouse::serde::time::datetime64::nanos")] - dt64ns: OffsetDateTime, // `DateTime64(9)` -} -``` - -* `Tuple(A, B, ...)`は`(A, B, ...)`またはそれに基づく新しい型にマッピングされます。 -* `Array(_)`は任意のスライスにマッピングされます。例えば`Vec<_>`、`&[_]`。新しい型もサポートされています。 -* `Map(K, V)`は`Array((K, V))`のように動作します。 -* `LowCardinality(_)`はシームレスにサポートされます。 -* `Nullable(_)`は`Option<_>`にマッピングされます。`clickhouse::serde::*`ヘルパーには`::option`を追加します。 - -```rust -#[derive(Row, Serialize, Deserialize)] -struct MyRow { - #[serde(with = "clickhouse::serde::ipv4::option")] - ipv4_opt: Option, -} -``` -* `Nested`は、リネーミングを伴う複数の配列を提供することでサポートされます。 -```rust -// CREATE TABLE test(items Nested(name String, count UInt32)) -#[derive(Row, Serialize, Deserialize)] -struct MyRow { - #[serde(rename = "items.name")] - items_name: Vec, - #[serde(rename = "items.count")] - items_count: Vec, -} -``` -* `Geo`タイプがサポートされています。`Point`はタプル`(f64, f64)`のように振る舞い、他のタイプは単なるポイントのスライスです。 -```rust -type Point = (f64, f64); -type Ring = Vec; -type Polygon = Vec; -type MultiPolygon = Vec; -type LineString = Vec; -type MultiLineString = Vec; - -#[derive(Row, Serialize, Deserialize)] -struct MyRow { - point: Point, - ring: Ring, - polygon: Polygon, - multi_polygon: MultiPolygon, - line_string: LineString, - multi_line_string: MultiLineString, -} -``` - -* `Variant`、`Dynamic`、(新しい) `JSON`データ型はまだサポートされていません。 - -## モック {#mocking} -このクレートは、CHサーバーをモックし、DDL、`SELECT`、`INSERT`および`WATCH`クエリをテストするためのユーティリティを提供します。この機能は`test-util`機能で有効にできます。**開発依存関係としてのみ使用してください**。 - -[例](https://github.com/ClickHouse/clickhouse-rs/tree/main/examples/mock.rs)を参照してください。 - -## トラブルシューティング {#troubleshooting} - -### CANNOT_READ_ALL_DATA {#cannot_read_all_data} - -`CANNOT_READ_ALL_DATA`エラーの最も一般的な原因は、アプリケーション側の行定義がClickHouseのものと一致しないことです。 - -次のテーブルを考えてみてください: - -```sql -CREATE OR REPLACE TABLE event_log (id UInt32) -ENGINE = MergeTree -ORDER BY timestamp -``` - -その後、`EventLog`がアプリケーション側で不一致な型とともに定義されている場合、例えば: - -```rust -#[derive(Debug, Serialize, Deserialize, Row)] -struct EventLog { - id: String, // <- 代わりにu32にする必要があります! 
-} -``` - -データを挿入する際に、次のようなエラーが発生する可能性があります: - -```response -Error: BadResponse("Code: 33. DB::Exception: Cannot read all data. Bytes read: 5. Bytes expected: 23.: (at row 1)\n: While executing BinaryRowInputFormat. (CANNOT_READ_ALL_DATA)") -``` - -この例では、`EventLog`構造体の定義を正しく修正することで解決されます: - -```rust -#[derive(Debug, Serialize, Deserialize, Row)] -struct EventLog { - id: u32 -} -``` - -## 既知の制限 {#known-limitations} - -* `Variant`、`Dynamic`、(新しい) `JSON`データ型はまだサポートされていません。 -* サーバーサイドのパラメータバインディングはまだサポートされていません。詳細は[this issue](https://github.com/ClickHouse/clickhouse-rs/issues/142)を参照してください。 - -## お問い合わせ {#contact-us} - -質問や支援が必要な場合は、[コミュニティSlack](https://clickhouse.com/slack)または[GitHubのIssues](https://github.com/ClickHouse/clickhouse-rs/issues)を通じて気軽にご連絡ください。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/rust.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/rust.md.hash deleted file mode 100644 index 8eba9a95e47..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/rust.md.hash +++ /dev/null @@ -1 +0,0 @@ -d20631a8e22fda8b diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/migration/_category_.yml b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/migration/_category_.yml deleted file mode 100644 index a0f40febf2e..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/migration/_category_.yml +++ /dev/null @@ -1,8 +0,0 @@ -position: 500 -label: 'Migration' -collapsible: true -collapsed: true -link: - type: generated-index - title: Migration - slug: /cloud/migration diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/migration/clickhouse-local-etl.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/migration/clickhouse-local-etl.md deleted file mode 100644 index 3a0c12ede0e..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/migration/clickhouse-local-etl.md +++ /dev/null @@ -1,148 +0,0 @@ ---- -sidebar_label: 'clickhouse-local の使用' -sidebar_position: 20 -keywords: -- 'clickhouse' -- 'migrate' -- 'migration' -- 'migrating' -- 'data' -- 'etl' -- 'elt' -- 'clickhouse-local' -- 'clickhouse-client' -slug: '/cloud/migration/clickhouse-local' -title: 'ClickHouse を使用して clickhouse-local に移行する' -description: 'clickhouse-local を使用して ClickHouse に移行する方法を示すガイド' ---- - -import Image from '@theme/IdealImage'; -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; -import CodeBlock from '@theme/CodeBlock'; -import AddARemoteSystem from '@site/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_add_remote_ip_access_list_detail.md'; -import ch_local_01 from '@site/static/images/integrations/migration/ch-local-01.png'; -import ch_local_02 from '@site/static/images/integrations/migration/ch-local-02.png'; -import ch_local_03 from '@site/static/images/integrations/migration/ch-local-03.png'; -import ch_local_04 from '@site/static/images/integrations/migration/ch-local-04.png'; - - -# ClickHouseへの移行方法:clickhouse-localを使用する - - - -ClickHouse、より具体的には[`clickhouse-local`](/operations/utilities/clickhouse-local.md)をETLツールとして利用して、現在のデータベースシステムからClickHouse Cloudへデータを移行できます。ただし、現在のデータベースシステムには、ClickHouseが提供する[インテグレーションエンジン](/engines/table-engines/#integration-engines)または[テーブル関数](/sql-reference/table-functions/)があるか、ベンダーが提供するJDBCドライバまたはODBCドライバが利用可能である必要があります。 - 
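参考までに、以下はソースデータベースが PostgreSQL であると仮定した場合の最小限のスケッチです。ホスト名・テーブル名・認証情報はすべて説明用の仮の値であり、実際の環境に合わせて置き換える必要があります。`remoteSecure` および `postgresql` テーブル関数を使った詳細な手順は、後述の例を参照してください。

```sql
./clickhouse-local --query "
INSERT INTO FUNCTION
remoteSecure('HOSTNAME.clickhouse.cloud:9440', 'db.table', 'default', 'PASS')
-- ソースの PostgreSQL テーブルを読み取り、そのまま ClickHouse Cloud のテーブルへ書き込みます
SELECT * FROM postgresql('host:port', 'database', 'table', 'user', 'password');"
```

このとき、データは `clickhouse-local` を実行するホストには保存されず、ソースから読み取られると同時に宛先テーブルへ書き込まれます。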
-この移行方法は「ピボット」方式と呼ばれることがあります。ソースデータベースからデスティネーションデータベースへのデータを移動させるための中間ピボットポイントまたはホップを利用するためです。例えば、セキュリティ要件によりプライベートまたは内部ネットワーク内からのアウトバウンド接続のみが許可されている場合、clickhouse-localを使用してソースデータベースからデータを取得し、その後データをデスティネーションのClickHouseデータベースへプッシュする必要があります。このとき、clickhouse-localがピボットポイントとして機能します。 - -ClickHouseは、[MySQL](/engines/table-engines/integrations/mysql/)、[PostgreSQL](/engines/table-engines/integrations/postgresql)、[MongoDB](/engines/table-engines/integrations/mongodb)、および[SQLite](/engines/table-engines/integrations/sqlite)用のインテグレーションエンジンと、テーブル関数(即座にインテグレーションエンジンを作成)を提供しています。他の一般的なデータベースシステムについては、システムのベンダーからJDBCドライバまたはODBCドライバが提供されています。 - -## clickhouse-localとは何ですか? {#what-is-clickhouse-local} - - - -通常、ClickHouseはクラスターの形式で実行され、複数のインスタンスが異なるサーバーで分散して実行されます。 - -単一サーバーでは、ClickHouseデータベースエンジンは`clickhouse-server`プログラムの一部として実行されます。データベースへのアクセス(パス、ユーザー、セキュリティなど)は、サーバー設定ファイルで設定されます。 - -`clickhouse-local`ツールを使用すると、ClickHouseデータベースエンジンをコマンドラインユーティリティとして孤立させ、設定やクリックハウスサーバーを起動せずに迅速なSQLデータ処理を実行できます。 - -## clickhouse-localのインストール {#installing-clickhouse-local} - -`clickhouse-local`を使用するには、現在のソースデータベースシステムおよびClickHouse Cloudターゲットサービスの両方にネットワークアクセス可能なホストマシンが必要です。 - -そのホストマシンで、コンピュータのオペレーティングシステムに基づいて適切な`clickhouse-local`ビルドをダウンロードします: - - - - -1. `clickhouse-local`をローカルにダウンロードする最も簡単な方法は、次のコマンドを実行することです: - ```bash - curl https://clickhouse.com/ | sh - ``` - -1. `clickhouse-local`を実行します(バージョンが表示されます): - ```bash - ./clickhouse-local - ``` - - - - -1. `clickhouse-local`をローカルにダウンロードする最も簡単な方法は、次のコマンドを実行することです: - ```bash - curl https://clickhouse.com/ | sh - ``` - -1. `clickhouse-local`を実行します(バージョンが表示されます): - ```bash - ./clickhouse local - ``` - - - - -:::info 注意 -このガイド全体での例は、`clickhouse-local`を実行するためのLinuxコマンド(`./clickhouse-local`)を使用しています。 -Macで`clickhouse-local`を実行するには、`./clickhouse local`を使用してください。 -::: - - -:::tip ClickHouse CloudサービスのIPアクセスリストにリモートシステムを追加する -`remoteSecure`関数がClickHouse Cloudサービスに接続できるようにするためには、リモートシステムのIPアドレスをIPアクセスリストで許可する必要があります。詳細については、このヒントの下にある**IPアクセスリストを管理**を展開してください。 -::: - - - -## 例1: MySQLからClickHouse Cloudへの移行:インテグレーションエンジンを使用する {#example-1-migrating-from-mysql-to-clickhouse-cloud-with-an-integration-engine} - -データをソースのMySQLデータベースから読み取るために[インテグレーションテーブルエンジン](/engines/table-engines/integrations/mysql/)([mysqlテーブル関数](/sql-reference/table-functions/mysql/)によって即座に作成されます)を使用し、データをClickHouse Cloudサービスのデスティネーションテーブルに書き込むために[remoteSecureテーブル関数](/sql-reference/table-functions/remote/)を使用します。 - - - -### ClickHouse Cloudサービスのデスティネーションにて: {#on-the-destination-clickhouse-cloud-service} - -#### デスティネーションデータベースを作成: {#create-the-destination-database} - - ```sql - CREATE DATABASE db - ``` - -#### MySQLテーブルと同等のスキーマを持つデスティネーションテーブルを作成: {#create-a-destination-table-that-has-a-schema-equivalent-to-the-mysql-table} - - ```sql - CREATE TABLE db.table ... 
- ``` - -:::note -ClickHouse CloudのデスティネーションテーブルのスキーマとソースMySQLテーブルのスキーマは整合している必要があります(カラム名と順序は同じで、カラムデータタイプは互換性がある必要があります)。 -::: - -### clickhouse-localホストマシンにて: {#on-the-clickhouse-local-host-machine} - -#### 移行クエリと共にclickhouse-localを実行: {#run-clickhouse-local-with-the-migration-query} - - ```sql - ./clickhouse-local --query " -INSERT INTO FUNCTION -remoteSecure('HOSTNAME.clickhouse.cloud:9440', 'db.table', 'default', 'PASS') -SELECT * FROM mysql('host:port', 'database', 'table', 'user', 'password');" - ``` - -:::note -データは`clickhouse-local`ホストマシンにローカルに保存されません。代わりに、ソースのMySQLテーブルからデータが読み取られ、その後すぐにClickHouse Cloudサービスのデスティネーションテーブルに書き込まれます。 -::: - - -## 例2: MySQLからClickHouse Cloudへの移行:JDBCブリッジを使用する {#example-2-migrating-from-mysql-to-clickhouse-cloud-with-the-jdbc-bridge} - -データをソースのMySQLデータベースから読み取るために[JDBCインテグレーションテーブルエンジン](/engines/table-engines/integrations/jdbc.md)([jdbcテーブル関数](/sql-reference/table-functions/jdbc.md)によって即座に作成されます)を使用し、[ClickHouse JDBC Bridge](https://github.com/ClickHouse/clickhouse-jdbc-bridge)およびMySQL JDBCドライバを用いて、データをClickHouse Cloudサービスのデスティネーションテーブルに書き込むために[remoteSecureテーブル関数](/sql-reference/table-functions/remote.md)を使用します。 - - - -### ClickHouse Cloudサービスのデスティネーションにて: {#on-the-destination-clickhouse-cloud-service-1} - -#### デスティネーションデータベースを作成: {#create-the-destination-database-1} - ```sql - CREATE DATABASE db - ``` diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/migration/clickhouse-local-etl.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/migration/clickhouse-local-etl.md.hash deleted file mode 100644 index ae4dff184ca..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/migration/clickhouse-local-etl.md.hash +++ /dev/null @@ -1 +0,0 @@ -017ed232de15d503 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/migration/clickhouse-to-cloud.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/migration/clickhouse-to-cloud.md deleted file mode 100644 index d6045d8facd..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/migration/clickhouse-to-cloud.md +++ /dev/null @@ -1,211 +0,0 @@ ---- -sidebar_position: 10 -sidebar_label: 'ClickHouseからClickHouseクラウドへの移行' -slug: '/cloud/migration/clickhouse-to-cloud' -title: 'セルフマネージドClickHouseとClickHouseクラウド間の移行' -description: 'セルフマネージドClickHouseとClickHouseクラウド間を移行する方法について説明するページ' ---- - -import Image from '@theme/IdealImage'; -import AddARemoteSystem from '@site/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_add_remote_ip_access_list_detail.md'; -import self_managed_01 from '@site/static/images/integrations/migration/self-managed-01.png'; -import self_managed_02 from '@site/static/images/integrations/migration/self-managed-02.png'; -import self_managed_03 from '@site/static/images/integrations/migration/self-managed-03.png'; -import self_managed_04 from '@site/static/images/integrations/migration/self-managed-04.png'; -import self_managed_05 from '@site/static/images/integrations/migration/self-managed-05.png'; -import self_managed_06 from '@site/static/images/integrations/migration/self-managed-06.png'; - - -# セルフマネージド ClickHouse から ClickHouse Cloud への移行 - - - -このガイドでは、セルフマネージド ClickHouse サーバーから ClickHouse Cloud への移行方法と、ClickHouse Cloud サービス間の移行方法を説明します。 [`remoteSecure`](../../sql-reference/table-functions/remote.md) 関数は、リモート ClickHouse サーバーにアクセスするために `SELECT` および `INSERT` クエリで使用されており、テーブルの移行を `INSERT INTO` クエリに埋め込まれた `SELECT` のように簡単に行うことができます。 - -## セルフマネージド ClickHouse から ClickHouse Cloud 
への移行 {#migrating-from-self-managed-clickhouse-to-clickhouse-cloud} - - - -:::note -ソーステーブルがシャーディングされているか、レプリケーションされているかに関係なく、ClickHouse Cloud では単に宛先テーブルを作成するだけです(このテーブルのエンジンパラメータは省略できます。自動的に ReplicatedMergeTree テーブルになります)。そして ClickHouse Cloud は、自動的に垂直および水平方向のスケーリングを管理します。テーブルのレプリケーションやシャーディングについて考える必要はありません。 -::: - -この例では、セルフマネージド ClickHouse サーバーが *ソース* であり、ClickHouse Cloud サービスが *宛先* です。 - -### 概要 {#overview} - -プロセスは以下の通りです: - -1. ソースサービスに読み取り専用ユーザーを追加 -1. 宛先サービスにソーステーブル構造を複製 -1. ネットワークの可用性に応じて、ソースから宛先にデータをプルまたはプッシュ -1. 宛先の IP アクセスリストからソースサーバーを削除(該当する場合) -1. ソースサービスから読み取り専用ユーザーを削除 - - -### システム間でのテーブル移行: {#migration-of-tables-from-one-system-to-another} -この例では、セルフマネージド ClickHouse サーバーから ClickHouse Cloud へ 1 つのテーブルを移行します。 - -### ソース ClickHouse システムで(現在データをホストしているシステム) {#on-the-source-clickhouse-system-the-system-that-currently-hosts-the-data} - -- ソーステーブル(この例では `db.table`)を読み取ることができる読み取り専用ユーザーを追加 -```sql -CREATE USER exporter -IDENTIFIED WITH SHA256_PASSWORD BY 'password-here' -SETTINGS readonly = 1; -``` - -```sql -GRANT SELECT ON db.table TO exporter; -``` - -- テーブル定義をコピー -```sql -select create_table_query -from system.tables -where database = 'db' and table = 'table' -``` - -### 宛先 ClickHouse Cloud システムで: {#on-the-destination-clickhouse-cloud-system} - -- 宛先データベースを作成: -```sql -CREATE DATABASE db -``` - -- ソースからの CREATE TABLE 文を使用して宛先を作成します。 - -:::tip -CREATE 文を実行する際に、ENGINE を ReplicatedMergeTree に変更し、パラメータを指定しないでください。ClickHouse Cloud は常にテーブルをレプリケートし、適切なパラメータを提供します。ただし、`ORDER BY`、`PRIMARY KEY`、`PARTITION BY`、`SAMPLE BY`、`TTL`、および `SETTINGS` の句は保持してください。 -::: - -```sql -CREATE TABLE db.table ... -``` - - -- `remoteSecure` 関数を使用して、セルフマネージドソースからデータをプル - - - -```sql -INSERT INTO db.table SELECT * FROM -remoteSecure('source-hostname', db, table, 'exporter', 'password-here') -``` - -:::note -ソースシステムが外部ネットワークから利用できない場合は、データをプッシュすることができます。`remoteSecure` 関数は、SELECT と INSERT の両方で機能します。次のオプションを参照してください。 -::: - -- `remoteSecure` 関数を使用して、ClickHouse Cloud サービスにデータをプッシュします。 - - - -:::tip リモートシステムを ClickHouse Cloud サービスの IP アクセスリストに追加 -`remoteSecure` 関数が ClickHouse Cloud サービスに接続するためには、リモートシステムの IP アドレスが IP アクセスリストで許可されている必要があります。このヒントの下にある **IP アクセスリストを管理する** を展開して、詳細情報を得てください。 -::: - - - -```sql -INSERT INTO FUNCTION -remoteSecure('HOSTNAME.clickhouse.cloud:9440', 'db.table', -'default', 'PASS') SELECT * FROM db.table -``` - - - -## ClickHouse Cloud サービス間の移行 {#migrating-between-clickhouse-cloud-services} - - - -ClickHouse Cloud サービス間でのデータ移行の例: -- 復元されたバックアップからのデータ移行 -- 開発サービスからステージングサービスへのデータコピー(またはステージングから本番) - -この例では、2 つの ClickHouse Cloud サービスがあり、それぞれを *ソース* および *宛先* と呼びます。データはソースから宛先にプルされます。プッシュも可能ですが、読み取り専用ユーザーを使用しているためプルの方法が示されています。 - - - -移行にはいくつかのステップがあります: -1. 1 つの ClickHouse Cloud サービスを *ソース* とし、もう 1 つを *宛先* として識別 -1. ソースサービスに読み取り専用ユーザーを追加 -1. 宛先サービスにソーステーブル構造を複製 -1. 一時的にソースサービスへの IP アクセスを許可 -1. ソースから宛先へデータをコピー -1. 宛先の IP アクセスリストを再設定 -1. 
ソースサービスから読み取り専用ユーザーを削除 - - -#### ソースサービスに読み取り専用ユーザーを追加 {#add-a-read-only-user-to-the-source-service} - -- ソーステーブル(この例では `db.table`)を読み取ることができる読み取り専用ユーザーを追加 - ```sql - CREATE USER exporter - IDENTIFIED WITH SHA256_PASSWORD BY 'password-here' - SETTINGS readonly = 1; - ``` - - ```sql - GRANT SELECT ON db.table TO exporter; - ``` - -- テーブル定義をコピー - ```sql - select create_table_query - from system.tables - where database = 'db' and table = 'table' - ``` - -#### 宛先サービスでテーブル構造を複製 {#duplicate-the-table-structure-on-the-destination-service} - -宛先にデータベースがまだない場合は、作成します: - -- 宛先データベースを作成: - ```sql - CREATE DATABASE db - ``` - - - -- ソースからの CREATE TABLE 文を使用して宛先を作成します。 - - ソースからの `select create_table_query...` の出力を使用して、宛先にテーブルを作成します: - - ```sql - CREATE TABLE db.table ... - ``` - -#### ソースサービスへのリモートアクセスを許可 {#allow-remote-access-to-the-source-service} - -ソースから宛先にデータをプルするためには、ソースサービスが接続を許可する必要があります。一時的にソースサービスの「IP アクセスリスト」機能を無効にします。 - -:::tip -ソース ClickHouse Cloud サービスを引き続き使用する場合は、どこからでものアクセスを許可する前に、既存の IP アクセスリストを JSON ファイルにエクスポートしてください。これにより、データが移行された後にアクセスリストをインポートすることができます。 -::: - -許可リストを変更し、一時的に **Anywhere** からのアクセスを許可します。詳細については、[IP アクセスリスト](/cloud/security/setting-ip-filters) ドキュメントを参照してください。 - -#### ソースから宛先へデータをコピー {#copy-the-data-from-source-to-destination} - -- `remoteSecure` 関数を使用して、ソース ClickHouse Cloud サービスからデータをプル - 宛先に接続します。宛先 ClickHouse Cloud サービスでこのコマンドを実行します: - - ```sql - INSERT INTO db.table SELECT * FROM - remoteSecure('source-hostname', db, table, 'exporter', 'password-here') - ``` - -- 宛先サービスのデータを確認 - -#### ソースでの IP アクセスリストを再設定 {#re-establish-the-ip-access-list-on-the-source} - -もし早めにアクセスリストをエクスポートしていれば、**Share** を使用して再インポートできます。そうでない場合は、アクセスリストにエントリを再追加してください。 - -#### 読み取り専用の `exporter` ユーザーを削除 {#remove-the-read-only-exporter-user} - -```sql -DROP USER exporter -``` - -- サービスの IP アクセスリストを切り替え、アクセスを制限します。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/migration/clickhouse-to-cloud.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/migration/clickhouse-to-cloud.md.hash deleted file mode 100644 index 723deb3925a..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/migration/clickhouse-to-cloud.md.hash +++ /dev/null @@ -1 +0,0 @@ -ac9fc1545f6e928c diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/migration/etl-tool-to-clickhouse.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/migration/etl-tool-to-clickhouse.md deleted file mode 100644 index 301306badab..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/migration/etl-tool-to-clickhouse.md +++ /dev/null @@ -1,33 +0,0 @@ ---- -sidebar_label: 'Using a 3rd-party ETL Tool' -sidebar_position: 20 -keywords: -- 'clickhouse' -- 'migrate' -- 'migration' -- 'migrating' -- 'data' -- 'etl' -- 'elt' -- 'clickhouse-local' -- 'clickhouse-client' -slug: '/cloud/migration/etl-tool-to-clickhouse' -title: 'Using a 3rd-party ETL Tool' -description: 'Page describing how to use a 3rd-party ETL tool with ClickHouse' ---- - -import Image from '@theme/IdealImage'; -import third_party_01 from '@site/static/images/integrations/migration/third-party-01.png'; - - -# 3rd-party ETLツールの使用 - - - -外部データソースからClickHouseにデータを移動するための優れたオプションは、多くの人気のあるETLおよびELTツールのいずれかを使用することです。以下に関するドキュメントがあります: - -- [Airbyte](/integrations/data-ingestion/etl-tools/airbyte-and-clickhouse.md) -- [dbt](/integrations/data-ingestion/etl-tools/dbt/index.md) -- [Vector](/integrations/data-ingestion/etl-tools/vector-to-clickhouse.md) - 
-しかし、ClickHouseと統合する他のETL/ELTツールも多数あるため、お気に入りのツールのドキュメントを確認して詳細を確認してください。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/migration/etl-tool-to-clickhouse.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/migration/etl-tool-to-clickhouse.md.hash deleted file mode 100644 index 2ac9585762c..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/migration/etl-tool-to-clickhouse.md.hash +++ /dev/null @@ -1 +0,0 @@ -08f5e19baafb6efd diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/migration/index.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/migration/index.md deleted file mode 100644 index 208ac39a655..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/migration/index.md +++ /dev/null @@ -1,27 +0,0 @@ ---- -sidebar_label: '概要' -sidebar_position: 1 -keywords: -- 'clickhouse' -- 'migrate' -- 'migration' -- 'migrating' -- 'data' -description: 'クラウドへの移行の目次' -title: 'クラウドへの移行' -slug: '/integrations/migration' ---- - - - -このドキュメントのセクションでは、ClickHouseからClickHouse Cloudへの移行方法について説明します。さらなる情報については、以下のページをご覧ください。 - -| ページ | 説明 | -|----------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------| -| [概要](/integrations/migration/overview) | ClickHouseからClickHouse Cloudへの移行方法の概要を提供します。 | -| [clickhouse-localを使用したClickHouseへの移行](/cloud/migration/clickhouse-local) | clickhouse-localを使用してClickHouseに移行する方法を学びます。 | | -| [ClickHouseからClickHouse Cloud](/cloud/migration/clickhouse-to-cloud) | このガイドでは、セルフマネージドのClickHouseサーバーからClickHouse Cloudへの移行方法と、ClickHouse Cloudサービス間の移行方法を示します。 | -| [3rdパーティETLツールの使用](/cloud/migration/etl-tool-to-clickhouse) | 外部データソースからClickHouseにデータを移動するための人気のETLおよびELTツールについて詳しく学びます。 | -| [オブジェクトストレージからClickHouse Cloud](/integrations/migration/object-storage-to-clickhouse) | Cloud Object StorageからClickHouse Cloudにデータをインポートするためのさまざまなテーブル関数の使用方法を学びます。 | -| [CSVファイルのアップロード](/integrations/migration/upload-a-csv-file) | CSVまたはTSVファイルを使用してClickHouse Cloudにデータをアップロードする方法を学びます。 | -| [Rocksetからの移行](/migrations/rockset) | RocksetからClickHouse Cloudへの移行方法を学びます。 | diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/migration/index.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/migration/index.md.hash deleted file mode 100644 index c47f99d068f..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/migration/index.md.hash +++ /dev/null @@ -1 +0,0 @@ -6b52fad481d07450 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/migration/object-storage-to-clickhouse.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/migration/object-storage-to-clickhouse.md deleted file mode 100644 index 61a8dbc180a..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/migration/object-storage-to-clickhouse.md +++ /dev/null @@ -1,31 +0,0 @@ ---- -title: 'Object Storage to ClickHouse Cloud' -description: 'Moving data from object storage to ClickHouse Cloud' -keywords: -- 'object storage' -- 's3' -- 'azure blob' -- 'gcs' -- 'migration' -slug: '/integrations/migration/object-storage-to-clickhouse' ---- - -import Image from '@theme/IdealImage'; -import object_storage_01 from '@site/static/images/integrations/migration/object-storage-01.png'; - - -# CloudオブジェクトストレージからClickHouse Cloudへのデータ移行 - - 
- -Cloudオブジェクトストレージをデータレイクとして使用し、このデータをClickHouse Cloudにインポートしたい場合、または現在のデータベースシステムがデータをCloudオブジェクトストレージに直接オフロードできる場合は、Cloudオブジェクトストレージに保存されているデータをClickHouse Cloudテーブルに移行するためのテーブル関数の1つを使用できます: - -- [s3](/sql-reference/table-functions/s3.md) または [s3Cluster](/sql-reference/table-functions/s3Cluster.md) -- [gcs](/sql-reference/table-functions/gcs) -- [azureBlobStorage](/sql-reference/table-functions/azureBlobStorage) - -現在のデータベースシステムが直接Cloudオブジェクトストレージにデータをオフロードできない場合は、[サードパーティETL/ELTツール](./etl-tool-to-clickhouse.md)や[clickhouse-local](./clickhouse-local-etl.md)を使用して、現在のデータベースシステムからCloudオブジェクトストレージにデータを移動し、そのデータを2段階でClickHouse Cloudテーブルに移行することができます。 - -このプロセスは2ステップ(Cloudオブジェクトストレージにデータをオフロードし、次にClickHouseにロードする)ですが、その利点は、Cloudオブジェクトストレージからの高い並列読み取りをサポートする[堅牢なClickHouse Cloud](https://clickhouse.com/blog/getting-data-into-clickhouse-part-3-s3)によってペタバイトにスケールすることができる点です。また、[Parquet](/interfaces/formats/#data-format-parquet)のような高度な圧縮形式を活用することもできます。 - -具体的なコード例を示す[ブログ記事](https://clickhouse.com/blog/getting-data-into-clickhouse-part-3-s3)があり、S3を使用してClickHouse Cloudにデータを取り込む方法を説明しています。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/migration/object-storage-to-clickhouse.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/migration/object-storage-to-clickhouse.md.hash deleted file mode 100644 index 99eefc916e4..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/migration/object-storage-to-clickhouse.md.hash +++ /dev/null @@ -1 +0,0 @@ -898d268d418fa0ac diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/migration/overview.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/migration/overview.md deleted file mode 100644 index 4cced590fb7..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/migration/overview.md +++ /dev/null @@ -1,41 +0,0 @@ ---- -sidebar_label: '概要' -sidebar_position: 1 -slug: '/integrations/migration/overview' -keywords: -- 'clickhouse' -- 'migrate' -- 'migration' -- 'migrating' -- 'data' -title: 'ClickHouseへのデータ移行' -description: 'ClickHouseへのデータ移行のオプションについて説明するページです。' ---- - - - - -# ClickHouse へのデータ移行 - -
    - -
    - -
    - -データが現在どこに存在するかに応じて、ClickHouse Cloud へのデータ移行にはいくつかのオプションがあります: - -- [セルフマネージドからクラウド](./clickhouse-to-cloud.md): `remoteSecure` 関数を使用してデータを転送する -- [別の DBMS](./clickhouse-local-etl.md): 現在の DBMS に適した ClickHouse テーブル関数とともに、[clickhouse-local] ETL ツールを使用する -- [どこでも!](./etl-tool-to-clickhouse.md): 様々なデータソースに接続する多くの人気 ETL/ELT ツールの1つを使用する -- [オブジェクトストレージ](./object-storage-to-clickhouse.md): S3 から ClickHouse にデータを簡単に挿入する - -例として、[Redshift からの移行](/integrations/data-ingestion/redshift/index.md) では、ClickHouse へのデータ移行のための 3 つの異なる方法を紹介しています。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/migration/overview.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/migration/overview.md.hash deleted file mode 100644 index 1625f3b5b1f..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/migration/overview.md.hash +++ /dev/null @@ -1 +0,0 @@ -265a142fae4197db diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/migration/rockset.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/migration/rockset.md deleted file mode 100644 index 83c944d36c3..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/migration/rockset.md +++ /dev/null @@ -1,157 +0,0 @@ ---- -title: 'Rockset からの移行' -slug: '/migrations/rockset' -description: 'Rockset から ClickHouse への移行' -keywords: -- 'migrate' -- 'migration' -- 'migrating' -- 'data' -- 'etl' -- 'elt' -- 'Rockset' ---- - - - - - -# Rocksetからの移行 - -Rocksetはリアルタイム分析データベースであり、[2024年6月にOpenAIに買収されました](https://rockset.com/blog/openai-acquires-rockset/)。 -ユーザーは2024年9月30日午後5時PDTまでに[サービスからオフボードする必要があります](https://docs.rockset.com/documentation/docs/faq)。 - -私たちはClickHouse CloudがRocksetのユーザーにとって素晴らしい選択肢を提供すると考えています。このガイドでは、RocksetからClickHouseに移行する際の考慮すべきことを説明します。 - -早速始めましょう! - -## 即時サポート {#immediate-assistance} - -即時サポートが必要な場合は、[このフォーム](https://clickhouse.com/company/contact?loc=docs-rockest-migrations) に記入して、担当者と連絡を取ってください! 
- - -## ClickHouse対Rockset - 高レベルの比較 {#clickhouse-vs-rockset---high-level-comparison} - -まず、ClickHouseの強みとRocksetと比較して得られる利点を簡単に見ていきます。 - -ClickHouseは、スキーマファーストアプローチを通じてリアルタイムパフォーマンスとコスト効率に重点を置いています。 -半構造化データもサポートされていますが、私たちの哲学は、ユーザーがパフォーマンスとリソース効率を最大化するためにデータをどのように構造化するかを決定すべきであるということです。 -前述のスキーマファーストアプローチの結果、私たちのベンチマークでは、ClickHouseはスケーラビリティ、取り込みスループット、クエリパフォーマンス、コスト効率においてRocksetを上回っています。 - -他のデータシステムとの統合に関しては、ClickHouseは[広範な機能](/integrations)を持ち、Rocksetを上回っています。 - -RocksetとClickHouseの両方がクラウドベースの製品と関連するサポートサービスを提供しています。 -Rocksetとは異なり、ClickHouseにはオープンソースの製品とコミュニティもあります。 -ClickHouseのソースコードは[github.com/clickhouse/clickhouse](https://github.com/clickhouse/clickhouse)にあり、執筆時点で1,500人以上の貢献者がいます。 -[ClickHouseコミュニティSlack](https://clickhouse.com/slack)には7,000人以上のメンバーがいて、自分たちの経験やベストプラクティスを共有し、遭遇する問題についてお互いに助け合っています。 - -この移行ガイドは、RocksetからClickHouse Cloudへの移行に焦点を当てていますが、ユーザーはオープンソース機能に関する[私たちのその他のドキュメント](/)を参照できます。 - -## Rocksetの主要概念 {#rockset-key-concepts} - -まず、[Rocksetの主要概念](https://docs.rockset.com/documentation/docs/key-concepts)を見て、それらのClickHouse Cloudにおける対応物を説明します(存在する場合)。 - -### データソース {#data-sources} - -RocksetとClickHouseは、さまざまなソースからデータをロードすることをサポートしています。 - -Rocksetでは、データソースを作成し、そのデータソースに基づいて_コレクション_を作成します。 -イベントストリーニングプラットフォーム、OLTPデータベース、クラウドバケットストレージ用のフルマネージドインテグレーションがあります。 - -ClickHouse Cloudでは、フルマネージドインテグレーションに相当するのは[ClickPipes](/integrations/clickpipes)です。 -ClickPipesはイベントストリーミングプラットフォームやクラウドバケットストレージからのデータの継続的なロードをサポートします。 -ClickPipesはデータを_テーブル_にロードします。 - -### 取り込み変換 {#ingest-transformations} - -Rocksetの取り込み変換では、コレクションに保存される前にRocksetに入る生データを変換できます。 -ClickHouse Cloudは同様のことをClickPipesを介して行い、ClickHouseの[マテリアライズドビュー機能](/guides/developer/cascading-materialized-views)を利用してデータを変換します。 - -### コレクション {#collections} - -Rocksetではコレクションをクエリします。ClickHouse Cloudではテーブルをクエリします。 -両方のサービスで、クエリはSQLを使用して行われます。 -ClickHouseは、SQL標準の機能に加え、データを操作し変換するための追加機能を提供します。 - -### クエリラムダ {#query-lambdas} - -Rocksetはクエリラムダをサポートしており、Rocksetに保存された名前付きパラメータ化クエリは専用のRESTエンドポイントから実行できます。 -ClickHouse Cloudの[クエリAPIエンドポイント](/cloud/get-started/query-endpoints)は類似の機能を提供します。 - -### ビュー {#views} - -Rocksetでは、SQLクエリによって定義された仮想コレクションであるビューを作成できます。 -ClickHouse Cloudは、いくつかの種類の[ビュー](/sql-reference/statements/create/view)をサポートしています。 - -* _通常のビュー_はデータを保存しません。クエリ時に別のテーブルから読み取るだけです。 -* _パラメータ化されたビュー_は通常のビューに似ていますが、クエリ時に解決されるパラメータで作成できます。 -* _マテリアライズドビュー_は、対応する `SELECT` クエリによって変換されたデータを保存します。新しいデータが参照元のデータに追加されたときに実行されるトリガーのようなものです。 - -### エイリアス {#aliases} - -Rocksetのエイリアスは、コレクションに複数の名前を関連付けるために使用されます。 -ClickHouse Cloudは同等の機能をサポートしていません。 - -### ワークスペース {#workspaces} - -Rocksetのワークスペースは、リソース(コレクション、クエリラムダ、ビュー、エイリアスなど)や他のワークスペースを保持するコンテナーです。 - -ClickHouse Cloudでは、完全な分離のために異なるサービスを使用できます。 -異なるテーブルやビューへのRBACアクセスを簡素化するためにデータベースを作成することもできます。 - -## 設計時の考慮事項 {#design-considerations} - -このセクションでは、Rocksetの重要な機能のいくつかを見直し、ClickHouse Cloudを使用する際にそれらにどのように対処するかを学びます。 - -### JSONサポート {#json-support} - -Rocksetは、Rockset特有の型を許可するJSONフォーマットの拡張版をサポートしています。 - -ClickHouseでは、JSONを操作するための複数の方法があります: - -* JSON推論 -* クエリ時のJSON抽出 -* 挿入時のJSON抽出 - -あなたのユーザーケースに最適なアプローチを理解するには、[私たちのJSONドキュメント](/integrations/data-formats/json/overview)を参照してください。 - -さらに、ClickHouseは近日中に[半構造化カラムデータ型](https://github.com/ClickHouse/ClickHouse/issues/54864)を持つことになります。 -この新しい型は、RocksetのJSON型が提供する柔軟性をユーザーに提供するはずです。 - -### フルテキスト検索 {#full-text-search} - -Rocksetは`SEARCH`関数を使用したフルテキスト検索をサポートしています。 -ClickHouseは検索エンジンではありませんが、[文字列内の検索のためのさまざまな関数](/sql-reference/functions/string-search-functions)を持っています。 -ClickHouseはまた、[ブルームフィルタ](/optimize/skipping-indexes)をサポートしており、多くのシナリオで役立つことができます。 - -### ベクター検索 
{#vector-search} - -Rocksetには、ベクター検索アプリケーションで使用される埋め込みをインデックス化するために使用できる類似性インデックスがあります。 - -ClickHouseも線形スキャンを使用してベクター検索に利用できます: -- [ClickHouseを使ったベクター検索 - パート1](https://clickhouse.com/blog/vector-search-clickhouse-p1?loc=docs-rockest-migrations) -- [ClickHouseを使ったベクター検索 - パート2](https://clickhouse.com/blog/vector-search-clickhouse-p2?loc=docs-rockest-migrations) - -ClickHouseには[ベクター検索の類似性インデックス](/engines/table-engines/mergetree-family/annindexes)もありますが、このアプローチは現在実験的であり、[新しいクエリアナライザー](/guides/developer/understanding-query-execution-with-the-analyzer)とはまだ互換性がありません。 - -### OLTPデータベースからのデータの取り込み {#ingesting-data-from-oltp-databases} - -Rocksetのマネージドインテグレーションは、MongoDBやDynamoDBのようなOLTPデータベースからデータを取り込むことをサポートしています。 - -DynamoDBからデータを取り込む場合は、こちらのDynamoDBインテグレーションガイドを参照してください[こちら](/integrations/data-ingestion/dbms/dynamodb/index.md)。 - -### コンピュート・コンピュート分離 {#compute-compute-separation} - -コンピュート・コンピュート分離は、リアルタイム分析システムにおけるアーキテクチャ設計パターンであり、突発的なデータやクエリの急増に対処するのを可能にします。 -単一のコンポーネントが取り込みとクエリを両方処理しているとしましょう。 -その場合、クエリの洪水があると、取り込みのレイテンシーが増加し、取り込むデータの洪水があると、クエリのレイテンシーが増加します。 - -コンピュート・コンピュート分離は、データの取り込みとクエリ処理のコードパスを分離してこの問題を回避します。これはRocksetが2023年3月に実装した機能です。 - -この機能は現在ClickHouse Cloudに実装中で、プライベートプレビューに近づいています。有効にするにはサポートに連絡してください。 - -## 無料の移行サービス {#free-migration-services} - -私たちは、Rocksetのユーザーにとってこれがストレスの多い時期であることを理解しています。誰もこの短期間にプロダクションデータベースを移行したいとは思いません! - -ClickHouseが適している場合、私たちは[無料の移行サービス](https://clickhouse.com/comparison/rockset?loc=docs-rockest-migrations)を提供して、移行をスムーズに行えるようにお手伝いします。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/migration/rockset.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/migration/rockset.md.hash deleted file mode 100644 index c7ff2684452..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/migration/rockset.md.hash +++ /dev/null @@ -1 +0,0 @@ -1e689deccac79aa5 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/migration/upload-a-csv-file.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/migration/upload-a-csv-file.md deleted file mode 100644 index 0002b917105..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/migration/upload-a-csv-file.md +++ /dev/null @@ -1,39 +0,0 @@ ---- -title: 'CSVファイルをアップロード' -slug: '/integrations/migration/upload-a-csv-file' -description: 'CSVファイルのアップロードについて学ぶ' ---- - -import Image from '@theme/IdealImage'; -import uploadcsv1 from '@site/static/images/integrations/migration/uploadcsv1.png'; -import uploadcsv2 from '@site/static/images/integrations/migration/uploadcsv2.png'; -import uploadcsv3 from '@site/static/images/integrations/migration/uploadcsv3.png'; -import uploadcsv4 from '@site/static/images/integrations/migration/uploadcsv4.png'; -import uploadcsv5 from '@site/static/images/integrations/migration/uploadcsv5.png'; - - -# CSVファイルのアップロード - -ヘッダー行にカラム名を含むCSVまたはTSVファイルをアップロードすることができます。ClickHouseは行のバッチを前処理してカラムのデータ型を推測し、その後、行を新しいテーブルに挿入します。 - -1. まず、**詳細**ページに移動します。あなたのClickHouse Cloudサービスの: - - - -2. **アクション**ドロップダウンメニューから**データの読み込み**を選択します: - - - -3. **データソース**ページの**ファイルアップロード**ボタンをクリックし、表示されるダイアログウィンドウでアップロードしたいファイルを選択します。**開く**をクリックして続行します(以下の例はmacOS上のものです。他のオペレーティングシステムでは異なる場合があります)。 - - - -4. ClickHouseは推測したデータ型を表示します。 - - - -5. ***新しいテーブル名***を入力してデータを挿入し、次に**ClickHouseにインポート**ボタンをクリックします。 - - - -6. 
ClickHouseサービスに接続し、テーブルが正常に作成されたことを確認し、データの準備が整いました!データを視覚化したい場合は、ClickHouseに簡単に接続できる[BIツール](../data-visualization/index.md)をチェックしてみてください。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/migration/upload-a-csv-file.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/migration/upload-a-csv-file.md.hash deleted file mode 100644 index 5743c09dc86..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/migration/upload-a-csv-file.md.hash +++ /dev/null @@ -1 +0,0 @@ -74711908d184b9ba diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/misc/index.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/misc/index.md deleted file mode 100644 index b71bd1d834c..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/misc/index.md +++ /dev/null @@ -1,20 +0,0 @@ ---- -slug: '/integrations/misc' -keywords: -- 'Retool' -- 'Easypanel' -- 'Splunk' -title: 'ツール' -description: 'ツールセクションのランディングページ' ---- - - - - -# ツール - -| ページ | -|---------------------| -| [ビジュアルインターフェース](/interfaces/third-party/gui) | -| [プロキシ](/interfaces/third-party/proxy) | -| [統合](/interfaces/third-party/integrations) | diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/misc/index.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/misc/index.md.hash deleted file mode 100644 index 2f01a148cf8..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/misc/index.md.hash +++ /dev/null @@ -1 +0,0 @@ -f48cbcc20fa822d1 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/prometheus.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/prometheus.md deleted file mode 100644 index c24d1fe8f43..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/prometheus.md +++ /dev/null @@ -1,346 +0,0 @@ ---- -slug: '/integrations/prometheus' -sidebar_label: 'Prometheus' -title: 'Prometheus' -description: 'Export ClickHouse metrics to Prometheus' -keywords: -- 'prometheus' -- 'grafana' -- 'monitoring' -- 'metrics' -- 'exporter' ---- - -import prometheus_grafana_metrics_endpoint from '@site/static/images/integrations/prometheus-grafana-metrics-endpoint.png'; -import prometheus_grafana_dropdown from '@site/static/images/integrations/prometheus-grafana-dropdown.png'; -import prometheus_grafana_chart from '@site/static/images/integrations/prometheus-grafana-chart.png'; -import prometheus_grafana_alloy from '@site/static/images/integrations/prometheus-grafana-alloy.png'; -import prometheus_grafana_metrics_explorer from '@site/static/images/integrations/prometheus-grafana-metrics-explorer.png'; -import prometheus_datadog from '@site/static/images/integrations/prometheus-datadog.png'; -import Image from '@theme/IdealImage'; - - - -# Prometheus統合 - -この機能は、[Prometheus](https://prometheus.io/)を統合してClickHouse Cloudサービスを監視することをサポートします。Prometheusメトリクスへのアクセスは、[ClickHouse Cloud API](/cloud/manage/api/api-overview)エンドポイントを介して公開されており、ユーザーは安全に接続してメトリクスをPrometheusメトリクスコレクタにエクスポートできます。これらのメトリクスは、GrafanaやDatadogなどのダッシュボードと統合して視覚化することができます。 - -始めるには、[APIキーを生成する](/cloud/manage/openapi)必要があります。 - -## ClickHouse Cloudメトリクスを取得するためのPrometheusエンドポイントAPI {#prometheus-endpoint-api-to-retrieve-clickhouse-cloud-metrics} - -### APIリファレンス {#api-reference} - -| メソッド | パス | 説明 | -| ------ | ------------------------------------------------------------------------------------------------------------------ | 
------------------------------------------------------------ | -| GET | `https://api.clickhouse.cloud/v1/organizations/:organizationId/services/:serviceId/prometheus?filtered_metrics=[true \| false]` | 特定のサービスのメトリクスを返します | -| GET | `https://api.clickhouse.cloud/v1/organizations/:organizationId/prometheus?filtered_metrics=[true \| false]` | 組織内のすべてのサービスのメトリクスを返します | - -**リクエストパラメータ** - -| 名称 | 所在地 | 型 | -| ---------------- | ------------------ |------------------ | -| Organization ID | エンドポイントアドレス | uuid | -| Service ID | エンドポイントアドレス | uuid (オプション) | -| filtered_metrics | クエリパラメータ | boolean (オプション) | - - -### 認証 {#authentication} - -基本認証のためにClickHouse Cloud APIキーを使用してください: - -```bash -Username: -Password: -例のリクエスト -export KEY_SECRET= -export KEY_ID= -export ORG_ID= - - -# $ORG_IDのすべてのサービスのために -curl --silent --user $KEY_ID:$KEY_SECRET https://api.clickhouse.cloud/v1/organizations/$ORG_ID/prometheus?filtered_metrics=true - - -# 単一サービスのみに対して -export SERVICE_ID= -curl --silent --user $KEY_ID:$KEY_SECRET https://api.clickhouse.cloud/v1/organizations/$ORG_ID/services/$SERVICE_ID/prometheus?filtered_metrics=true -``` - -### サンプルレスポンス {#sample-response} - -```response - -# HELP ClickHouse_ServiceInfo サービスに関する情報、クラスタの状態とClickHouseのバージョンを含む - -# TYPE ClickHouse_ServiceInfo untyped -ClickHouse_ServiceInfo{clickhouse_org="c2ba4799-a76e-456f-a71a-b021b1fafe60",clickhouse_service="12f4a114-9746-4a75-9ce5-161ec3a73c4c",clickhouse_service_name="test service",clickhouse_cluster_status="running",clickhouse_version="24.5",scrape="full"} 1 - - -# HELP ClickHouseProfileEvents_Query 解釈され実行される可能性のあるクエリの数。解析に失敗したクエリやASTサイズ制限、クォータ制限、または同時実行クエリの制限により拒否されたクエリは含まれません。ClickHouse自体が起動した内部クエリを含む場合があります。サブクエリはカウントしません。 - -# TYPE ClickHouseProfileEvents_Query counter -ClickHouseProfileEvents_Query{clickhouse_org="c2ba4799-a76e-456f-a71a-b021b1fafe60",clickhouse_service="12f4a114-9746-4a75-9ce5-161ec3a73c4c",clickhouse_service_name="test service",hostname="c-cream-ma-20-server-3vd2ehh-0",instance="c-cream-ma-20-server-3vd2ehh-0",table="system.events"} 6 - - -# HELP ClickHouseProfileEvents_QueriesWithSubqueries サブクエリを含むクエリの数 - -# TYPE ClickHouseProfileEvents_QueriesWithSubqueries counter -ClickHouseProfileEvents_QueriesWithSubqueries{clickhouse_org="c2ba4799-a76e-456f-a71a-b021b1fafe60",clickhouse_service="12f4a114-9746-4a75-9ce5-161ec3a73c4c",clickhouse_service_name="test service",hostname="c-cream-ma-20-server-3vd2ehh-0",instance="c-cream-ma-20-server-3vd2ehh-0",table="system.events"} 230 - - -# HELP ClickHouseProfileEvents_SelectQueriesWithSubqueries サブクエリを含むSELECTクエリの数 - -# TYPE ClickHouseProfileEvents_SelectQueriesWithSubqueries counter -ClickHouseProfileEvents_SelectQueriesWithSubqueries{clickhouse_org="c2ba4799-a76e-456f-a71a-b021b1fafe60",clickhouse_service="12f4a114-9746-4a75-9ce5-161ec3a73c4c",clickhouse_service_name="test service",hostname="c-cream-ma-20-server-3vd2ehh-0",instance="c-cream-ma-20-server-3vd2ehh-0",table="system.events"} 224 - - -# HELP ClickHouseProfileEvents_FileOpen 開かれたファイルの数。 - -# TYPE ClickHouseProfileEvents_FileOpen counter -ClickHouseProfileEvents_FileOpen{clickhouse_org="c2ba4799-a76e-456f-a71a-b021b1fafe60",clickhouse_service="12f4a114-9746-4a75-9ce5-161ec3a73c4c",clickhouse_service_name="test service",hostname="c-cream-ma-20-server-3vd2ehh-0",instance="c-cream-ma-20-server-3vd2ehh-0",table="system.events"} 4157 - - -# HELP ClickHouseProfileEvents_Seek 'lseek'関数が呼び出された回数。 - -# TYPE ClickHouseProfileEvents_Seek counter 
-ClickHouseProfileEvents_Seek{clickhouse_org="c2ba4799-a76e-456f-a71a-b021b1fafe60",clickhouse_service="12f4a114-9746-4a75-9ce5-161ec3a73c4c",clickhouse_service_name="test service",hostname="c-cream-ma-20-server-3vd2ehh-0",instance="c-cream-ma-20-server-3vd2ehh-0",table="system.events"} 1840 - - -# HELP ClickPipes_Info 常に1に等しい。ラベル"clickpipe_state"には、パイプの現在の状態が含まれています:停止/プロビジョニング/実行中/一時停止/失敗 - -# TYPE ClickPipes_Info gauge -ClickPipes_Info{clickhouse_org="11dfa1ec-767d-43cb-bfad-618ce2aaf959",clickhouse_service="82b83b6a-5568-4a82-aa78-fed9239db83f",clickhouse_service_name="ClickPipes demo instace",clickpipe_id="642bb967-940b-459e-9f63-a2833f62ec44",clickpipe_name="Confluent demo pipe",clickpipe_source="confluent",clickpipe_status="Running"} 1 - - -# HELP ClickPipes_SentEvents_Total ClickHouseに送信されたレコードの総数 - -# TYPE ClickPipes_SentEvents_Total counter -ClickPipes_SentEvents_Total{clickhouse_org="11dfa1ec-767d-43cb-bfad-618ce2aaf959",clickhouse_service="82b83b6a-5568-4a82-aa78-fed9239db83f",clickhouse_service_name="ClickPipes demo instace",clickpipe_id="642bb967-940b-459e-9f63-a2833f62ec44",clickpipe_name="Confluent demo pipe",clickpipe_source="confluent"} 5534250 - - -# HELP ClickPipes_SentBytesCompressed_Total ClickHouseに送信された圧縮バイトの総数。 - -# TYPE ClickPipes_SentBytesCompressed_Total counter -ClickPipes_SentBytesCompressed_Total{clickhouse_org="11dfa1ec-767d-43cb-bfad-618ce2aaf959",clickhouse_service="82b83b6a-5568-4a82-aa78-fed9239db83f",clickhouse_service_name="ClickPipes demo instace",clickpipe_id="642bb967-940b-459e-9f63-a2833f62ec44",clickpipe_name="Confluent demo pipe",clickpipe_source="confluent"} 380837520 -ClickPipes_SentBytesCompressed_Total{clickhouse_org="11dfa1ec-767d-43cb-bfad-618ce2aaf959",clickhouse_service="82b83b6a-5568-4a82-aa78-fed9239db83f",clickhouse_service_name - - -# HELP ClickPipes_FetchedBytes_Total ソースから取得した未圧縮バイトの総数。 - -# TYPE ClickPipes_FetchedBytes_Total counter -ClickPipes_FetchedBytes_Total{clickhouse_org="11dfa1ec-767d-43cb-bfad-618ce2aaf959",clickhouse_service="82b83b6a-5568-4a82-aa78-fed9239db83f",clickhouse_service_name="ClickPipes demo instace",clickpipe_id="642bb967-940b-459e-9f63-a2833f62ec44",clickpipe_name="Confluent demo pipe",clickpipe_source="confluent"} 873286202 - - -# HELP ClickPipes_Errors_Total データの取り込み時に発生したエラーの総数。 - -# TYPE ClickPipes_Errors_Total counter -ClickPipes_Errors_Total{clickhouse_org="11dfa1ec-767d-43cb-bfad-618ce2aaf959",clickhouse_service="82b83b6a-5568-4a82-aa78-fed9239db83f",clickhouse_service_name="ClickPipes demo instace",clickpipe_id="642bb967-940b-459e-9f63-a2833f62ec44",clickpipe_name="Confluent demo pipe",clickpipe_source="confluent"} 0 - - -# HELP ClickPipes_SentBytes_Total ClickHouseに送信された未圧縮バイトの総数。 - -# TYPE ClickPipes_SentBytes_Total counter -ClickPipes_SentBytes_Total{clickhouse_org="11dfa1ec-767d-43cb-bfad-618ce2aaf959",clickhouse_service="82b83b6a-5568-4a82-aa78-fed9239db83f",clickhouse_service_name="ClickPipes demo instace",clickpipe_id="642bb967-940b-459e-9f63-a2833f62ec44",clickpipe_name="Confluent demo pipe",clickpipe_source="confluent"} 477187967 - - -# HELP ClickPipes_FetchedBytesCompressed_Total ソースから取得した圧縮バイトの総数。データがソースで未圧縮の場合は、ClickPipes_FetchedBytes_Totalに等しい。 - -# TYPE ClickPipes_FetchedBytesCompressed_Total counter -ClickPipes_FetchedBytesCompressed_Total{clickhouse_org="11dfa1ec-767d-43cb-bfad-618ce2aaf959",clickhouse_service="82b83b6a-5568-4a82-aa78-fed9239db83f",clickhouse_service_name="ClickPipes demo instace",clickpipe_id="642bb967-940b-459e-9f63-a2833f62ec44",clickpipe_name="Confluent demo 
pipe",clickpipe_source="confluent"} 873286202 - - -# HELP ClickPipes_FetchedEvents_Total ソースから取得したレコードの総数。 - -# TYPE ClickPipes_FetchedEvents_Total counter -ClickPipes_FetchedEvents_Total{clickhouse_org="11dfa1ec-767d-43cb-bfad-618ce2aaf959",clickhouse_service="82b83b6a-5568-4a82-aa78-fed9239db83f",clickhouse_service_name="ClickPipes demo instace",clickpipe_id="642bb967-940b-459e-9f63-a2833f62ec44",clickpipe_name="Confluent demo pipe",clickpipe_source="confluent"} 5535376 -``` - -### メトリクスラベル {#metric-labels} - -すべてのメトリクスには以下のラベルがあります: - -| ラベル | 説明 | -|---|---| -|clickhouse_org|組織ID| -|clickhouse_service|サービスID| -|clickhouse_service_name|サービス名| - -ClickPipesの場合、メトリクスには次のラベルも含まれます: - -| ラベル | 説明 | -| --- | --- | -| clickpipe_id | ClickPipe ID | -| clickpipe_name | ClickPipe名 | -| clickpipe_source | ClickPipeソースタイプ | - -### 情報メトリクス {#information-metrics} - -ClickHouse Cloudは、常に値が`1`の`gauge`である特別なメトリクス `ClickHouse_ServiceInfo` を提供します。このメトリクスには、すべての**メトリクスラベル**と次のラベルが含まれています: - -| ラベル | 説明 | -|---|---| -|clickhouse_cluster_status|サービスの状態。次のいずれかの状態です:[ `awaking` \| `running` \| `degraded` \| `idle` \| `stopped` ]| -|clickhouse_version|サービスが実行されているClickHouseサーバーのバージョン| -|scrape|最後のスクレイプの状態を示します。`full`または`partial`のいずれかです。| -|full|最後のメトリクススクレイプ中にエラーが発生しなかったことを示します。| -|partial|最後のメトリクススクレイプ中にいくつかのエラーが発生し、`ClickHouse_ServiceInfo`メトリクスのみが返されたことを示します。| - -メトリクスを取得するリクエストは、一時停止されたサービスを再開することはありません。サービスが`idle`状態の場合、`ClickHouse_ServiceInfo`メトリクスのみが返されます。 - -ClickPipesの場合、同様の`ClickPipes_Info`メトリクスの`gauge`があります。これは、**メトリクスラベル**に加えて次のラベルを含みます: - -| ラベル | 説明 | -| --- | --- | -| clickpipe_state | パイプの現在の状態 | - -### Prometheusの設定 {#configuring-prometheus} - -Prometheusサーバーは、設定されたターゲットから指定された間隔でメトリクスを収集します。以下は、ClickHouse Cloud Prometheusエンドポイントを使用するためのPrometheusサーバーの設定例です: - -```yaml -global: - scrape_interval: 15s - -scrape_configs: - - job_name: "prometheus" - static_configs: - - targets: ["localhost:9090"] - - job_name: "clickhouse" - static_configs: - - targets: ["api.clickhouse.cloud"] - scheme: https - params: - filtered_metrics: ["true"] - metrics_path: "/v1/organizations//prometheus" - basic_auth: - username: - password: - honor_labels: true -``` - -`honor_labels`構成パラメータは、インスタンスラベルが適切に設定されるように`true`に設定する必要があります。さらに、上記の例では`filtered_metrics`は`true`に設定されていますが、ユーザーの好みに基づいて構成する必要があります。 - -## Grafanaとの統合 {#integrating-with-grafana} - -ユーザーは、Grafanaとの統合に2つの主な方法があります: - -- **メトリクスエンドポイント** – このアプローチは、追加のコンポーネントやインフラストラクチャを必要としないという利点があります。この提供はGrafana Cloudに限定され、ClickHouse Cloud PrometheusエンドポイントのURLと認証情報のみが必要です。 -- **Grafana Alloy** - Grafana Alloyは、Grafana Agentの代わりとなるベンダー中立のOpenTelemetry (OTel) Collectorの配布版です。これはスクレイパーとして使用でき、自分のインフラストラクチャにデプロイ可能で、任意のPrometheusエンドポイントと互換性があります。 - -以下では、ClickHouse Cloud Prometheusエンドポイントに特有の詳細に焦点を当てたこれらのオプションの使用に関する手順を提供します。 - -### メトリクスエンドポイントを使用したGrafana Cloud {#grafana-cloud-with-metrics-endpoint} - -- Grafana Cloudアカウントにログインします -- **メトリクスエンドポイント**を選択して新しい接続を追加します -- スクレイプURLをPrometheusエンドポイントを指すように設定し、APIキー/シークレットで接続の基本認証を設定します -- 接続をテストして接続できることを確認します - - - -
    - -設定が完了すると、ダッシュボードを設定するために選択できるメトリクスがドロップダウンに表示されるはずです: - - - -
    - - - -### Alloyを使用したGrafana Cloud {#grafana-cloud-with-alloy} - -Grafana Cloudを使用している場合、GrafanaのAlloyメニューに移動し、画面上の指示に従うことでAlloyをインストールできます: - - - -
    - -これにより、認証トークンを使用してGrafana Cloudエンドポイントにデータを送信するための`prometheus.remote_write`コンポーネントを持つAlloyが設定されます。その後、ユーザーはClickHouse Cloud Prometheusエンドポイントのスクレイパーを含むようにAlloyの設定(Linuxでは`/etc/alloy/config.alloy`にあります)を修正するだけです。 - -以下は、ClickHouse Cloudエンドポイントからメトリクスをスクレイプするための`prometheus.scrape`コンポーネントを持つAlloyの設定例を示します。自動的に設定された`prometheus.remote_write`コンポーネントも含まれています。`basic_auth`構成コンポーネントには、Cloud APIキーIDとシークレットがそれぞれユーザー名とパスワードとして含まれていることに注意してください。 - -```yaml -prometheus.scrape "clickhouse_cloud" { - // デフォルトのリッスンアドレスからメトリクスを収集します。 - targets = [{ - __address__ = "https://api.clickhouse.cloud/v1/organizations/:organizationId/prometheus?filtered_metrics=true", -// 例: https://api.clickhouse.cloud/v1/organizations/97a33bdb-4db3-4067-b14f-ce40f621aae1/prometheus?filtered_metrics=true - }] - - honor_labels = true - - basic_auth { - username = "KEY_ID" - password = "KEY_SECRET" - } - - forward_to = [prometheus.remote_write.metrics_service.receiver] - // metrics_serviceに転送します -} - -prometheus.remote_write "metrics_service" { - endpoint { - url = "https://prometheus-prod-10-prod-us-central-0.grafana.net/api/prom/push" - basic_auth { - username = "" - password = "" - } - } -} -``` - -`honor_labels`構成パラメータは、インスタンスラベルが適切に設定されるように`true`に設定する必要があります。 - -### Alloyを使用したGrafanaのセルフマネージド {#grafana-self-managed-with-alloy} - -Grafanaのセルフマネージドユーザーは、Alloyエージェントのインストール手順を[ここ](https://grafana.com/docs/alloy/latest/get-started/install/)で見つけることができます。ユーザーがAlloyを構成してPrometheusメトリクスを希望の宛先に送信していると仮定します。以下の`prometheus.scrape`コンポーネントは、AlloyがClickHouse Cloudエンドポイントをスクレイプする原因となります。`prometheus.remote_write`がスクレイプされたメトリクスを受け取ることを仮定します。これが存在しない場合は、`forward_to`キーを目的の宛先に調整してください。 - -```yaml -prometheus.scrape "clickhouse_cloud" { - // デフォルトのリッスンアドレスからメトリクスを収集します。 - targets = [{ - __address__ = "https://api.clickhouse.cloud/v1/organizations/:organizationId/prometheus?filtered_metrics=true", -// 例: https://api.clickhouse.cloud/v1/organizations/97a33bdb-4db3-4067-b14f-ce40f621aae1/prometheus?filtered_metrics=true - }] - - honor_labels = true - - basic_auth { - username = "KEY_ID" - password = "KEY_SECRET" - } - - forward_to = [prometheus.remote_write.metrics_service.receiver] - // metrics_serviceに転送します。お好みの受信者に合わせてください -} -``` - -設定が完了すると、メトリクスエクスプローラーでClickHouse関連のメトリクスが表示されるはずです: - - - -
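なお、セルフマネージド構成で`prometheus.remote_write`コンポーネントをまだ定義していない場合、最小構成はおおよそ次のようなイメージです。宛先にはremote write受信を有効にした自前のPrometheusやMimirなどを仮定しており、URLは仮の値です:

```yaml
// 仮の例: スクレイプしたメトリクスの転送先となるprometheus.remote_writeの最小構成
// (remote write受信を有効にしたPrometheus / Mimirなどを想定。URLは環境に合わせて置き換えてください)
prometheus.remote_write "metrics_service" {
  endpoint {
    url = "http://prometheus.internal:9090/api/v1/write"
  }
}
```

この場合、`prometheus.scrape`側の`forward_to`は`[prometheus.remote_write.metrics_service.receiver]`のままで問題ありません。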
    - -`honor_labels`構成パラメータは、インスタンスラベルが適切に設定されるように`true`に設定する必要があります。 - -## Datadogとの統合 {#integrating-with-datadog} - -Datadogの[エージェント](https://docs.datadoghq.com/agent/?tab=Linux)と[OpenMetrics統合](https://docs.datadoghq.com/integrations/openmetrics/)を使用して、ClickHouse Cloudエンドポイントからメトリクスを収集できます。以下は、このエージェントと統合のシンプルな設定例です。ただし、最も重要なメトリクスだけを選択したい場合があります。以下の包括的な例では、Datadogがカスタムメトリクスと見なす多くのメトリクス・インスタンスの組み合わせがエクスポートされます。 - -```yaml -init_config: - -instances: - - openmetrics_endpoint: 'https://api.clickhouse.cloud/v1/organizations/97a33bdb-4db3-4067-b14f-ce40f621aae1/prometheus?filtered_metrics=true' - namespace: 'clickhouse' - metrics: - - '^ClickHouse.*' - username: username - password: password -``` - -
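上記の設定は`^ClickHouse.*`に一致するすべてのメトリクスを取り込むため、カスタムメトリクスの数が大きくなりがちです。必要なものだけを取り込みたい場合は、次のように個別のメトリクス名やより狭いパターンを列挙する構成も考えられます(列挙しているメトリクス名はあくまで一例で、実際に必要なものに合わせて調整する前提です):

```yaml
init_config:

instances:
  - openmetrics_endpoint: 'https://api.clickhouse.cloud/v1/organizations/<組織ID>/prometheus?filtered_metrics=true'
    namespace: 'clickhouse'
    metrics:
      # 取り込むメトリクスを明示的に列挙し、カスタムメトリクス数を抑える(一例)
      - 'ClickHouse_ServiceInfo'
      - 'ClickHouseProfileEvents_Query'
      - '^ClickPipes_.*'
    username: <APIキーID>
    password: <APIキーシークレット>
```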
    - - diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/prometheus.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/prometheus.md.hash deleted file mode 100644 index b7eb66b4077..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/prometheus.md.hash +++ /dev/null @@ -1 +0,0 @@ -7f0921575846cd6b diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/sql-clients/_category_.yml b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/sql-clients/_category_.yml deleted file mode 100644 index 07b3594c601..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/sql-clients/_category_.yml +++ /dev/null @@ -1,8 +0,0 @@ -position: 300 -label: 'SQL clients' -collapsible: true -collapsed: true -link: - type: generated-index - title: SQL clients - slug: /integrations/sql-clients diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/sql-clients/datagrip.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/sql-clients/datagrip.md deleted file mode 100644 index a4588021e2a..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/sql-clients/datagrip.md +++ /dev/null @@ -1,64 +0,0 @@ ---- -sidebar_label: 'DataGrip' -slug: '/integrations/datagrip' -description: 'DataGripは、ボックスからClickHouseをサポートするデータベースIDEです。' -title: 'Connecting DataGrip to ClickHouse' ---- - -import Image from '@theme/IdealImage'; -import ConnectionDetails from '@site/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_gather_your_details_http.mdx'; -import datagrip_1 from '@site/static/images/integrations/sql-clients/datagrip-1.png'; -import datagrip_5 from '@site/static/images/integrations/sql-clients/datagrip-5.png'; -import datagrip_6 from '@site/static/images/integrations/sql-clients/datagrip-6.png'; -import datagrip_7 from '@site/static/images/integrations/sql-clients/datagrip-7.png'; -import CommunityMaintainedBadge from '@theme/badges/CommunityMaintained'; - - -# DataGripをClickHouseに接続する - - - -## 1. DataGripを開始またはダウンロードする {#start-or-download-datagrip} - -DataGripは https://www.jetbrains.com/datagrip/ で入手できます。 - -## 2. 接続情報を集める {#1-gather-your-connection-details} - - -## 3. ClickHouseドライバーを読み込む {#2-load-the-clickhouse-driver} - -1. DataGripを起動し、**データソース**タブの**データソースとドライバー**ダイアログで**+**アイコンをクリックします。 - - - - **ClickHouse**を選択します。 - - :::tip - 接続を確立する際に、順序が変更されるため、ClickHouseがリストの上部にない場合があります。 - ::: - - - -- **ドライバー**タブに切り替えてClickHouseドライバーを読み込みます。 - - DataGripはダウンロードサイズを最小限に抑えるために、ドライバーを同梱していません。**ドライバー**タブで、**完全サポート**リストから**ClickHouse**を選択し、**+**アイコンを展開します。**提供されたドライバー**オプションから**最新の安定版**ドライバーを選択します: - - - -## 4. 
ClickHouseに接続する {#3-connect-to-clickhouse} - -- データベース接続情報を指定し、**接続テスト**をクリックします: - - 最初のステップで接続情報を集めたら、ホストURL、ポート、ユーザー名、パスワード、データベース名を入力し、接続のテストを行います。 - - :::tip - DataGripダイアログの**HOST**エントリーは実際にはURLです。以下の画像を参照してください。 - - JDBC URL設定の詳細については、[ClickHouse JDBCドライバー](https://github.com/ClickHouse/clickhouse-java)リポジトリを参照してください。 - ::: - - - -## もっと学ぶ {#learn-more} - -DataGripに関する詳細情報はDataGripドキュメントを訪れてください。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/sql-clients/datagrip.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/sql-clients/datagrip.md.hash deleted file mode 100644 index beee0ecead3..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/sql-clients/datagrip.md.hash +++ /dev/null @@ -1 +0,0 @@ -505349095ac6416d diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/sql-clients/dbeaver.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/sql-clients/dbeaver.md deleted file mode 100644 index 77b9c6b9e04..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/sql-clients/dbeaver.md +++ /dev/null @@ -1,84 +0,0 @@ ---- -slug: '/integrations/dbeaver' -sidebar_label: 'DBeaver' -description: 'DBeaver はマルチプラットフォームのデータベースツールです。' -title: 'ClickHouse への DBeaver の接続' ---- - -import Image from '@theme/IdealImage'; -import dbeaver_add_database from '@site/static/images/integrations/sql-clients/dbeaver-add-database.png'; -import dbeaver_host_port from '@site/static/images/integrations/sql-clients/dbeaver-host-port.png'; -import dbeaver_use_ssl from '@site/static/images/integrations/sql-clients/dbeaver-use-ssl.png'; -import dbeaver_test_connection from '@site/static/images/integrations/sql-clients/dbeaver-test-connection.png'; -import dbeaver_download_driver from '@site/static/images/integrations/sql-clients/dbeaver-download-driver.png'; -import dbeaver_sql_editor from '@site/static/images/integrations/sql-clients/dbeaver-sql-editor.png'; -import dbeaver_query_log_select from '@site/static/images/integrations/sql-clients/dbeaver-query-log-select.png'; -import ClickHouseSupportedBadge from '@theme/badges/ClickHouseSupported'; - - -# Connect DBeaver to ClickHouse - - - -DBeaver は複数のオファリングで利用可能です。このガイドでは [DBeaver Community](https://dbeaver.io/) を使用します。さまざまなオファリングと機能については [こちら](https://dbeaver.com/edition/) をご覧ください。 DBeaverはJDBCを使用してClickHouseに接続します。 - -:::note -ClickHouseの `Nullable` カラムの改善されたサポートのために、DBeaver バージョン 23.1.0 以上を使用してください。 -::: - -## 1. ClickHouseの詳細を集める {#1-gather-your-clickhouse-details} - -DBeaverは、HTTP(S)を介してJDBCを使用してClickHouseに接続します。必要な情報は以下の通りです: - -- エンドポイント -- ポート番号 -- ユーザー名 -- パスワード - -## 2. DBeaverをダウンロードする {#2-download-dbeaver} - -DBeaverは https://dbeaver.io/download/ からダウンロード可能です。 - -## 3. データベースを追加する {#3-add-a-database} - -- **Database > New Database Connection** メニューまたは **Database Navigator** の **New Database Connection** アイコンを使用して **Connect to a database** ダイアログを開きます: - - - -- **Analytical** を選択し、次に **ClickHouse** を選択します: - -- JDBC URLを構築します。**Main** タブでホスト、ポート、ユーザー名、パスワード、データベースを設定します: - - - -- デフォルトでは **SSL > Use SSL** プロパティは未設定ですが、ClickHouse Cloud またはHTTPポートでSSLを必要とするサーバーに接続する場合は、**SSL > Use SSL** をオンにします: - - - -- 接続をテストします: - - - -DBeaverがClickHouseドライバがインストールされていないことを検出すると、ダウンロードするよう提案します: - - - -- ドライバをダウンロードした後、再度接続を**テスト**します: - - - -## 4. 
ClickHouseにクエリを実行する {#4-query-clickhouse} - -クエリエディタを開いてクエリを実行します。 - -- 接続を右クリックし、**SQL Editor > Open SQL Script** を選択してクエリエディタを開きます: - - - -- `system.query_log` に対するサンプルクエリ: - - - -## 次のステップ {#next-steps} - -[DBeaver wiki](https://github.com/dbeaver/dbeaver/wiki) を参照してDBeaverの機能について学び、[ClickHouse documentation](https://clickhouse.com/docs) を参照してClickHouseの機能について学んでください。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/sql-clients/dbeaver.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/sql-clients/dbeaver.md.hash deleted file mode 100644 index 484c5da5de1..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/sql-clients/dbeaver.md.hash +++ /dev/null @@ -1 +0,0 @@ -0f970800924a82e2 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/sql-clients/dbvisualizer.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/sql-clients/dbvisualizer.md deleted file mode 100644 index ab2c0efa47d..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/sql-clients/dbvisualizer.md +++ /dev/null @@ -1,57 +0,0 @@ ---- -sidebar_label: 'DbVisualizer' -slug: '/integrations/dbvisualizer' -description: 'DbVisualizerはClickHouseに対する拡張サポートを備えたデータベースツールです。' -title: 'Connecting DbVisualizer to ClickHouse' ---- - -import ConnectionDetails from '@site/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_gather_your_details_http.mdx'; -import Image from '@theme/IdealImage'; -import dbvisualizer_driver_manager from '@site/static/images/integrations/sql-clients/dbvisualizer-driver-manager.png'; -import CommunityMaintainedBadge from '@theme/badges/CommunityMaintained'; - - -# Connecting DbVisualizer to ClickHouse - - - -## Start or download DbVisualizer {#start-or-download-dbvisualizer} - -DbVisualizerはここから入手可能です: https://www.dbvis.com/download/ - -## 1. Gather your connection details {#1-gather-your-connection-details} - - - -## 2. Built-in JDBC driver management {#2-built-in-jdbc-driver-management} - -DbVisualizerにはClickHouse用の最新のJDBCドライバが含まれています。最新のリリースや過去のバージョンにポイントする完全なJDBCドライバ管理機能が組み込まれています。 - - - -## 3. Connect to ClickHouse {#3-connect-to-clickhouse} - -DbVisualizerでデータベースに接続するには、まずデータベース接続を作成して設定する必要があります。 - -1. **Database->Create Database Connection** から新しい接続を作成し、ポップアップメニューからデータベース用のドライバを選択します。 - -2. 新しい接続のために **Object View** タブが開かれます。 - -3. **Name** フィールドに接続の名前を入力し、オプションで **Notes** フィールドに接続の説明を入力します。 - -4. **Database Type** は **Auto Detect** のままにします。 - -5. **Driver Type** で選択したドライバに緑のチェックマークが付いていれば、使用可能です。チェックマークが付いていない場合は、**Driver Manager** でドライバを設定する必要があります。 - -6. 残りのフィールドにデータベースサーバに関する情報を入力します。 - -7. **Ping Server** ボタンをクリックして指定されたアドレスとポートにネットワーク接続が確立できるか確認します。 - -8. 
Ping Serverの結果がサーバに到達できることを示している場合は、**Connect** をクリックしてデータベースサーバに接続します。 - -:::tip -データベースへの接続に問題がある場合は、[Fixing Connection Issues](https://confluence.dbvis.com/display/UG231/Fixing+Connection+Issues)を参照してください。 - -## Learn more {#learn-more} - -DbVisualizerに関する詳細情報は、[DbVisualizer documentation](https://confluence.dbvis.com/display/UG231/Users+Guide)をご覧ください。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/sql-clients/dbvisualizer.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/sql-clients/dbvisualizer.md.hash deleted file mode 100644 index 37ddb31fd12..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/sql-clients/dbvisualizer.md.hash +++ /dev/null @@ -1 +0,0 @@ -97f2b4bfdf2c923c diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/sql-clients/index.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/sql-clients/index.md deleted file mode 100644 index d9f52ddd159..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/sql-clients/index.md +++ /dev/null @@ -1,31 +0,0 @@ ---- -slug: '/integrations/sql-clients/' -description: 'ClickHouse SQLクライアントの概要ページ。' -keywords: -- 'integrations' -- 'DataGrip' -- 'DBeaver' -- 'DbVisualizer' -- 'Jupyter Notebooks' -- 'QStudio' -- 'TABLUM.IO' -- 'marimo' -title: 'SQLクライアント統合' ---- - - - - -# SQLクライアント統合 - -このセクションでは、ClickHouseをさまざまな一般的なデータベース管理、分析、視覚化ツールに統合する方法について説明します。 - -| ツール | 説明 | -|-----------------------------------------------------|-----------------------------------------------------------| -| [DataGrip](/integrations/datagrip) | パワフルなデータベースIDE | -| [DBeaver](/integrations/dbeaver) | データベース管理および開発ツール | -| [DbVisualizer](/integrations/dbvisualizer) | 開発者、DBA、アナリスト向けのデータベース管理ツール | -| [Jupyter Notebooks](/integrations/jupysql) | コード、視覚化、およびテキスト用のインタラクティブノート | -| [QStudio](/integrations/qstudio) | 無料のオープンソースSQL GUIクライアント | -| [TABLUM.IO](/integrations/tablumio) | クラウドベースのデータ視覚化プラットフォーム | -| [marimo](/integrations/marimo) | SQLを内蔵したPython用のオープンソースリアクティブノート | diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/sql-clients/index.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/sql-clients/index.md.hash deleted file mode 100644 index 91de3b307c6..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/sql-clients/index.md.hash +++ /dev/null @@ -1 +0,0 @@ -0988107d1497bcbc diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/sql-clients/jupysql.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/sql-clients/jupysql.md deleted file mode 100644 index deeb490dd73..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/sql-clients/jupysql.md +++ /dev/null @@ -1,417 +0,0 @@ ---- -slug: '/integrations/jupysql' -sidebar_label: 'Jupyterノートブック' -description: 'JupySQLはJupyter向けのマルチプラットフォームデータベースツールです。' -title: 'ClickHouseとJupySQLの使用方法' ---- - -import Image from '@theme/IdealImage'; -import jupysql_plot_1 from '@site/static/images/integrations/sql-clients/jupysql-plot-1.png'; -import jupysql_plot_2 from '@site/static/images/integrations/sql-clients/jupysql-plot-2.png'; -import CommunityMaintainedBadge from '@theme/badges/CommunityMaintained'; - - -# ClickHouseとのJupySQLの使用 - - - -このガイドでは、ClickHouseとの統合を示します。 - -JupySQLを使用して、ClickHouse上でクエリを実行します。 -データが読み込まれた後、SQLプロットを介して可視化します。 - 
-JupySQLとClickHouseの統合は、clickhouse_sqlalchemyライブラリの使用によって可能となります。このライブラリは、両システム間の簡単な通信を可能にし、ユーザーがClickHouseに接続し、SQL方言を指定できるようにします。接続後、ユーザーはClickHouseのネイティブUIまたはJupyterノートブックから直接SQLクエリを実行できます。 - -```python - -# 必要なパッケージをインストール -%pip install --quiet jupysql clickhouse_sqlalchemy -``` - - 注意: 更新されたパッケージを使用するには、カーネルを再起動する必要があります。 - -```python -import pandas as pd -from sklearn_evaluation import plot - - -# SQLセルを作成するためにjupysql Jupyter拡張をインポート -%load_ext sql -%config SqlMagic.autocommit=False -``` - -**次のステージに進むために、Clickhouseが起動し、接続可能であることを確認する必要があります。ローカル版またはクラウド版のいずれかを使用できます。** - -**注意:** 接続するインスタンスの種類に応じて接続文字列を調整する必要があります (url、user、password)。以下の例ではローカルインスタンスを使用しています。詳細については、[このガイド](/getting-started/quick-start)を参照してください。 - -```python -%sql clickhouse://default:@localhost:8123/default -``` - -```sql -%%sql -CREATE TABLE trips -( - `trip_id` UInt32, - `vendor_id` Enum8('1' = 1, '2' = 2, '3' = 3, '4' = 4, 'CMT' = 5, 'VTS' = 6, 'DDS' = 7, 'B02512' = 10, 'B02598' = 11, 'B02617' = 12, 'B02682' = 13, 'B02764' = 14, '' = 15), - `pickup_date` Date, - `pickup_datetime` DateTime, - `dropoff_date` Date, - `dropoff_datetime` DateTime, - `store_and_fwd_flag` UInt8, - `rate_code_id` UInt8, - `pickup_longitude` Float64, - `pickup_latitude` Float64, - `dropoff_longitude` Float64, - `dropoff_latitude` Float64, - `passenger_count` UInt8, - `trip_distance` Float64, - `fare_amount` Float32, - `extra` Float32, - `mta_tax` Float32, - `tip_amount` Float32, - `tolls_amount` Float32, - `ehail_fee` Float32, - `improvement_surcharge` Float32, - `total_amount` Float32, - `payment_type` Enum8('UNK' = 0, 'CSH' = 1, 'CRE' = 2, 'NOC' = 3, 'DIS' = 4), - `trip_type` UInt8, - `pickup` FixedString(25), - `dropoff` FixedString(25), - `cab_type` Enum8('yellow' = 1, 'green' = 2, 'uber' = 3), - `pickup_nyct2010_gid` Int8, - `pickup_ctlabel` Float32, - `pickup_borocode` Int8, - `pickup_ct2010` String, - `pickup_boroct2010` String, - `pickup_cdeligibil` String, - `pickup_ntacode` FixedString(4), - `pickup_ntaname` String, - `pickup_puma` UInt16, - `dropoff_nyct2010_gid` UInt8, - `dropoff_ctlabel` Float32, - `dropoff_borocode` UInt8, - `dropoff_ct2010` String, - `dropoff_boroct2010` String, - `dropoff_cdeligibil` String, - `dropoff_ntacode` FixedString(4), - `dropoff_ntaname` String, - `dropoff_puma` UInt16 -) -ENGINE = MergeTree -PARTITION BY toYYYYMM(pickup_date) -ORDER BY pickup_datetime; -``` - - * clickhouse://default:***@localhost:8123/default - 完了。 - - - - -
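(任意)この後のデータ読み込みには数分かかるため、先にテーブル定義が意図どおり作成されているかを確認しておくと安心です。例えば次のように確認できます:

```python
# 作成したtripsテーブルのカラム定義を確認する(任意の確認ステップ)
%sql DESCRIBE trips;
```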
    - - -
    - - - - -```sql -%%sql -INSERT INTO trips -SELECT * FROM s3( - 'https://datasets-documentation.s3.eu-west-3.amazonaws.com/nyc-taxi/trips_{1..2}.gz', - 'TabSeparatedWithNames', " - `trip_id` UInt32, - `vendor_id` Enum8('1' = 1, '2' = 2, '3' = 3, '4' = 4, 'CMT' = 5, 'VTS' = 6, 'DDS' = 7, 'B02512' = 10, 'B02598' = 11, 'B02617' = 12, 'B02682' = 13, 'B02764' = 14, '' = 15), - `pickup_date` Date, - `pickup_datetime` DateTime, - `dropoff_date` Date, - `dropoff_datetime` DateTime, - `store_and_fwd_flag` UInt8, - `rate_code_id` UInt8, - `pickup_longitude` Float64, - `pickup_latitude` Float64, - `dropoff_longitude` Float64, - `dropoff_latitude` Float64, - `passenger_count` UInt8, - `trip_distance` Float64, - `fare_amount` Float32, - `extra` Float32, - `mta_tax` Float32, - `tip_amount` Float32, - `tolls_amount` Float32, - `ehail_fee` Float32, - `improvement_surcharge` Float32, - `total_amount` Float32, - `payment_type` Enum8('UNK' = 0, 'CSH' = 1, 'CRE' = 2, 'NOC' = 3, 'DIS' = 4), - `trip_type` UInt8, - `pickup` FixedString(25), - `dropoff` FixedString(25), - `cab_type` Enum8('yellow' = 1, 'green' = 2, 'uber' = 3), - `pickup_nyct2010_gid` Int8, - `pickup_ctlabel` Float32, - `pickup_borocode` Int8, - `pickup_ct2010` String, - `pickup_boroct2010` String, - `pickup_cdeligibil` String, - `pickup_ntacode` FixedString(4), - `pickup_ntaname` String, - `pickup_puma` UInt16, - `dropoff_nyct2010_gid` UInt8, - `dropoff_ctlabel` Float32, - `dropoff_borocode` UInt8, - `dropoff_ct2010` String, - `dropoff_boroct2010` String, - `dropoff_cdeligibil` String, - `dropoff_ntacode` FixedString(4), - `dropoff_ntaname` String, - `dropoff_puma` UInt16 -") SETTINGS input_format_try_infer_datetimes = 0 -``` - - * clickhouse://default:***@localhost:8123/default - 完了。 - - - - - - - -
    - - - - -```python -%sql SELECT count() FROM trips limit 5; -``` - - * clickhouse://default:***@localhost:8123/default - 完了。 - - - - - - - - - - - -
| count() |
|---|
| 1999657 |
    - - - - -```python -%sql SELECT DISTINCT(pickup_ntaname) FROM trips limit 5; -``` - - * clickhouse://default:***@localhost:8123/default - 完了。 - - - - - - - - - - - - - - - - - - - - - - - -
| pickup_ntaname |
|---|
| Morningside Heights |
| Hudson Yards-Chelsea-Flatiron-Union Square |
| Midtown-Midtown South |
| SoHo-Tribeca-Civic Center-Little Italy |
| Murray Hill-Kips Bay |
    - - - - -```python -%sql SELECT round(avg(tip_amount), 2) FROM trips -``` - - * clickhouse://default:***@localhost:8123/default - 完了。 - - - - - - - - - - - -
| round(avg(tip_amount), 2) |
|---|
| 1.68 |
    - - - - -```sql -%%sql -SELECT - passenger_count, - ceil(avg(total_amount),2) AS average_total_amount -FROM trips -GROUP BY passenger_count -``` - - * clickhouse://default:***@localhost:8123/default - 完了。 - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
| passenger_count | average_total_amount |
|---|---|
| 0 | 22.69 |
| 1 | 15.97 |
| 2 | 17.15 |
| 3 | 16.76 |
| 4 | 17.33 |
| 5 | 16.35 |
| 6 | 16.04 |
| 7 | 59.8 |
| 8 | 36.41 |
| 9 | 9.81 |
    - - - - -```sql -%%sql -SELECT - pickup_date, - pickup_ntaname, - SUM(1) AS number_of_trips -FROM trips -GROUP BY pickup_date, pickup_ntaname -ORDER BY pickup_date ASC -limit 5; -``` - -* clickhouse://default:***@localhost:8123/default -完了。 - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
| pickup_date | pickup_ntaname | number_of_trips |
|---|---|---|
| 2015-07-01 | Bushwick North | 2 |
| 2015-07-01 | Brighton Beach | 1 |
| 2015-07-01 | Briarwood-Jamaica Hills | 3 |
| 2015-07-01 | Williamsburg | 1 |
| 2015-07-01 | Queensbridge-Ravenswood-Long Island City | 9 |
    - -```python - -# %sql DESCRIBE trips; -``` - -```python - -# %sql SELECT DISTINCT(trip_distance) FROM trips limit 50; -``` - -```sql -%%sql --save short-trips --no-execute -SELECT * -FROM trips -WHERE trip_distance < 6.3 -``` - - * clickhouse://default:***@localhost:8123/default - 実行をスキップ... - -```python -%sqlplot histogram --table short-trips --column trip_distance --bins 10 --with short-trips -``` - -```response - -``` -短距離のデータセットからの10個のビンを持つ旅行距離の分布を示すヒストグラム - - -```python -ax = %sqlplot histogram --table short-trips --column trip_distance --bins 50 --with short-trips -ax.grid() -ax.set_title("6.3未満の旅行距離") -_ = ax.set_xlabel("旅行距離") -``` - -6.3未満の旅行距離に関連する50個のビンとグリッドを持つヒストグラム diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/sql-clients/jupysql.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/sql-clients/jupysql.md.hash deleted file mode 100644 index 54b7d20ae3d..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/sql-clients/jupysql.md.hash +++ /dev/null @@ -1 +0,0 @@ -89676ae6d5073306 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/sql-clients/marimo.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/sql-clients/marimo.md deleted file mode 100644 index 6fe48306ab8..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/sql-clients/marimo.md +++ /dev/null @@ -1,122 +0,0 @@ ---- -slug: '/integrations/marimo' -sidebar_label: 'marimo' -description: 'marimoはデータとやり取りするための次世代Pythonノートブックです。' -title: 'ClickHouseとmarimoの使用方法' ---- - -import Image from '@theme/IdealImage'; -import marimo_connect from '@site/static/images/integrations/sql-clients/marimo/clickhouse-connect.gif'; -import add_db_panel from '@site/static/images/integrations/sql-clients/marimo/panel-arrow.png'; -import add_db_details from '@site/static/images/integrations/sql-clients/marimo/add-db-details.png'; -import run_cell from '@site/static/images/integrations/sql-clients/marimo/run-cell.png'; -import choose_sql_engine from '@site/static/images/integrations/sql-clients/marimo/choose-sql-engine.png'; -import results from '@site/static/images/integrations/sql-clients/marimo/results.png'; -import dropdown_cell_chart from '@site/static/images/integrations/sql-clients/marimo/dropdown-cell-chart.png'; -import run_app_view from '@site/static/images/integrations/sql-clients/marimo/run-app-view.png'; -import CommunityMaintainedBadge from '@theme/badges/CommunityMaintained'; - - -# ClickHouseを使用したmarimoの利用 - - - -[marimo](https://marimo.io/)は、SQLが組み込まれたオープンソースのリアクティブノートブックです。セルを実行したりUI要素と対話したりすると、marimoは影響を受けるセルを自動的に実行(または古くなったものとしてマーク)し、コードと出力を一貫性を持たせ、バグが発生する前に防ぎます。すべてのmarimoノートブックは純粋なPythonとして保存され、スクリプトとして実行可能で、アプリケーションとしてデプロイ可能です。 - -ClickHouseに接続 - -## 1. SQLサポートのあるmarimoのインストール {#install-marimo-sql} - -```shell -pip install "marimo[sql]" clickhouse_connect -marimo edit clickhouse_demo.py -``` -これにより、localhostで実行されているウェブブラウザが開きます。 - -## 2. ClickHouseへの接続 {#connect-to-clickhouse} - -marimoエディタの左側にあるデータソースパネルに移動し、「データベースを追加」をクリックします。 - -新しいデータベースを追加 - -データベースの詳細を入力するように求められます。 - -データベースの詳細を入力 - -その後、接続を確立するために実行できるセルが表示されます。 - -ClickHouseに接続するためにセルを実行 - -## 3. 
SQLを実行 {#run-sql} - -接続が設定されると、新しいSQLセルを作成し、clickhouseエンジンを選択できます。 - -SQLエンジンを選択 - -このガイドでは、New York Taxiデータセットを使用します。 - -```sql -CREATE TABLE trips ( - trip_id UInt32, - pickup_datetime DateTime, - dropoff_datetime DateTime, - pickup_longitude Nullable(Float64), - pickup_latitude Nullable(Float64), - dropoff_longitude Nullable(Float64), - dropoff_latitude Nullable(Float64), - passenger_count UInt8, - trip_distance Float32, - fare_amount Float32, - extra Float32, - tip_amount Float32, - tolls_amount Float32, - total_amount Float32, - payment_type Enum('CSH' = 1, 'CRE' = 2, 'NOC' = 3, 'DIS' = 4, 'UNK' = 5), - pickup_ntaname LowCardinality(String), - dropoff_ntaname LowCardinality(String) -) -ENGINE = MergeTree -PRIMARY KEY (pickup_datetime, dropoff_datetime); -``` - -```sql -INSERT INTO trips -SELECT - trip_id, - pickup_datetime, - dropoff_datetime, - pickup_longitude, - pickup_latitude, - dropoff_longitude, - dropoff_latitude, - passenger_count, - trip_distance, - fare_amount, - extra, - tip_amount, - tolls_amount, - total_amount, - payment_type, - pickup_ntaname, - dropoff_ntaname -FROM gcs( - 'https://storage.googleapis.com/clickhouse-public-datasets/nyc-taxi/trips_0.gz', - 'TabSeparatedWithNames' -); -``` - -```sql -SELECT * FROM trips LIMIT 1000; -``` - -データフレーム内の結果 - -これで、データフレーム内の結果を表示できるようになります。特定のピックアップ地点からの最も高額なドロップオフを視覚化したいと思います。marimoはこれをサポートするためにいくつかのUIコンポーネントを提供しています。私はドロップダウンを使用して地点を選択し、altairを使用してチャートを作成します。 - -ドロップダウン、テーブルおよびチャートの組み合わせ - -marimoのリアクティブ実行モデルはSQLクエリにまで拡張されるため、SQLの変更は自動的に依存するセルの下流計算をトリガーします(またはオプションとして、高価な計算のためにセルを古くなったものとしてマークします)。そのため、クエリが更新されるとチャートとテーブルが変更されます。 - -アプリビューを切り替えてデータを探索するためのクリーンインターフェースを持つこともできます。 - -アプリビューを実行 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/sql-clients/marimo.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/sql-clients/marimo.md.hash deleted file mode 100644 index 8db2d9b61bf..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/sql-clients/marimo.md.hash +++ /dev/null @@ -1 +0,0 @@ -45cc6713683b1123 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/sql-clients/qstudio.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/sql-clients/qstudio.md deleted file mode 100644 index 951469c4141..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/sql-clients/qstudio.md +++ /dev/null @@ -1,67 +0,0 @@ ---- -slug: '/integrations/qstudio' -sidebar_label: 'QStudio' -description: 'QStudio is a free SQL tool.' -title: 'Connect QStudio to ClickHouse' ---- - -import ConnectionDetails from '@site/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_gather_your_details_http.mdx'; -import qstudio_add_connection from '@site/static/images/integrations/sql-clients/qstudio-add-connection.png'; -import qstudio_running_query from '@site/static/images/integrations/sql-clients/qstudio-running-query.png'; -import Image from '@theme/IdealImage'; -import CommunityMaintainedBadge from '@theme/badges/CommunityMaintained'; - - -# Connect QStudio to ClickHouse - - - -QStudioは無料のSQL GUIで、SQLスクリプトの実行、テーブルの簡単なブラウジング、チャートの作成、結果のエクスポートを可能にします。すべてのオペレーティングシステムおよびすべてのデータベースで動作します。 - -QStudioはJDBCを使用してClickHouseに接続します。 - -## 1. Gather your ClickHouse details {#1-gather-your-clickhouse-details} - -QStudioはHTTP(S)経由でJDBCを使用してClickHouseに接続します。必要な情報は次のとおりです: - -- エンドポイント -- ポート番号 -- ユーザー名 -- パスワード - - - -## 2. Download QStudio {#2-download-qstudio} - -QStudioは https://www.timestored.com/qstudio/download/ から入手できます。 - -## 3. 
Add a database {#3-add-a-database} - -- QStudioを初めて開いたとき、メニューオプションの**サーバー->サーバーの追加**をクリックするか、ツールバーのサーバーの追加ボタンをクリックします。 -- 次に、詳細を設定します: - -QStudioデータベース接続設定画面でClickHouse接続設定を表示 - -1. サーバータイプ: Clickhouse.com -2. ホストには必ずhttps://を含めてください - ホスト: https://abc.def.clickhouse.cloud - ポート: 8443 -3. ユーザー名: default - パスワード: `XXXXXXXXXXX` - 4. 追加をクリック - -QStudioがClickHouseのJDBCドライバーがインストールされていないことを検出した場合、ダウンロードを提案します: - -## 4. Query ClickHouse {#4-query-clickhouse} - -- クエリエディタを開いてクエリを実行します。クエリを実行する方法は次のとおりです: -- Ctrl + e - ハイライトされたテキストを実行 -- Ctrl + Enter - 現在の行を実行 - -- 例のクエリ: - -QStudioインターフェースがClickHouseデータベースに対するサンプルSQLクエリの実行を表示 - -## Next Steps {#next-steps} - -[QStudio](https://www.timestored.com/qstudio)を参照してQStudioの機能について学び、[ClickHouse documentation](https://clickhouse.com/docs)を参照してClickHouseの機能について学びましょう。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/sql-clients/qstudio.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/sql-clients/qstudio.md.hash deleted file mode 100644 index da0f88f0827..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/sql-clients/qstudio.md.hash +++ /dev/null @@ -1 +0,0 @@ -af61f73060035d8f diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/sql-clients/sql-console.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/sql-clients/sql-console.md deleted file mode 100644 index 23f9ece0b7f..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/sql-clients/sql-console.md +++ /dev/null @@ -1,407 +0,0 @@ ---- -sidebar_label: 'SQLコンソール' -sidebar_position: 1 -title: 'SQLコンソール' -slug: '/integrations/sql-clients/sql-console' -description: 'SQLコンソールについて学ぶ' ---- - -import ExperimentalBadge from '@theme/badges/ExperimentalBadge'; -import Image from '@theme/IdealImage'; -import table_list_and_schema from '@site/static/images/cloud/sqlconsole/table-list-and-schema.png'; -import view_columns from '@site/static/images/cloud/sqlconsole/view-columns.png'; -import abc from '@site/static/images/cloud/sqlconsole/abc.png'; -import inspecting_cell_content from '@site/static/images/cloud/sqlconsole/inspecting-cell-content.png'; -import sort_descending_on_column from '@site/static/images/cloud/sqlconsole/sort-descending-on-column.png'; -import filter_on_radio_column_equal_gsm from '@site/static/images/cloud/sqlconsole/filter-on-radio-column-equal-gsm.png'; -import add_more_filters from '@site/static/images/cloud/sqlconsole/add-more-filters.png'; -import filtering_and_sorting_together from '@site/static/images/cloud/sqlconsole/filtering-and-sorting-together.png'; -import create_a_query_from_sorts_and_filters from '@site/static/images/cloud/sqlconsole/create-a-query-from-sorts-and-filters.png'; -import creating_a_query from '@site/static/images/cloud/sqlconsole/creating-a-query.png'; -import run_selected_query from '@site/static/images/cloud/sqlconsole/run-selected-query.png'; -import run_at_cursor_2 from '@site/static/images/cloud/sqlconsole/run-at-cursor-2.png'; -import run_at_cursor from '@site/static/images/cloud/sqlconsole/run-at-cursor.png'; -import cancel_a_query from '@site/static/images/cloud/sqlconsole/cancel-a-query.png'; -import sql_console_save_query from '@site/static/images/cloud/sqlconsole/sql-console-save-query.png'; -import sql_console_rename from '@site/static/images/cloud/sqlconsole/sql-console-rename.png'; -import sql_console_share from '@site/static/images/cloud/sqlconsole/sql-console-share.png'; -import sql_console_edit_access from 
'@site/static/images/cloud/sqlconsole/sql-console-edit-access.png'; -import sql_console_add_team from '@site/static/images/cloud/sqlconsole/sql-console-add-team.png'; -import sql_console_edit_member from '@site/static/images/cloud/sqlconsole/sql-console-edit-member.png'; -import sql_console_access_queries from '@site/static/images/cloud/sqlconsole/sql-console-access-queries.png'; -import search_hn from '@site/static/images/cloud/sqlconsole/search-hn.png'; -import match_in_body from '@site/static/images/cloud/sqlconsole/match-in-body.png'; -import pagination from '@site/static/images/cloud/sqlconsole/pagination.png'; -import pagination_nav from '@site/static/images/cloud/sqlconsole/pagination-nav.png'; -import download_as_csv from '@site/static/images/cloud/sqlconsole/download-as-csv.png'; -import tabular_query_results from '@site/static/images/cloud/sqlconsole/tabular-query-results.png'; -import switch_from_query_to_chart from '@site/static/images/cloud/sqlconsole/switch-from-query-to-chart.png'; -import trip_total_by_week from '@site/static/images/cloud/sqlconsole/trip-total-by-week.png'; -import bar_chart from '@site/static/images/cloud/sqlconsole/bar-chart.png'; -import change_from_bar_to_area from '@site/static/images/cloud/sqlconsole/change-from-bar-to-area.png'; -import update_query_name from '@site/static/images/cloud/sqlconsole/update-query-name.png'; -import update_subtitle_etc from '@site/static/images/cloud/sqlconsole/update-subtitle-etc.png'; -import adjust_axis_scale from '@site/static/images/cloud/sqlconsole/adjust-axis-scale.png'; -import give_a_query_a_name from '@site/static/images/cloud/sqlconsole/give-a-query-a-name.png' -import save_the_query from '@site/static/images/cloud/sqlconsole/save-the-query.png' - - -# SQLコンソール - -SQLコンソールは、ClickHouse Cloudでデータベースを探索し、クエリを実行する最も迅速で簡単な方法です。SQLコンソールを使用して、以下のことができます。 - -- ClickHouse Cloud Servicesに接続する -- テーブルデータを表示、フィルタリング、並べ替える -- クエリを実行し、結果データを数回のクリックで視覚化する -- チームメンバーとクエリを共有し、より効果的にコラボレーションする - -## テーブルの探索 {#exploring-tables} - -### テーブルリストとスキーマ情報の表示 {#viewing-table-list-and-schema-info} - -ClickHouseインスタンスに含まれるテーブルの概要は、左側のサイドバーエリアにあります。左側のバーの上部にあるデータベースセレクターを使用して、特定のデータベース内のテーブルを表示します。 - -左側のサイドバーにデータベーステーブルが表示されているテーブルリストとスキーマビュー - -リスト内のテーブルは、カラムやタイプを表示するために展開することもできます。 - -カラム名とデータ型を表示する拡張テーブルのビュー - -### テーブルデータの探索 {#exploring-table-data} - -リスト内のテーブルをクリックすると、新しいタブで開きます。テーブルビューでは、データを簡単に表示、選択、コピーできます。Microsoft ExcelやGoogle Sheetsなどのスプレッドシートアプリケーションにコピー&ペーストするときに構造とフォーマットが保持されることに注意してください。フッターのナビゲーションを使用して、テーブルデータのページ間を切り替えられます(30行ずつページネーションされています)。 - -選択してコピーできるデータを表示するテーブルビュー - -### セルデータの検査 {#inspecting-cell-data} - -セルインスペクターツールを使用して、単一のセルに含まれる大量のデータを表示できます。これを開くには、セルを右クリックして「セルを検査」を選択します。セルインスペクタの内容は、インスペクタの内容の右上隅にあるコピーアイコンをクリックすることでコピーできます。 - -選択されたセルの内容を表示するセルインスペクターダイアログ - -## テーブルのフィルタリングとソート {#filtering-and-sorting-tables} - -### テーブルのソート {#sorting-a-table} - -SQLコンソールでテーブルをソートするには、テーブルを開き、ツールバーの「ソート」ボタンを選択します。このボタンをクリックすると、ソートを設定するためのメニューが開きます。ソートするカラムと、ソートの順序(昇順または降順)を設定できます。「適用」を選択するか、Enterを押してテーブルをソートします。 - -カラムの降順ソート設定を示すソートダイアログ - -SQLコンソールでは、テーブルに複数のソートを追加することもできます。もう一度「ソート」ボタンをクリックして、別のソートを追加します。注意:ソートは、ソートペインに表示される順序(上から下)で適用されます。ソートを削除するには、単にそのソートの横にある「x」ボタンをクリックします。 - -### テーブルのフィルタリング {#filtering-a-table} - -SQLコンソールでテーブルをフィルタリングするには、テーブルを開き、「フィルター」ボタンを選択します。ソートと同様に、このボタンをクリックすると、フィルタを設定するためのメニューが開きます。フィルタに使用するカラムを選択し、必要な基準を選択できます。SQLコンソールは、カラムに含まれるデータのタイプに応じたフィルタオプションを賢く表示します。 - -GSMに等しいラジオカラムをフィルタリングする設定を示すフィルタダイアログ - -フィルタが満足いくものであれば、「適用」を選択してデータをフィルタリングできます。また、以下に示すように追加のフィルタを追加することもできます。 - 
-2000より大きい範囲で追加のフィルタを追加する方法を示すダイアログ - -ソート機能と同様に、フィルタを削除するにはフィルタの横にある「x」ボタンをクリックします。 - -### フィルタリングとソートの同時適用 {#filtering-and-sorting-together} - -SQLコンソールでは、テーブルを同時にフィルタリングおよびソートすることができます。これを行うには、上記の手順で必要なすべてのフィルタとソートを追加し、「適用」ボタンをクリックします。 - -同時にフィルタリングとソートが適用されているインターフェース - -### フィルタおよびソートからクエリを作成 {#creating-a-query-from-filters-and-sorts} - -SQLコンソールでは、フィルタとソートを1クリックでクエリに変換できます。「クエリを作成」ボタンをツールバーから選択し、選択したソートおよびフィルタのパラメータを使用するだけです。「クエリを作成」をクリックすると、新しいクエリタブが開き、テーブルビューに含まれるデータに対応するSQLコマンドが事前に入力されます。 - -フィルタとソートからSQLを生成するCreate Queryボタンを示すインターフェース - -:::note -「クエリを作成」機能を使用する際にフィルタやソートは必須ではありません。 -::: - -SQLコンソールでのクエリについて詳しく学ぶには、(link) クエリ文書をお読みください。 - -## クエリの作成と実行 {#creating-and-running-a-query} - -### クエリの作成 {#creating-a-query} - -SQLコンソールで新しいクエリを作成する方法は2つあります。 - -- タブバーの「+」ボタンをクリックする -- 左側のサイドバーのクエリリストから「新しいクエリ」ボタンを選択する - -+ボタンまたは新しいクエリボタンを使用して新しいクエリを作成する方法を示すインターフェース - -### クエリの実行 {#running-a-query} - -クエリを実行するには、SQLエディタにSQLコマンドをタイプし、「実行」ボタンをクリックするか、ショートカット `cmd / ctrl + enter` を使用します。複数のコマンドを順次書き込み、実行するには、それぞれのコマンドの後にセミコロンを追加してください。 - -クエリ実行オプション -デフォルトでは、実行ボタンをクリックするとSQLエディタに含まれるすべてのコマンドが実行されます。SQLコンソールは、他の2つのクエリ実行オプションをサポートしています。 - -- 選択したコマンドを実行 -- カーソル位置のコマンドを実行 - -選択したコマンドを実行するには、希望のコマンドまたはコマンドのシーケンスを強調表示して「実行」ボタンをクリックします(または `cmd / ctrl + enter` ショートカットを使用)。選択がある場合、SQLエディタのコンテキストメニュー(エディタ内の任意の位置を右クリックすることで開く)から「選択したコマンドを実行」を選択することもできます。 - -選択したSQLクエリの一部を実行する方法を示すインターフェース - -現在のカーソル位置でコマンドを実行するには、次の2つの方法が利用できます。 - -- 拡張実行オプションメニューから「カーソル位置で実行」を選択する(または対応する `cmd / ctrl + shift + enter` キーボードショートカットを使用) - -拡張実行オプションメニュー内のカーソル位置での実行オプション - - - SQLエディタのコンテキストメニューから「カーソル位置で実行」を選択する - -SQLエディタのコンテキストメニュー内のカーソル位置での実行オプション - -:::note -カーソル位置にあるコマンドは、実行時に黄色に点滅します。 -::: - -### クエリのキャンセル {#canceling-a-query} - -クエリが実行中の場合、クエリエディタツールバーの「実行」ボタンが「キャンセル」ボタンに置き換えられます。このボタンをクリックするか、 `Esc` キーを押すだけでクエリをキャンセルできます。注意:キャンセルされた後でも、すでに返された結果は維持されます。 - -クエリ実行中に表示されるキャンセルボタン - -### クエリの保存 {#saving-a-query} - -以前に名前が付けられていない場合、クエリの名前は「無題のクエリ」となります。クエリ名をクリックして変更します。クエリの名前を変更すると、そのクエリが保存されます。 - -無題のクエリからクエリ名を変更する方法を示すインターフェース - -クエリを保存するには、保存ボタンまたは `cmd / ctrl + s` キーボードショートカットを使用することもできます。 - -クエリエディタツールバー内の保存ボタン - -## GenAIを使用してクエリを管理する {#using-genai-to-manage-queries} - -この機能により、ユーザーは自然言語の質問としてクエリを書くことができ、クエリコンソールは利用可能なテーブルのコンテキストに基づいてSQLクエリを作成します。GenAIは、ユーザーがクエリをデバッグするのにも役立ちます。 - -GenAIの詳細については、[ClickHouse CloudにおけるGenAIによるクエリ提案の発表ブログ投稿](https://clickhouse.com/blog/announcing-genai-powered-query-suggestions-clickhouse-cloud)をご覧ください。 - -### テーブルセットアップ {#table-setup} - -UK Price Paidのサンプルデータセットをインポートし、それを使用していくつかのGenAIクエリを作成しましょう。 - -1. ClickHouse Cloudサービスを開きます。 -1. _+_アイコンをクリックして新しいクエリを作成します。 -1. 次のコードを貼り付けて実行します: - - ```sql - CREATE TABLE uk_price_paid - ( - price UInt32, - date Date, - postcode1 LowCardinality(String), - postcode2 LowCardinality(String), - type Enum8('terraced' = 1, 'semi-detached' = 2, 'detached' = 3, 'flat' = 4, 'other' = 0), - is_new UInt8, - duration Enum8('freehold' = 1, 'leasehold' = 2, 'unknown' = 0), - addr1 String, - addr2 String, - street LowCardinality(String), - locality LowCardinality(String), - town LowCardinality(String), - district LowCardinality(String), - county LowCardinality(String) - ) - ENGINE = MergeTree - ORDER BY (postcode1, postcode2, addr1, addr2); - ``` - - このクエリが完了するのに約1秒かかります。完了すると、`uk_price_paid`という名前の空のテーブルが作成されます。 - -1. 
新しいクエリを作成し、次のクエリを貼り付けます: - - ```sql - INSERT INTO uk_price_paid - WITH - splitByChar(' ', postcode) AS p - SELECT - toUInt32(price_string) AS price, - parseDateTimeBestEffortUS(time) AS date, - p[1] AS postcode1, - p[2] AS postcode2, - transform(a, ['T', 'S', 'D', 'F', 'O'], ['terraced', 'semi-detached', 'detached', 'flat', 'other']) AS type, - b = 'Y' AS is_new, - transform(c, ['F', 'L', 'U'], ['freehold', 'leasehold', 'unknown']) AS duration, - addr1, - addr2, - street, - locality, - town, - district, - county - FROM url( - 'http://prod.publicdata.landregistry.gov.uk.s3-website-eu-west-1.amazonaws.com/pp-complete.csv', - 'CSV', - 'uuid_string String, - price_string String, - time String, - postcode String, - a String, - b String, - c String, - addr1 String, - addr2 String, - street String, - locality String, - town String, - district String, - county String, - d String, - e String' - ) SETTINGS max_http_get_redirects=10; - ``` - -このクエリは、`gov.uk` ウェブサイトからデータセットを取得します。このファイルは約4GBであるため、処理には数分かかります。ClickHouseがクエリを処理した後、`uk_price_paid` テーブル内に全データセットが格納されるでしょう。 - -#### クエリ作成 {#query-creation} - -自然言語を使用してクエリを作成してみましょう。 - -1. **uk_price_paid** テーブルを選択し、次に **クエリを作成** をクリックします。 -1. **SQLを生成** をクリックします。クエリがChat-GPTに送信されることを受け入れるよう求められることがあります。続行するには、**同意します** を選択する必要があります。 -1. 自然言語クエリを入力し、ChatGPTがそれをSQLクエリに変換するようにプロンプトを使用できます。この例では、次のように入力します: - - > 年別のすべてのuk_price_paid取引の合計価格と総数を示してください。 - -1. コンソールは、私たちが探しているクエリを生成し、新しいタブに表示します。この例では、GenAIは次のクエリを作成しました: - - ```sql - -- 年別のすべてのuk_price_paid取引の合計価格と総数を示してください。 - SELECT year(date), sum(price) as total_price, Count(*) as total_transactions - FROM uk_price_paid - GROUP BY year(date) - ``` - -1. クエリが正しいことを確認したら、**実行** をクリックして実行します。 - -### デバッグ {#debugging} - -次に、GenAIのクエリデバッグ機能をテストしてみましょう。 - -1. _+_ アイコンをクリックし、新しいクエリを作成して次のコードを貼り付けます: - - ```sql - -- 年別のすべてのuk_price_paid取引の合計価格と総数を示してください。 - SELECT year(date), sum(pricee) as total_price, Count(*) as total_transactions - FROM uk_price_paid - GROUP BY year(date) - ``` - -1. **実行** をクリックします。このクエリは失敗します。なぜなら `pricee` から値を取得しようとしているからです。 -1. **クエリを修正** をクリックします。 -1. GenAIはクエリを修正しようとします。この場合、`pricee`を`price`に変更しました。また、`toYear`がこのシナリオで使用するのに適した関数であることに気付きました。 -1. 
推奨された変更をクエリに追加するために **適用** を選択し、**実行** をクリックします。 - -GenAIは実験的な機能であるため、生成されたクエリを任意のデータセットに対して実行する際には注意してください。 - -## 高度なクエリ機能 {#advanced-querying-features} - -### クエリ結果の検索 {#searching-query-results} - -クエリを実行した後、結果ペインの検索入力を使用して返された結果セットを迅速に検索できます。この機能は、追加の `WHERE` 句の結果をプレビューしたり、特定のデータが結果セットに含まれていることを確認したりするのに役立ちます。検索入力に値を入力すると、結果ペインが更新され、入力した値に一致するレコードが返されます。この例では、`hackernews` テーブル内で `ClickHouse` を含むコメントのすべての `breakfast` インスタンスを探してみましょう。 - -Hacker Newsデータの検索 - -注意:入力した値に一致する任意のフィールドが返されます。たとえば、上記のスクリーンショットにおける3番目のレコードは `by` フィールドで 'breakfast' と一致しませんが、`text` フィールドは一致します: - -本文中の一致 - -### ページネーション設定の調整 {#adjusting-pagination-settings} - -デフォルトでは、クエリの結果ペインは、単一のページにすべての結果レコードを表示します。大きな結果セットの場合、簡単に表示できるようにページネーションを使用することが望ましい場合があります。これを行うには、結果ペインの右下隅にあるページネーションセレクターを使用します: - -ページネーションオプション - -ページサイズを選択すると、結果セットに即座にページネーションが適用され、結果ペインのフッターの中央にナビゲーションオプションが表示されます。 - -ページネーションナビゲーション - -### クエリ結果データのエクスポート {#exporting-query-result-data} - -クエリ結果セットは、SQLコンソールから直接CSV形式に簡単にエクスポートできます。これを行うには、結果ペインツールバーの右側にある `•••` メニューを開き、「CSVとしてダウンロード」を選択します。 - -CSVとしてダウンロード - -## クエリデータの視覚化 {#visualizing-query-data} - -いくつかのデータは、チャート形式でより簡単に解釈できます。クエリ結果データから数回のクリックで視覚化を迅速に作成できます。例として、NYCタクシーの週間統計を計算するクエリを使用してみましょう: - -```sql -select - toStartOfWeek(pickup_datetime) as week, - sum(total_amount) as fare_total, - sum(trip_distance) as distance_total, - count(*) as trip_total -from - nyc_taxi -group by - 1 -order by - 1 asc -``` - -表形式のクエリ結果 - -視覚化なしでは、これらの結果は解釈するのが難しいです。これらをチャートに変換しましょう。 - -### チャートの作成 {#creating-charts} - -視覚化を構築するには、クエリ結果ペインツールバーから「チャート」オプションを選択します。チャート配置ペインが表示されます: - -クエリからチャートに切り替え - -`trip_total`を`week`別に追跡するシンプルな棒グラフを作成することから始めましょう。これを達成するために、`week`フィールドをx軸に、`trip_total`フィールドをy軸にドラッグします: - -週別のトリップトータル - -ほとんどのチャートタイプは数値軸に複数のフィールドをサポートしています。示すために、`fare_total`フィールドをy軸にドラッグします: - -棒グラフ - -### チャートのカスタマイズ {#customizing-charts} - -SQLコンソールは、チャート設定ペインのチャートタイプセレクターから選択できる10種類のチャートタイプをサポートしています。たとえば、以前のチャートタイプを棒グラフから面グラフに簡単に変更できます: - -棒グラフから面グラフに変更 - -チャートタイトルは、データを提供するクエリの名前と一致します。クエリの名前を更新すると、チャートのタイトルも更新されます: - -クエリ名の更新 - -さらに多くの高度なチャート特性は、チャート配置ペインの「高度な」セクションで調整できます。まず、以下の設定を調整します: - -- サブタイトル -- 軸タイトル -- x軸のラベルの向き - -チャートはそれに応じて更新されます: - -サブタイトルなどの更新 - -いくつかのシナリオでは、各フィールドの軸スケールを独自に調整する必要がある場合があります。これもまた、チャート配置ペインの「高度な」セクションで、軸範囲の最小値と最大値を指定することで実現できます。たとえば、上記のチャートは良好に見えますが、`trip_total`と`fare_total`フィールドの相関関係を示すためには、軸範囲を調整する必要があります: - -軸スケールの調整 - -## クエリの共有 {#sharing-queries} - -SQLコンソールを使用すると、チームとクエリを共有できます。クエリが共有されると、チームの全員がそのクエリを確認、編集できるようになります。共有クエリは、チームとのコラボレーションに激しく役立ちます。 - -クエリを共有するには、クエリツールバーの「共有」ボタンをクリックします。 - -クエリツールバー内の共有ボタン - -ダイアログが開き、チームのすべてのメンバーとクエリを共有できるようになります。複数のチームがある場合、どのチームとクエリを共有するかを選択できます。 - -共有クエリへのアクセスを編集するためのダイアログ - -共有クエリにチームを追加するためのインターフェース - -共有クエリへのメンバーアクセスを編集するためのインターフェース - -いくつかのシナリオでは、各フィールドの軸スケールを独自に調整する必要がある場合があります。これもまた、チャート配置ペインの「高度な」セクションで、軸範囲の最小値と最大値を指定することで実現できます。たとえば、上記のチャートは良好に見えますが、`trip_total`と`fare_total`フィールドの相関関係を示すためには、軸範囲を調整する必要があります: - -クエリリスト内の私と共有されたセクション diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/sql-clients/sql-console.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/sql-clients/sql-console.md.hash deleted file mode 100644 index 829abf7e863..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/sql-clients/sql-console.md.hash +++ /dev/null @@ -1 +0,0 @@ -2ec805dc4d103c13 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/sql-clients/tablum.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/sql-clients/tablum.md deleted file mode 100644 
index 4259d1a8fa8..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/sql-clients/tablum.md +++ /dev/null @@ -1,73 +0,0 @@ ---- -sidebar_label: 'TABLUM.IO' -slug: '/integrations/tablumio' -description: 'TABLUM.IO is a data management SaaS that supports ClickHouse out of - the box.' -title: 'Connecting TABLUM.IO to ClickHouse' ---- - -import Image from '@theme/IdealImage'; -import tablum_ch_0 from '@site/static/images/integrations/sql-clients/tablum-ch-0.png'; -import tablum_ch_1 from '@site/static/images/integrations/sql-clients/tablum-ch-1.png'; -import tablum_ch_2 from '@site/static/images/integrations/sql-clients/tablum-ch-2.png'; -import tablum_ch_3 from '@site/static/images/integrations/sql-clients/tablum-ch-3.png'; -import CommunityMaintainedBadge from '@theme/badges/CommunityMaintained'; - - -# Connecting TABLUM.IO to ClickHouse - - - -## Open the TABLUM.IO startup page {#open-the-tablumio-startup-page} - -:::note - あなたのLinuxサーバーにdockerでTABLUM.IOのセルフホステッドバージョンをインストールできます。 -::: - - -## 1. Sign up or sign in to the service {#1-sign-up-or-sign-in-to-the-service} - - まず、メールアドレスを使用してTABLUM.IOにサインアップするか、GoogleまたはFacebookのアカウントを使用してクイックログインを行います。 - -TABLUM.IOのログインページ - -## 2. Add a ClickHouse connector {#2-add-a-clickhouse-connector} - -ClickHouseの接続詳細を集めて、**Connector**タブに移動し、ホストURL、ポート、ユーザー名、パスワード、データベース名、およびコネクタの名前を入力します。これらのフィールドを入力した後、**Test connection**ボタンをクリックして詳細を確認し、その後**Save connector for me**をクリックして永続化します。 - -:::tip -正しい**HTTP**ポートを指定し、接続詳細に従って**SSL**モードを切り替えることを確認してください。 -::: - -:::tip -通常、TLSを使用する場合はポートは8443で、使用しない場合は8123です。 -::: - -TABLUM.IOでのClickHouseコネクタの追加 - -## 3. Select the connector {#3-select-the-connector} - -**Dataset**タブに移動します。ドロップダウンメニューから最近作成したClickHouseコネクタを選択します。右側のパネルには、利用可能なテーブルとスキーマのリストが表示されます。 - -TABLUM.IOでのClickHouseコネクタの選択 - -## 4. Input a SQL query and run it {#4-input-a-sql-query-and-run-it} - -SQLコンソールにクエリを入力し、**Run Query**を押します。結果はスプレッドシートとして表示されます。 - -:::tip -カラム名を右クリックすると、並べ替え、フィルター、その他のアクションのためのドロップダウンメニューが開きます。 -::: - -TABLUM.IOでのSQLクエリの実行 - -:::note -TABLUM.IOを使用すると、 -* TABLUM.IOアカウント内で複数のClickHouseコネクタを作成し、利用できます。 -* データソースに関係なく、読み込まれたデータに対してクエリを実行できます。 -* 結果を新しいClickHouseデータベースとして共有できます。 -::: - -## Learn more {#learn-more} - -TABLUM.IOに関する詳細情報はhttps://tablum.ioをご覧ください。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/sql-clients/tablum.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/sql-clients/tablum.md.hash deleted file mode 100644 index 1e0023c0f18..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/sql-clients/tablum.md.hash +++ /dev/null @@ -1 +0,0 @@ -79a44bd618e4840c diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/tools/data-integration/easypanel/index.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/tools/data-integration/easypanel/index.md deleted file mode 100644 index 484807a7e46..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/tools/data-integration/easypanel/index.md +++ /dev/null @@ -1,30 +0,0 @@ ---- -sidebar_label: 'Easypanel' -slug: '/integrations/easypanel' -keywords: -- 'clickhouse' -- 'Easypanel' -- 'deployment' -- 'integrate' -- 'install' -description: 'You can use it to deploy ClickHouse on your own server.' 
-title: 'Deploying ClickHouse on Easypanel' ---- - -import CommunityMaintainedBadge from '@theme/badges/CommunityMaintained'; - - -# ClickHouseのEasypanelへのデプロイ - - - -[Easypanel](https://easypanel.io)は、モダンなサーバー管理パネルです。これを使用して、自分のサーバーにClickHouseをデプロイできます。 - -[![Easypanelにデプロイ](https://easypanel.io/img/deploy-on-easypanel-40.svg)](https://easypanel.io/docs/templates/clickhouse) - -## 手順 {#instructions} - -1. クラウドプロバイダー上にUbuntuを実行するVMを作成します。 -2. ウェブサイトの指示に従ってEasypanelをインストールします。 -3. 新しいプロジェクトを作成します。 -4. 専用のテンプレートを使用してClickHouseをインストールします。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/tools/data-integration/easypanel/index.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/tools/data-integration/easypanel/index.md.hash deleted file mode 100644 index 66b41e2f6a3..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/tools/data-integration/easypanel/index.md.hash +++ /dev/null @@ -1 +0,0 @@ -7bb6c457a959f9fc diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/tools/data-integration/index.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/tools/data-integration/index.md deleted file mode 100644 index c54d37f42ea..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/tools/data-integration/index.md +++ /dev/null @@ -1,20 +0,0 @@ ---- -slug: '/integrations/tools/data-integrations' -keywords: -- 'Retool' -- 'Easypanel' -- 'Splunk' -title: 'データインテグレーション' -description: 'データインテグレーションセクションのランディングページ' ---- - - - - -# データインテグレーション - -| ページ | 説明 | -|-----------|---------------------------------------------------------------------------------------------------------------------------| -| [Easypanel](/integrations/easypanel) | Easypanelを使用すると、自分のサーバーにClickHouseをデプロイできます。 | -| [Retool](/integrations/retool) | リッチなユーザーインターフェースを持つウェブおよびモバイルアプリを迅速に構築し、複雑なタスクを自動化し、AIを統合します。すべてはあなたのデータから動いています。 | -| [Splunk](/integrations/audit-splunk) | ClickHouse Cloudの監査ログをSplunkに保存します。 | diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/tools/data-integration/index.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/tools/data-integration/index.md.hash deleted file mode 100644 index 87d95122a2d..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/tools/data-integration/index.md.hash +++ /dev/null @@ -1 +0,0 @@ -30265a12144cb62b diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/tools/data-integration/retool/index.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/tools/data-integration/retool/index.md deleted file mode 100644 index 176fe7de31a..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/tools/data-integration/retool/index.md +++ /dev/null @@ -1,65 +0,0 @@ ---- -sidebar_label: 'Retool' -slug: '/integrations/retool' -keywords: -- 'clickhouse' -- 'retool' -- 'connect' -- 'integrate' -- 'ui' -- 'admin' -- 'panel' -- 'dashboard' -- 'nocode' -- 'no-code' -description: 'Quickly build web and mobile apps with rich user interfaces, automate - complex tasks, and integrate AI—all powered by your data.' 
-title: 'Connecting Retool to ClickHouse' ---- - -import ConnectionDetails from '@site/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_gather_your_details_http.mdx'; -import Image from '@theme/IdealImage'; -import retool_01 from '@site/static/images/integrations/tools/data-integration/retool/retool_01.png'; -import retool_02 from '@site/static/images/integrations/tools/data-integration/retool/retool_02.png'; -import retool_03 from '@site/static/images/integrations/tools/data-integration/retool/retool_03.png'; -import retool_04 from '@site/static/images/integrations/tools/data-integration/retool/retool_04.png'; -import retool_05 from '@site/static/images/integrations/tools/data-integration/retool/retool_05.png'; -import CommunityMaintainedBadge from '@theme/badges/CommunityMaintained'; - - -# RetoolをClickHouseに接続する - - - -## 1. 接続詳細を収集する {#1-gather-your-connection-details} - - -## 2. ClickHouseリソースを作成する {#2-create-a-clickhouse-resource} - -Retoolアカウントにログインし、_Resources_ タブに移動します。「新規作成」 -> 「リソース」を選択します: - -新しいリソースを作成 -
    - -利用可能なコネクタのリストから「JDBC」を選択します: - -JDBCコネクタの選択 -
    - -セットアップウィザードで、「ドライバー名」として `com.clickhouse.jdbc.ClickHouseDriver` を選択してください: - -適切なドライバーを選択 -
    - -次の形式でClickHouseの認証情報を入力します: `jdbc:clickhouse://HOST:PORT/DATABASE?user=USERNAME&password=PASSWORD`。 -インスタンスがSSLを要求する場合、またはClickHouse Cloudを使用している場合は、接続文字列に `&ssl=true` を追加します。この場合、次のようになります: `jdbc:clickhouse://HOST:PORT/DATABASE?user=USERNAME&password=PASSWORD&ssl=true` - -認証情報を指定 -
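Before entering these details in Retool, it can help to confirm that the host, user, and password are reachable at all. The following is a hedged sketch, not part of the Retool setup itself: it assumes the ClickHouse HTTP(S) interface is exposed on port 8443 and uses HOST, USERNAME, and PASSWORD as placeholders.

```bash
# Hypothetical sanity check against the ClickHouse HTTP(S) interface.
# HOST, USERNAME and PASSWORD are placeholders for your own connection details.
curl "https://HOST:8443/?user=USERNAME&password=PASSWORD" --data-binary "SELECT 1"
```

If this returns `1`, the same host and credentials should also be usable in the JDBC connection string above.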
    - -その後、接続をテストします: - -接続をテスト -
    - -これで、ClickHouseリソースを使用してアプリに進むことができるはずです。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/tools/data-integration/retool/index.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/tools/data-integration/retool/index.md.hash deleted file mode 100644 index fe89529b731..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/tools/data-integration/retool/index.md.hash +++ /dev/null @@ -1 +0,0 @@ -e5095a31649ee178 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/tools/data-integration/splunk/index.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/tools/data-integration/splunk/index.md deleted file mode 100644 index d7db621a6d7..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/tools/data-integration/splunk/index.md +++ /dev/null @@ -1,111 +0,0 @@ ---- -sidebar_label: 'Splunk' -slug: '/integrations/audit-splunk' -keywords: -- 'clickhouse' -- 'Splunk' -- 'audit' -- 'cloud' -description: 'ClickHouse Cloudの監査ログをSplunkに保存します。' -title: 'ClickHouse Cloudの監査ログをSplunkに保存する' ---- - -import Image from '@theme/IdealImage'; -import splunk_001 from '@site/static/images/integrations/tools/data-integration/splunk/splunk_001.png'; -import splunk_002 from '@site/static/images/integrations/tools/data-integration/splunk/splunk_002.png'; -import splunk_003 from '@site/static/images/integrations/tools/data-integration/splunk/splunk_003.png'; -import splunk_004 from '@site/static/images/integrations/tools/data-integration/splunk/splunk_004.png'; -import splunk_005 from '@site/static/images/integrations/tools/data-integration/splunk/splunk_005.png'; -import splunk_006 from '@site/static/images/integrations/tools/data-integration/splunk/splunk_006.png'; -import splunk_007 from '@site/static/images/integrations/tools/data-integration/splunk/splunk_007.png'; -import splunk_008 from '@site/static/images/integrations/tools/data-integration/splunk/splunk_008.png'; -import splunk_009 from '@site/static/images/integrations/tools/data-integration/splunk/splunk_009.png'; -import splunk_010 from '@site/static/images/integrations/tools/data-integration/splunk/splunk_010.png'; -import splunk_011 from '@site/static/images/integrations/tools/data-integration/splunk/splunk_011.png'; -import splunk_012 from '@site/static/images/integrations/tools/data-integration/splunk/splunk_012.png'; -import ClickHouseSupportedBadge from '@theme/badges/ClickHouseSupported'; - - -# ClickHouse Cloud の監査ログを Splunk に保存する - - - -[Splunk](https://www.splunk.com/) は、データ分析およびモニタリングプラットフォームです。 - -このアドオンを使用すると、ユーザーは [ClickHouse Cloud 監査ログ](/cloud/security/audit-logging) を Splunk に保存できます。これは、[ClickHouse Cloud API](/cloud/manage/api/api-overview) を使用して監査ログをダウンロードします。 - -このアドオンには、モジュラー入力のみが含まれ、追加の UI は提供されていません。 - - -# インストール - -## Splunk Enterprise 向け {#for-splunk-enterprise} - -[Splunkbase](https://splunkbase.splunk.com/app/7709) から ClickHouse Cloud 監査アドオンをダウンロードします。 - -Splunkbase の ClickHouse Cloud 監査アドオンのダウンロードページを表示するウェブサイト - -Splunk Enterprise の場合、Apps -> Manage に移動します。次に、「ファイルからアプリをインストール」をクリックします。 - -アプリ管理ページに「ファイルからアプリをインストール」オプションを表示する Splunk Enterprise インターフェース - -Splunkbase からダウンロードしたアーカイブファイルを選択し、「アップロード」をクリックします。 - -ClickHouse アドオンをアップロードするための Splunk アプリインストールダイアログ - -すべてが問題なく進めば、ClickHouse 監査ログアプリケーションがインストールされているはずです。そうでない場合は、Splunkd ログを確認してエラーを探してください。 - - -# モジュラー入力の構成 - -モジュラー入力を構成するには、まず ClickHouse Cloud デプロイメントから情報を取得する必要があります。 - -- 組織 ID -- 管理者 [API Key](/cloud/manage/openapi) - -## ClickHouse Cloud 
からの情報取得 {#getting-information-from-clickhouse-cloud} - -[ClickHouse Cloud コンソール](https://console.clickhouse.cloud/) にログインします。 - -組織 -> 組織の詳細に移動します。そこで、組織 ID をコピーできます。 - -組織 ID を表示する ClickHouse Cloud コンソールの組織詳細ページ - -次に、左側のメニューから API キーに移動します。 - -左側のナビゲーションメニューに API キーセクションを表示する ClickHouse Cloud コンソール - -API キーを作成し、意味のある名前を付けて `Admin` 権限を選択します。「API キーを生成」をクリックします。 - -Admin 権限を選択した API キー作成インターフェースを表示する ClickHouse Cloud コンソール - -API キーとシークレットを安全な場所に保存します。 - -生成された API キーとシークレットを保存することを表示する ClickHouse Cloud コンソール - -## Splunk でのデータ入力の構成 {#configure-data-input-in-splunk} - -再度 Splunk に戻り、設定 -> データ入力に移動します。 - -データ入力オプションを持つ設定メニューを表示する Splunk インターフェース - -ClickHouse Cloud 監査ログデータ入力を選択します。 - -ClickHouse Cloud 監査ログオプションを表示する Splunk データ入力ページ - -「新規」をクリックしてデータ入力の新しいインスタンスを構成します。 - -新しい ClickHouse Cloud 監査ログデータ入力を構成するための Splunk インターフェース - -すべての情報を入力したら、「次へ」をクリックします。 - -完成した ClickHouse データ入力設定を持つ Splunk 構成ページ - -入力が構成されましたので、監査ログの閲覧を開始できます。 - - -# 使用法 - -モジュラー入力はデータを Splunk に保存します。データを表示するには、Splunk の一般的な検索ビューを使用できます。 - -ClickHouse 監査ログデータを表示する Splunk 検索インターフェース diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/tools/data-integration/splunk/index.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/tools/data-integration/splunk/index.md.hash deleted file mode 100644 index 26f2cad0b03..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/tools/data-integration/splunk/index.md.hash +++ /dev/null @@ -1 +0,0 @@ -2ad5a370aca39bfe diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/tools/index.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/tools/index.md deleted file mode 100644 index 4f6ab13199e..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/tools/index.md +++ /dev/null @@ -1,20 +0,0 @@ ---- -slug: '/integrations/tools' -keywords: -- 'Retool' -- 'Easypanel' -- 'Splunk' -title: 'ツール' -description: 'ツールセクションのランディングページ' ---- - - - - -# ツール - -| ページ | 説明 | -|-----------|---------------------------------------------------------------------------------------------------------------------------------| -| [SQL クライアント](/integrations/sql-clients) | ClickHouseをさまざまな一般的なデータベース管理、分析、視覚化ツールと統合する方法 | -| [データ統合](/integrations/tools/data-integrations) | ClickHouse用のデータ統合 | -| [その他](/integrations/audit-splunk) | ClickHouse用の雑多なツール | diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/tools/index.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/tools/index.md.hash deleted file mode 100644 index 2a61de1b9aa..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/tools/index.md.hash +++ /dev/null @@ -1 +0,0 @@ -67328dd00699a25a diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/cli.md b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/cli.md deleted file mode 100644 index 69962501c1a..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/cli.md +++ /dev/null @@ -1,650 +0,0 @@ ---- -description: 'ClickHouse コマンドラインクライアントインターフェースのドキュメント' -sidebar_label: 'ClickHouse クライアント' -sidebar_position: 17 -slug: '/interfaces/cli' -title: 'ClickHouse クライアント' ---- - -import Image from '@theme/IdealImage'; -import cloud_connect_button from '@site/static/images/_snippets/cloud-connect-button.png'; -import connection_details_native from '@site/static/images/_snippets/connection-details-native.png' - 
-ClickHouseは、ClickHouseサーバーに対して直接SQLクエリを実行するためのネイティブなコマンドラインクライアントを提供します。インタラクティブモード(ライブクエリ実行用)とバッチモード(スクリプトと自動化用)の両方をサポートしています。クエリ結果は端末に表示するか、ファイルにエクスポートでき、Pretty、CSV、JSONなどのすべてのClickHouse出力[フォーマット](formats.md)をサポートしています。 - -このクライアントは、プログレスバーや読み取った行数、処理したバイト数、クエリ実行時間とともに、クエリ実行に関するリアルタイムのフィードバックを提供します。また、[コマンドラインオプション](#command-line-options)と[構成ファイル](#configuration_files)の両方をサポートしています。 - -## インストール {#install} - -ClickHouseをダウンロードするには、次のコマンドを実行します: - -```bash -curl https://clickhouse.com/ | sh -``` - -次にインストールするには、以下を実行します: -```bash -sudo ./clickhouse install -``` - -さらに多くのインストールオプションについては、[ClickHouseをインストール](../getting-started/install/install.mdx)を参照してください。 - -クライアントとサーバーの異なるバージョンは互換性がありますが、古いクライアントでは一部の機能が利用できない場合があります。クライアントとサーバーには同じバージョンを使用することをお勧めします。 - -## 実行する {#run} - -:::note -ClickHouseをダウンロードしただけでインストールしていない場合は、`./clickhouse client`を使用してください。`clickhouse-client`を使用しないでください。 -::: - -ClickHouseサーバーに接続するには、次のコマンドを実行します: - -```bash -$ clickhouse-client --host server - -ClickHouse client version 24.12.2.29 (official build). -Connecting to server:9000 as user default. -Connected to ClickHouse server version 24.12.2. - -:) -``` - -必要に応じて、追加の接続詳細を指定します: - -**`--port `** - ClickHouseサーバーが接続を受け付けるポート。デフォルトポートは9440(TLS)と9000(非TLS)です。ClickHouse Clientはネイティブプロトコルを使用し、HTTP(S)は使用しません。 - -**`-s [ --secure ]`** - TLSを使用するかどうか(通常は自動検出されます)。 - -**`-u [ --user ] `** - 接続するデータベースユーザー。デフォルトでは`default`ユーザーとして接続します。 - -**`--password `** - データベースユーザーのパスワード。構成ファイル内に接続用のパスワードを指定することもできます。パスワードを指定しない場合は、クライアントがパスワードを尋ねます。 - -**`-c [ --config ] `** - ClickHouse Clientの構成ファイルの場所(デフォルトの場所でない場合)。 - -**`--connection `** - 構成ファイルから事前に構成された接続詳細の名前。 - -コマンドラインオプションの完全なリストについては、[コマンドラインオプション](#command-line-options)を参照してください。 - -### ClickHouse Cloudへの接続 {#connecting-cloud} - -ClickHouse Cloudサービスの詳細は、ClickHouse Cloudコンソールで確認できます。接続したいサービスを選択し、**接続**をクリックします: - -ClickHouse Cloud service connect button - -

    - -**ネイティブ**を選択すると、詳細が表示され、`clickhouse-client`コマンドの例が示されます: - -ClickHouse Cloud Native TCP connection details - -### 構成ファイルに接続を保存する {#connection-credentials} - -1つまたは複数のClickHouseサーバーの接続詳細を[構成ファイル](#configuration_files)に保存できます。 - -形式は次のようになります: -```xml - - - default - hostname - 9440 - 1 - default - password - - -``` - -詳細は[構成ファイルに関するセクション](#configuration_files)を参照してください。 - -:::note -クエリ構文に集中するため、残りの例では接続詳細(`--host`、`--port`など)を省略しています。コマンドを使用するときはそれらを追加することを忘れないでください。 -::: - -## バッチモード {#batch-mode} - -ClickHouse Clientをインタラクティブに使用するのではなく、バッチモードで実行できます。 - -単一のクエリを次のように指定できます: - -```bash -$ clickhouse-client "SELECT sum(number) FROM numbers(10)" -45 -``` - -`--query`コマンドラインオプションも使用できます: - -```bash -$ clickhouse-client --query "SELECT uniq(number) FROM numbers(10)" -10 -``` - -`stdin`にクエリを提供することもできます: - -```bash -$ echo "SELECT avg(number) FROM numbers(10)" | clickhouse-client -4.5 -``` - -データの挿入: - -```bash -$ echo "Hello\nGoodbye" | clickhouse-client --query "INSERT INTO messages FORMAT CSV" -``` - -`--query`が指定された場合、入力は行送りの後にリクエストに追加されます。 - -**リモートClickHouseサービスへのCSVファイルの挿入** - -この例では、サンプルデータセットCSVファイル`cell_towers.csv`を、`default`データベースの既存のテーブル`cell_towers`に挿入しています: - -```bash -clickhouse-client --host HOSTNAME.clickhouse.cloud \ - --port 9440 \ - --user default \ - --password PASSWORD \ - --query "INSERT INTO cell_towers FORMAT CSVWithNames" \ - < cell_towers.csv -``` - -**データ挿入のさらなる例** - -```bash -echo -ne "1, 'some text', '2016-08-14 00:00:00'\n2, 'some more text', '2016-08-14 00:00:01'" | \ - clickhouse-client --database=test --query="INSERT INTO test FORMAT CSV"; -``` - -```bash -cat <<_EOF | clickhouse-client --database=test --query="INSERT INTO test FORMAT CSV"; -3, 'some text', '2016-08-14 00:00:00' -4, 'some more text', '2016-08-14 00:00:01' -_EOF -``` - -```bash -cat file.csv | clickhouse-client --database=test --query="INSERT INTO test FORMAT CSV"; -``` - -## 注意事項 {#notes} - -インタラクティブモードでは、デフォルトの出力形式は`PrettyCompact`です。クエリの`FORMAT`句で形式を変更するか、`--format`コマンドラインオプションを指定できます。垂直形式を使用するには、`--vertical`またはクエリの末尾に`\G`を指定します。この形式では、各値が別の行に印刷され、広いテーブルには便利です。 - -バッチモードでは、デフォルトのデータ[フォーマット](formats.md)は`TabSeparated`です。クエリの`FORMAT`句で形式を設定できます。 - -インタラクティブモードでは、デフォルトで入力したものがEnterキーを押すと実行されます。クエリの末尾にセミコロンは必要ありません。 - -`-m, --multiline`パラメーターを指定してクライアントを起動できます。マルチラインクエリを入力するには、行送りの前にバックスラッシュ`\`を入力します。Enterを押すと、クエリの次の行を入力するように求められます。クエリを実行するには、セミコロンで終了してEnterを押します。 - -ClickHouse Clientは`replxx`(`readline`類似)に基づいているため、親しみのあるキーボードショートカットを使用し、履歴を保持します。履歴はデフォルトで`~/.clickhouse-client-history`に書き込まれます。 - -クライアントを終了するには、`Ctrl+D`を押すか、クエリの代わりに次のいずれかを入力します:`exit`、`quit`、 `logout`、 `exit;`、 `quit;`、 `logout;`、 `q`、 `Q`、 `:q`。 - -クエリを処理する際、クライアントは以下を表示します: - -1. プログレスは、デフォルトで1秒あたり10回以上更新されません。クイッククエリの場合、プログレスが表示される暇がないことがあります。 -2. デバッグ用に解析後のフォーマットされたクエリ。 -3. 指定されたフォーマットでの結果。 -4. 
結果の行数、経過時間、クエリ処理の平均速度。すべてのデータ量は未圧縮データ参照します。 - -長いクエリをキャンセルするには`Ctrl+C`を押します。ただし、サーバーがリクエストを中断するのを待つ必要があります。特定の段階でクエリをキャンセルすることはできません。待たずに2度目に`Ctrl+C`を押すと、クライアントが終了します。 - -ClickHouse Clientは、クエリのために外部データ(外部一時テーブル)を渡すことも可能です。詳細については、[クエリ処理用の外部データに関するセクション](../engines/table-engines/special/external-data.md)を参照してください。 - -## パラメーターを使用したクエリ {#cli-queries-with-parameters} - -クエリ内でパラメーターを指定し、コマンドラインオプションでその値を渡すことができます。これにより、クライアントサイドで特定の動的値でクエリをフォーマットする必要がなくなります。例: - -```bash -$ clickhouse-client --param_parName="[1, 2]" --query "SELECT * FROM table WHERE a = {parName:Array(UInt16)}" -``` - -インタラクティブセッション内からパラメーターを設定することも可能です: -```bash -$ clickhouse-client --query "SET param_parName='[1, 2]'; SELECT {parName:Array(UInt16)}" -``` - -### クエリ構文 {#cli-queries-with-parameters-syntax} - -クエリ内では、コマンドラインパラメータを使用して埋め込みたい値を次の形式で中括弧で囲みます: - -```sql -{:} -``` - -- `name` — プレースホルダー識別子。対応するコマンドラインオプションは`--param_ = value`です。 -- `data type` — パラメータの[データ型](../sql-reference/data-types/index.md)。例えば、データ構造`(integer, ('string', integer))`は`Tuple(UInt8, Tuple(String, UInt8))`データ型を持ち得ます(他の[整数](../sql-reference/data-types/int-uint.md)型も使用可能です)。テーブル名やデータベース名、カラム名をパラメータとして渡すことも可能で、その場合はデータ型として`Identifier`を使用する必要があります。 - -### 例 {#cli-queries-with-parameters-examples} - -```bash -$ clickhouse-client --param_tuple_in_tuple="(10, ('dt', 10))" \ - --query "SELECT * FROM table WHERE val = {tuple_in_tuple:Tuple(UInt8, Tuple(String, UInt8))}" - -$ clickhouse-client --param_tbl="numbers" --param_db="system" --param_col="number" --param_alias="top_ten" \ - --query "SELECT {col:Identifier} as {alias:Identifier} FROM {db:Identifier}.{tbl:Identifier} LIMIT 10" -``` - -## エイリアス {#cli_aliases} - -- `\l` - SHOW DATABASES -- `\d` - SHOW TABLES -- `\c ` - USE DATABASE -- `.` - 前のクエリを繰り返す - - -## キーボードショートカット {#keyboard_shortcuts} - -- `Alt (Option) + Shift + e` - 現在のクエリでエディタを開く。環境変数`EDITOR`で使用するエディタを指定することができます。デフォルトでは`vim`が使用されます。 -- `Alt (Option) + #` - 行をコメントアウト。 -- `Ctrl + r` - ファジー履歴検索。 - -すべての利用可能なキーボードショートカットの完全なリストは、[replxx](https://github.com/AmokHuginnsson/replxx/blob/1f149bf/src/replxx_impl.cxx#L262)で確認できます。 - -:::tip -MacOSでメタキー(Option)の正しい動作を設定するには: - -iTerm2:Preferences -> Profile -> Keys -> Left Option keyに移動し、Esc+をクリックします。 -::: - - -## 接続文字列 {#connection_string} - -ClickHouse Clientは、接続文字列を使用してClickHouseサーバーに接続することもサポートしています。これはMongoDBやPostgreSQL、MySQLに類似しています。構文は次のようになります: - -```text -clickhouse:[//[user[:password]@][hosts_and_ports]][/database][?query_parameters] -``` - -**構成要素** - -- `user` - (オプション)データベースのユーザー名。デフォルト:`default`。 -- `password` - (オプション)データベースユーザーのパスワード。`:`が指定され、パスワードが空の場合、クライアントはユーザーのパスワードを求めます。 -- `hosts_and_ports` - (オプション)ホストとオプションのポートのリスト`host[:port] [, host:[port]], ...`。デフォルト:`localhost:9000`。 -- `database` - (オプション)データベース名。デフォルト:`default`。 -- `query_parameters` - (オプション)キーと値のペアのリスト`param1=value1[,¶m2=value2], ...`。パラメータのいくつかでは、値は必要ありません。パラメータ名と値は大文字と小文字を区別します。 - -接続文字列でユーザー名、パスワード、またはデータベースを指定した場合、`--user`、`--password`、または`--database`で指定することはできません(その逆も然り)。 - -ホストコンポーネントは、ホスト名またはIPv4またはIPv6アドレスのいずれかです。IPv6アドレスは中括弧[]で囲む必要があります: - -```text -clickhouse://[2001:db8::1234] -``` - -接続文字列には、複数のホストを含めることができます。ClickHouse Clientは、これらのホストに順番に接続を試みます(左から右へ)。接続が確立されると、残りのホストへの接続は試みられません。 - -接続文字列は、`clickHouse-client`の最初の引数として指定する必要があります。接続文字列は、`--host`および`--port`を除く任意の[コマンドラインオプション](#command-line-options)と組み合わせることができます。 - -`query_parameters`に対しては、以下のキーが許可されています: - -- `secure`または省略形`ス`。指定された場合、クライアントはセキュアな接続(TLS)を介してサーバーに接続します。[コマンドラインオプション](#command-line-options)の`--secure`を参照してください。 - -**パーセントエンコーディング** - -非US 
ASCII、スペース、`user`、`password`、`hosts`、`database`および`query parameters`内の特殊文字は[パーセントエンコード](https://en.wikipedia.org/wiki/URL_encoding)する必要があります。 - -### 例 {#connection_string_examples} - -ポート9000の`localhost`に接続し、`SELECT 1`クエリを実行します。 - -```bash -clickhouse-client clickhouse://localhost:9000 --query "SELECT 1" -``` - -ユーザー`john`として、パスワード`secret`で、ホスト`127.0.0.1`およびポート`9000`に接続します。 - -```bash -clickhouse-client clickhouse://john:secret@127.0.0.1:9000 -``` - -ユーザー`default`の`localhost`に、IPV6アドレス`[::1]`のホストとポート`9000`に接続します。 - -```bash -clickhouse-client clickhouse://[::1]:9000 -``` - -マルチラインモードでポート9000の`localhost`に接続します。 - -```bash -clickhouse-client clickhouse://localhost:9000 '-m' -``` - -ユーザー`default`としてポート9000の`localhost`に接続します。 - -```bash -clickhouse-client clickhouse://default@localhost:9000 - - -# equivalent to: -clickhouse-client clickhouse://localhost:9000 --user default -``` - -ポート9000の`localhost`に接続し、デフォルトで`my_database`データベースを使用します。 - -```bash -clickhouse-client clickhouse://localhost:9000/my_database - - -# equivalent to: -clickhouse-client clickhouse://localhost:9000 --database my_database -``` - -ポート9000の`localhost`に接続し、接続文字列で指定された`my_database`データベースにデフォルトで接続し、省略形の`ス`パラメータを使用して安全な接続を確立します。 - -```bash -clickhouse-client clickhouse://localhost/my_database?s - - -# equivalent to: -clickhouse-client clickhouse://localhost/my_database -s -``` - -デフォルトのホストを使用して、デフォルトのポート、デフォルトのユーザー、デフォルトのデータベースに接続します。 - -```bash -clickhouse-client clickhouse: -``` - -デフォルトのポートを使用して、デフォルトのホストに接続し、ユーザー`my_user`として、パスワードなしで接続します。 - -```bash -clickhouse-client clickhouse://my_user@ - - -# 上記の:と@の間の空白のパスワードは、接続を開始する前にユーザーにパスワードを入力するよう求めることを意味します。 -clickhouse-client clickhouse://my_user:@ -``` - -ユーザー名にメールを使用して`localhost`に接続します。`@`記号はパーセントエンコードして`%40`になります。 - -```bash -clickhouse-client clickhouse://some_user%40some_mail.com@localhost:9000 -``` - -2つのホストのいずれかに接続します:`192.168.1.15`、`192.168.1.25`。 - -```bash -clickhouse-client clickhouse://192.168.1.15,192.168.1.25 -``` - -## クエリID形式 {#query-id-format} - -インタラクティブモードでは、ClickHouse Clientは各クエリのクエリIDを表示します。デフォルトでは、IDは次のようにフォーマットされます: - -```sql -Query id: 927f137d-00f1-4175-8914-0dd066365e96 -``` - -カスタムフォーマットは、構成ファイル内の`query_id_formats`タグ内で指定できます。フォーマット文字列内の`{query_id}`プレースホルダーはクエリIDで置き換えられます。タグ内には複数のフォーマット文字列が許可されています。この機能は、クエリのプロファイリングを促進するためのURLを生成するために使用できます。 - -**例** - -```xml - - - http://speedscope-host/#profileURL=qp%3Fid%3D{query_id} - - -``` - -上記の構成では、クエリのIDは次の形式で表示されます: - -```response -speedscope:http://speedscope-host/#profileURL=qp%3Fid%3Dc8ecc783-e753-4b38-97f1-42cddfb98b7d -``` - - -## 構成ファイル {#configuration_files} - -ClickHouse Clientは次のいずれかの最初に存在するファイルを使用します: - -- `-c [ -C, --config, --config-file ]`パラメータで定義されているファイル。 -- `./clickhouse-client.[xml|yaml|yml]` -- `~/.clickhouse-client/config.[xml|yaml|yml]` -- `/etc/clickhouse-client/config.[xml|yaml|yml]` - -ClickHouseリポジトリ内にあるサンプル構成ファイル:[`clickhouse-client.xml`](https://github.com/ClickHouse/ClickHouse/blob/master/programs/client/clickhouse-client.xml) - -XML構文の例: - -```xml - - username - password - true - - - /etc/ssl/cert.pem - - - -``` - -YAML形式の同じ構成: - -```yaml -user: username -password: 'password' -secure: true -openSSL: - client: - caConfig: '/etc/ssl/cert.pem' -``` - -## コマンドラインオプション {#command-line-options} - -すべてのコマンドラインオプションは、コマンドラインで直接指定するか、[構成ファイル](#configuration_files)のデフォルトとして指定できます。 - -### 一般オプション {#command-line-options-general} - -**`-c [ -C, --config, --config-file ] `** - -クライアントの構成ファイルの場所(デフォルトの場所でない場合)。[構成ファイル](#configuration_files)を参照してください。 - -**`--help`** - 
-使用法の概要を表示し、終了します。`--verbose`と組み合わせることで、クエリ設定を含むすべての可能なオプションを表示します。 - -**`--history_file `** - -コマンド履歴を含むファイルへのパス。 - -**`--history_max_entries`** - -履歴ファイル内の最大エントリ数。 - -デフォルト値:1000000(100万) - -**`--prompt `** - -カスタムプロンプトを指定します。 - -デフォルト値:サーバーの`display_name`。 - -**`--verbose`** - -出力の冗長性を増加させます。 - -**`-V [ --version ]`** - -バージョンを表示して終了します。 - -### 接続オプション {#command-line-options-connection} - -**`--connection `** - -構成ファイルから事前に構成された接続詳細の名前。詳細は[接続資格情報](#connection-credentials)を参照してください。 - -**`-d [ --database ] `** - -この接続のデフォルトとして選択するデータベース。 - -デフォルト値:サーバー設定の現在のデータベース(デフォルトで`default`)。 - -**`-h [ --host ] `** - -接続先のClickHouseサーバーのホスト名。ホスト名またはIPv4またはIPv6アドレスになります。複数のホストを渡すことができます。 - -デフォルト値:localhost - -**`--jwt `** - -認証のためにJSON Web Token(JWT)を使用します。 - -サーバーJWT認証はClickHouse Cloudでのみ利用可能です。 - -**`--no-warnings`** - -クライアントがサーバーに接続するときに、`system.warnings`からの警告を表示しないようにします。 - -**`--password `** - -データベースユーザーのパスワード。接続用のパスワードを構成ファイル内に指定することもできます。パスワードを指定しない場合、クライアントがパスワードを尋ねてきます。 - -**`--port `** - -サーバーが接続を受け付けているポート。デフォルトのポートは9440(TLS)と9000(非TLS)です。 - -注:クライアントはネイティブプロトコルを使用し、HTTP(S)は使用しません。 - -デフォルト値:`--secure`が指定されている場合は9440、そうでない場合は9000。ホスト名が`.clickhouse.cloud`で終わる場合は常に9440がデフォルトです。 - -**`-s [ --secure ]`** - -TLSを使用するかどうか。 - -ポート9440(デフォルトのセキュアポート)またはClickHouse Cloudに接続されると自動的に有効になります。 - -[構成ファイル](#configuration_files)内でCA証明書を設定する必要がある場合があります。利用可能な構成設定は、[サーバー側のTLS構成](../operations/server-configuration-parameters/settings.md#openssl)と同じです。 - -**`--ssh-key-file `** - -サーバーとの認証のために使用されるSSHプライベートキーを含むファイル。 - -**`--ssh-key-passphrase `** - -`--ssh-key-file`で指定されたSSHプライベートキーのパスフレーズ。 - -**`-u [ --user ] `** - -接続するデータベースユーザー。 - -デフォルト値:default - -`--host`、`--port`、`--user`、および`--password`オプションの代わりに、クライアントは[接続文字列](#connection_string)もサポートしています。 - -### クエリオプション {#command-line-options-query} - -**`--param_=`** - -[パラメータ付きクエリ](#cli-queries-with-parameters)のパラメータの置換値。 - -**`-q [ --query ] `** - -バッチモードで実行するクエリ。複数回指定できます(例:`--query "SELECT 1" --query "SELECT 2"`)または、セミコロンで区切られた複数のクエリを一度に指定できます(例:`--query "SELECT 1; SELECT 2;"`)。後者の場合、`VALUES`以外の形式の`INSERT`クエリは空の行で区切る必要があります。 - -単一のクエリはパラメータなしでも指定できます: -```bash -$ clickhouse-client "SELECT 1" -1 -``` - -`--queries-file`と同時に使用することはできません。 - -**`--queries-file `** - -クエリを含むファイルへのパス。複数回指定できます(例:`--queries-file queries1.sql --queries-file queries2.sql`)。 - -`--query`と同時に使用することはできません。 - -**`-m [ --multiline ]`** - -指定された場合、マルチラインクエリを許可します(Enterを押さないでクエリを送信しない)。クエリはセミコロンで終了するまで送信されません。 - -### クエリ設定 {#command-line-options-query-settings} - -クエリ設定は、クライアント内でコマンドラインオプションとして指定できます。例えば: -```bash -$ clickhouse-client --max_threads 1 -``` - -設定のリストについては、[設定](../operations/settings/settings.md)を参照してください。 - -### フォーマットオプション {#command-line-options-formatting} - -**`-f [ --format ] `** - -結果を出力するために指定された形式を使用します。 - -サポートされているフォーマットのリストについては、[入力および出力データの形式](formats.md)を参照してください。 - -デフォルト値:TabSeparated - -**`--pager `** - -すべての出力をこのコマンドにパイプします。通常の使用法は`less`(例:広い結果セットを表示するために`less -S`)です。 - -**`-E [ --vertical ]`** - -結果を出力するために[垂直形式](../interfaces/formats.md#vertical)を使用します。これは`–-format Vertical`と同じです。この形式では、各値が別の行に印刷され、広いテーブルを表示する際に役立ちます。 - -### 実行の詳細 {#command-line-options-execution-details} - -**`--enable-progress-table-toggle`** - -プログレステーブルの切り替えを有効にします。Controlキー(スペース)を押すことで切り替えが行えます。プログレステーブル表示が有効なインタラクティブモードでのみ適用可能です。 - -デフォルト値:有効 - -**`--hardware-utilization`** - -プログレスバーにハードウェアの利用状況情報を表示します。 - -**`--memory-usage`** - -指定された場合、非インタラクティブモードで`stderr`にメモリ使用量を印刷します。 - -可能な値: -- `none` - メモリ使用量を印刷しない -- `default` - バイト数を印刷する -- `readable` - 可読形式でメモリ使用量を印刷する - 
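As a hedged illustration of how these execution-detail options can be combined in non-interactive mode, the sketch below prints memory usage and elapsed time to `stderr`; it assumes the `--option=value` spelling is accepted, and the query itself is only an example.

```bash
# Minimal sketch: run a query in batch mode and report memory usage and
# elapsed time on stderr alongside the result.
clickhouse-client --query "SELECT sum(number) FROM numbers(1000000)" \
  --memory-usage=readable \
  --time
```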
-**`--print-profile-events`** - -`ProfileEvents`パケットを印刷します。 - -**`--progress`** - -クエリ実行の進捗を印刷します。 - -可能な値: -- `tty|on|1|true|yes` - インタラクティブモードで端末に出力します -- `err` - 非インタラクティブモードで`stderr`に出力します -- `off|0|false|no` - プログレス印刷を無効にします - -デフォルト値:インタラクティブモードで`tty`、非インタラクティブモード(バッチモード)で`off`。 - -**`--progress-table`** - -クエリ実行中に変化するメトリックを含む進捗テーブルを印刷します。 - -可能な値: -- `tty|on|1|true|yes` - インタラクティブモードで端末に出力します -- `err` - 非インタラクティブモードで`stderr`に出力します -- `off|0|false|no` - プログレステーブルを無効にします - -デフォルト値:インタラクティブモードで`tty`、非インタラクティブモード(バッチモード)で`off`。 - -**`--stacktrace`** - -例外のスタックトレースを印刷します。 - -**`-t [ --time ]`** - -非インタラクティブモードでクエリ実行時間を`stderr`に印刷します(ベンチマーク用)。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/cli.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/cli.md.hash deleted file mode 100644 index aea31ed92f6..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/cli.md.hash +++ /dev/null @@ -1 +0,0 @@ -ff8cfe8c18612740 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/cpp.md b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/cpp.md deleted file mode 100644 index 9124f164157..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/cpp.md +++ /dev/null @@ -1,20 +0,0 @@ ---- -description: 'Documentation for the ClickHouse C++ client library and integration - with u-server framework' -sidebar_label: 'C++ Client Library' -sidebar_position: 24 -slug: '/interfaces/cpp' -title: 'C++ Client Library' ---- - - - - -# C++ クライアントライブラリ - -[clickhouse-cpp](https://github.com/ClickHouse/clickhouse-cpp) リポジトリの README を参照してください。 - - -# userver 非同期フレームワーク - -[userver (beta)](https://github.com/userver-framework/userver) は ClickHouse のための組み込みサポートを提供しています。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/cpp.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/cpp.md.hash deleted file mode 100644 index 93b851892e8..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/cpp.md.hash +++ /dev/null @@ -1 +0,0 @@ -8badf4ca2cc17c43 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats.md b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats.md deleted file mode 100644 index 50399fce874..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats.md +++ /dev/null @@ -1,484 +0,0 @@ ---- -description: 'Overview of supported data formats for input and output in ClickHouse' -sidebar_label: 'View all formats...' 
-sidebar_position: 21 -slug: '/interfaces/formats' -title: 'Formats for input and output data' ---- - -import CloudNotSupportedBadge from '@theme/badges/CloudNotSupportedBadge'; - - - -# 入力データと出力データの形式 {#formats-for-input-and-output-data} - -ClickHouseは、既知のテキストおよびバイナリデータ形式のほとんどをサポートしています。これにより、ClickHouseの利点を活かすためのほぼすべてのデータパイプラインへの容易な統合が可能になります。 - -## 入力形式 {#input-formats} - -入力形式は次の目的で使用されます。 -- `INSERT`文に提供されたデータの解析 -- `File`、`URL`、または`HDFS`などのファイルバックテーブルからの`SELECT`クエリの実行 -- 辞書の読み取り - -適切な入力形式を選択することは、ClickHouseでの効率的なデータ取り込みにとって重要です。70以上のサポートされている形式の中から、最もパフォーマンスの高いオプションを選択することは、挿入速度、CPUおよびメモリ使用量、全体的なシステム効率に大きな影響を与える可能性があります。これらの選択肢をナビゲートするために、形式間の取り込みパフォーマンスをベンチマークし、重要なポイントを明らかにしました。 - -- **[Native](formats/Native.md)形式が最も効率的な入力形式です**。最良の圧縮、最低のリソース使用、最小のサーバー側処理オーバーヘッドを提供します。 -- **圧縮は重要です** - LZ4は最小限のCPUコストでデータサイズを削減し、ZSTDは追加のCPU使用量を犠牲にしてより高い圧縮を提供します。 -- **事前ソートは中程度の影響を持ちます**。ClickHouseはすでに効率的にソートを行います。 -- **バッチ処理は効率を大幅に改善します** - 大きなバッチは挿入オーバーヘッドを削減し、スループットを向上させます。 - -結果やベストプラクティスの詳細については、完全な[ベンチマーク分析](https://www.clickhouse.com/blog/clickhouse-input-format-matchup-which-is-fastest-most-efficient)をお読みください。完全なテスト結果については、[FastFormats](https://fastformats.clickhouse.com/)オンラインダッシュボードを探索してください。 - -## 出力形式 {#output-formats} - -出力用にサポートされている形式は次の目的で使用されます。 -- `SELECT`クエリの結果の整列 -- ファイルバックテーブルへの`INSERT`操作の実行 - -## 形式の概要 {#formats-overview} - -サポートされている形式は次のとおりです: - -| 形式 | 入力 | 出力 | -|-------------------------------------------------------------------------------------------|-----|-------| -| [TabSeparated](#tabseparated) | ✔ | ✔ | -| [TabSeparatedRaw](#tabseparatedraw) | ✔ | ✔ | -| [TabSeparatedWithNames](#tabseparatedwithnames) | ✔ | ✔ | -| [TabSeparatedWithNamesAndTypes](#tabseparatedwithnamesandtypes) | ✔ | ✔ | -| [TabSeparatedRawWithNames](#tabseparatedrawwithnames) | ✔ | ✔ | -| [TabSeparatedRawWithNamesAndTypes](#tabseparatedrawwithnamesandtypes) | ✔ | ✔ | -| [Template](#format-template) | ✔ | ✔ | -| [TemplateIgnoreSpaces](#templateignorespaces) | ✔ | ✗ | -| [CSV](#csv) | ✔ | ✔ | -| [CSVWithNames](#csvwithnames) | ✔ | ✔ | -| [CSVWithNamesAndTypes](#csvwithnamesandtypes) | ✔ | ✔ | -| [CustomSeparated](#format-customseparated) | ✔ | ✔ | -| [CustomSeparatedWithNames](#customseparatedwithnames) | ✔ | ✔ | -| [CustomSeparatedWithNamesAndTypes](#customseparatedwithnamesandtypes) | ✔ | ✔ | -| [SQLInsert](#sqlinsert) | ✗ | ✔ | -| [Values](#data-format-values) | ✔ | ✔ | -| [Vertical](#vertical) | ✗ | ✔ | -| [JSON](#json) | ✔ | ✔ | -| [JSONAsString](#jsonasstring) | ✔ | ✗ | -| [JSONAsObject](#jsonasobject) | ✔ | ✗ | -| [JSONStrings](#jsonstrings) | ✔ | ✔ | -| [JSONColumns](#jsoncolumns) | ✔ | ✔ | -| [JSONColumnsWithMetadata](#jsoncolumnsmonoblock) | ✔ | ✔ | -| [JSONCompact](#jsoncompact) | ✔ | ✔ | -| [JSONCompactStrings](#jsoncompactstrings) | ✗ | ✔ | -| [JSONCompactColumns](#jsoncompactcolumns) | ✔ | ✔ | -| [JSONEachRow](#jsoneachrow) | ✔ | ✔ | -| [PrettyJSONEachRow](#prettyjsoneachrow) | ✗ | ✔ | -| [JSONEachRowWithProgress](#jsoneachrowwithprogress) | ✗ | ✔ | -| [JSONStringsEachRow](#jsonstringseachrow) | ✔ | ✔ | -| [JSONStringsEachRowWithProgress](#jsonstringseachrowwithprogress) | ✗ | ✔ | -| [JSONCompactEachRow](#jsoncompacteachrow) | ✔ | ✔ | -| [JSONCompactEachRowWithNames](#jsoncompacteachrowwithnames) | ✔ | ✔ | -| [JSONCompactEachRowWithNamesAndTypes](#jsoncompacteachrowwithnamesandtypes) | ✔ | ✔ | -| [JSONCompactEachRowWithProgress](#jsoncompacteachrow) | ✗ | ✔ | -| [JSONCompactStringsEachRow](#jsoncompactstringseachrow) | ✔ | ✔ | -| 
[JSONCompactStringsEachRowWithNames](#jsoncompactstringseachrowwithnames) | ✔ | ✔ | -| [JSONCompactStringsEachRowWithNamesAndTypes](#jsoncompactstringseachrowwithnamesandtypes) | ✔ | ✔ | -| [JSONCompactStringsEachRowWithProgress](#jsoncompactstringseachrowwithnamesandtypes) | ✗ | ✔ | -| [JSONObjectEachRow](#jsonobjecteachrow) | ✔ | ✔ | -| [BSONEachRow](#bsoneachrow) | ✔ | ✔ | -| [TSKV](#tskv) | ✔ | ✔ | -| [Pretty](#pretty) | ✗ | ✔ | -| [PrettyNoEscapes](#prettynoescapes) | ✗ | ✔ | -| [PrettyMonoBlock](#prettymonoblock) | ✗ | ✔ | -| [PrettyNoEscapesMonoBlock](#prettynoescapesmonoblock) | ✗ | ✔ | -| [PrettyCompact](#prettycompact) | ✗ | ✔ | -| [PrettyCompactNoEscapes](#prettycompactnoescapes) | ✗ | ✔ | -| [PrettyCompactMonoBlock](#prettycompactmonoblock) | ✗ | ✔ | -| [PrettyCompactNoEscapesMonoBlock](#prettycompactnoescapesmonoblock) | ✗ | ✔ | -| [PrettySpace](#prettyspace) | ✗ | ✔ | -| [PrettySpaceNoEscapes](#prettyspacenoescapes) | ✗ | ✔ | -| [PrettySpaceMonoBlock](#prettyspacemonoblock) | ✗ | ✔ | -| [PrettySpaceNoEscapesMonoBlock](#prettyspacenoescapesmonoblock) | ✗ | ✔ | -| [Prometheus](#prometheus) | ✗ | ✔ | -| [Protobuf](#protobuf) | ✔ | ✔ | -| [ProtobufSingle](#protobufsingle) | ✔ | ✔ | -| [ProtobufList](#protobuflist) | ✔ | ✔ | -| [Avro](#data-format-avro) | ✔ | ✔ | -| [AvroConfluent](#data-format-avro-confluent) | ✔ | ✗ | -| [Parquet](#data-format-parquet) | ✔ | ✔ | -| [ParquetMetadata](#data-format-parquet-metadata) | ✔ | ✗ | -| [Arrow](#data-format-arrow) | ✔ | ✔ | -| [ArrowStream](#data-format-arrow-stream) | ✔ | ✔ | -| [ORC](#data-format-orc) | ✔ | ✔ | -| [One](#data-format-one) | ✔ | ✗ | -| [Npy](#data-format-npy) | ✔ | ✔ | -| [RowBinary](#rowbinary) | ✔ | ✔ | -| [RowBinaryWithNames](#rowbinarywithnames) | ✔ | ✔ | -| [RowBinaryWithNamesAndTypes](#rowbinarywithnamesandtypes) | ✔ | ✔ | -| [RowBinaryWithDefaults](#rowbinarywithdefaults) | ✔ | ✗ | -| [Native](#native) | ✔ | ✔ | -| [Null](#null) | ✗ | ✔ | -| [XML](#xml) | ✗ | ✔ | -| [CapnProto](#capnproto) | ✔ | ✔ | -| [LineAsString](#lineasstring) | ✔ | ✔ | -| [Regexp](#data-format-regexp) | ✔ | ✗ | -| [RawBLOB](#rawblob) | ✔ | ✔ | -| [MsgPack](#msgpack) | ✔ | ✔ | -| [MySQLDump](#mysqldump) | ✔ | ✗ | -| [DWARF](#dwarf) | ✔ | ✗ | -| [Markdown](#markdown) | ✗ | ✔ | -| [Form](#form) | ✔ | ✗ | - - -ClickHouseの設定を使用して、一部の形式処理パラメータを制御することができます。詳細については、[Settings](/operations/settings/settings-formats.md)セクションをお読みください。 - -### TabSeparated {#tabseparated} - -[TabSeparated](/interfaces/formats/TabSeparated)を参照してください。 - -### TabSeparatedRaw {#tabseparatedraw} - -[TabSeparatedRaw](/interfaces/formats/TabSeparatedRaw)を参照してください。 - -### TabSeparatedWithNames {#tabseparatedwithnames} - -[TabSeparatedWithNames](/interfaces/formats/TabSeparatedWithNames)を参照してください。 - -### TabSeparatedWithNamesAndTypes {#tabseparatedwithnamesandtypes} - -[TabSeparatedWithNamesAndTypes](/interfaces/formats/TabSeparatedWithNamesAndTypes)を参照してください。 - -### TabSeparatedRawWithNames {#tabseparatedrawwithnames} - -[TabSeparatedRawWithNames](/interfaces/formats/TabSeparatedRawWithNames)を参照してください。 - -### TabSeparatedRawWithNamesAndTypes {#tabseparatedrawwithnamesandtypes} - -[TabSeparatedRawWithNamesAndTypes](/interfaces/formats/TabSeparatedRawWithNamesAndTypes)を参照してください。 - -### Template {#format-template} - -[Template](/interfaces/formats/Template)を参照してください。 - -### TemplateIgnoreSpaces {#templateignorespaces} - -[TemplateIgnoreSpaces](/interfaces/formats/TemplateIgnoreSpaces)を参照してください。 - -### TSKV {#tskv} - -[TSKV](/interfaces/formats/TSKV)を参照してください。 - -### CSV {#csv} - 
-[CSV](../interfaces/formats/CSV)を参照してください。 - -### CSVWithNames {#csvwithnames} - -[CSVWithNames](/interfaces/formats/CSVWithNames)を参照してください。 - -### CSVWithNamesAndTypes {#csvwithnamesandtypes} - -[CSVWithNamesAndTypes](/interfaces/formats/CSVWithNamesAndTypes)を参照してください。 - -### CustomSeparated {#format-customseparated} - -[CustomSeparated](/interfaces/formats/CustomSeparated)を参照してください。 - -### CustomSeparatedWithNames {#customseparatedwithnames} - -[CustomSeparatedWithNames](/interfaces/formats/CustomSeparatedWithNames)を参照してください。 - -### CustomSeparatedWithNamesAndTypes {#customseparatedwithnamesandtypes} - -[CustomSeparatedWithNamesAndTypes](/interfaces/formats/CustomSeparatedWithNamesAndTypes)を参照してください。 - -### SQLInsert {#sqlinsert} - -[SQLInsert](/interfaces/formats/SQLInsert)を参照してください。 - -### JSON {#json} - -[JSON](/interfaces/formats/JSON)を参照してください。 - -### JSONStrings {#jsonstrings} - -[JSONStrings](/interfaces/formats/JSONStrings)を参照してください。 - -### JSONColumns {#jsoncolumns} - -[JSONColumns](/interfaces/formats/JSONColumns)を参照してください。 - -### JSONColumnsWithMetadata {#jsoncolumnsmonoblock} - -[JSONColumnsWithMetadata](/interfaces/formats/JSONColumnsWithMetadata)を参照してください。 - -### JSONAsString {#jsonasstring} - -[JSONAsString](/interfaces/formats/JSONAsString)を参照してください。 - -### JSONAsObject {#jsonasobject} - -[JSONAsObject](/interfaces/formats/JSONAsObject)を参照してください。 - -### JSONCompact {#jsoncompact} - -[JSONCompact](/interfaces/formats/JSONCompact)を参照してください。 - -### JSONCompactStrings {#jsoncompactstrings} - -[JSONCompactStrings](/interfaces/formats/JSONCompactStrings)を参照してください。 - -### JSONCompactColumns {#jsoncompactcolumns} - -[JSONCompactColumns](/interfaces/formats/JSONCompactColumns)を参照してください。 - -### JSONEachRow {#jsoneachrow} - -[JSONEachRow](/interfaces/formats/JSONEachRow)を参照してください。 - -### PrettyJSONEachRow {#prettyjsoneachrow} - -[PrettyJSONEachRow](/interfaces/formats/PrettyJSONEachRow)を参照してください。 - -### JSONStringsEachRow {#jsonstringseachrow} - -[JSONStringsEachRow](/interfaces/formats/JSONStringsEachRow)を参照してください。 - -### JSONCompactEachRow {#jsoncompacteachrow} - -[JSONCompactEachRow](/interfaces/formats/JSONCompactEachRow)を参照してください。 - -### JSONCompactStringsEachRow {#jsoncompactstringseachrow} - -[JSONCompactStringsEachRow](/interfaces/formats/JSONCompactStringsEachRow)を参照してください。 - -### JSONEachRowWithProgress {#jsoneachrowwithprogress} - -[JSONEachRowWithProgress](/interfaces/formats/JSONEachRowWithProgress)を参照してください。 - -### JSONStringsEachRowWithProgress {#jsonstringseachrowwithprogress} - -[JSONStringsEachRowWithProgress](/interfaces/formats/JSONStringsEachRowWithProgress)を参照してください。 - -### JSONCompactEachRowWithNames {#jsoncompacteachrowwithnames} - -[JSONCompactEachRowWithNames](/interfaces/formats/JSONCompactEachRowWithNames)を参照してください。 - -### JSONCompactEachRowWithNamesAndTypes {#jsoncompacteachrowwithnamesandtypes} - -[JSONCompactEachRowWithNamesAndTypes](/interfaces/formats/JSONCompactEachRowWithNamesAndTypes)を参照してください。 - -### JSONCompactEachRowWithProgress {#jsoncompacteachrowwithprogress} - -`JSONEachRowWithProgress`に似ていますが、`JSONCompactEachRow`形式のように`row`イベントをコンパクトな形式で出力します。 - -### JSONCompactStringsEachRowWithNames {#jsoncompactstringseachrowwithnames} - -[JSONCompactStringsEachRowWithNames](/interfaces/formats/JSONCompactStringsEachRowWithNames)を参照してください。 - -### JSONCompactStringsEachRowWithNamesAndTypes {#jsoncompactstringseachrowwithnamesandtypes} - 
-[JSONCompactStringsEachRowWithNamesAndTypes](/interfaces/formats/JSONCompactStringsEachRowWithNamesAndTypes)を参照してください。 - -### JSONObjectEachRow {#jsonobjecteachrow} - -[JSONObjectEachRow](/interfaces/formats/JSONObjectEachRow)を参照してください。 - -### JSON形式設定 {#json-formats-settings} - -[JSON形式設定](/operations/settings/formats)を参照してください。 - -### BSONEachRow {#bsoneachrow} - -[BSONEachRow](/interfaces/formats/BSONEachRow)を参照してください。 - -### Native {#native} - -[Native](/interfaces/formats/Native)を参照してください。 - -### Null {#null} - -[Null](/interfaces/formats/Null)を参照してください。 - -### Pretty {#pretty} - -[Pretty](/interfaces/formats/Pretty)を参照してください。 - -### PrettyNoEscapes {#prettynoescapes} - -[PrettyNoEscapes](/interfaces/formats/PrettyNoEscapes)を参照してください。 - -### PrettyMonoBlock {#prettymonoblock} - -[PrettyMonoBlock](/interfaces/formats/PrettyMonoBlock)を参照してください。 - -### PrettyNoEscapesMonoBlock {#prettynoescapesmonoblock} - -[PrettyNoEscapesMonoBlock](/interfaces/formats/PrettyNoEscapesMonoBlock)を参照してください。 - -### PrettyCompact {#prettycompact} - -[PrettyCompact](/interfaces/formats/PrettyCompact)を参照してください。 - -### PrettyCompactNoEscapes {#prettycompactnoescapes} - -[PrettyCompactNoEscapes](/interfaces/formats/PrettyCompactNoEscapes)を参照してください。 - -### PrettyCompactMonoBlock {#prettycompactmonoblock} - -[PrettyCompactMonoBlock](/interfaces/formats/PrettyCompactMonoBlock)を参照してください。 - -### PrettyCompactNoEscapesMonoBlock {#prettycompactnoescapesmonoblock} - -[PrettyCompactNoEscapesMonoBlock](/interfaces/formats/PrettyCompactNoEscapesMonoBlock)を参照してください。 - -### PrettySpace {#prettyspace} - -[PrettySpace](/interfaces/formats/PrettySpace)を参照してください。 - -### PrettySpaceNoEscapes {#prettyspacenoescapes} - -[PrettySpaceNoEscapes](/interfaces/formats/PrettySpaceNoEscapes)を参照してください。 - -### PrettySpaceMonoBlock {#prettyspacemonoblock} - -[PrettySpaceMonoBlock](/interfaces/formats/PrettySpaceMonoBlock)を参照してください。 - -### PrettySpaceNoEscapesMonoBlock {#prettyspacenoescapesmonoblock} - -[PrettySpaceNoEscapesMonoBlock](/interfaces/formats/PrettySpaceNoEscapesMonoBlock)を参照してください。 - -### RowBinary {#rowbinary} - -[RowBinary](/interfaces/formats/RowBinary)を参照してください。 - -### RowBinaryWithNames {#rowbinarywithnames} - -[RowBinaryWithNames](/interfaces/formats/RowBinaryWithNames)を参照してください。 - -### RowBinaryWithNamesAndTypes {#rowbinarywithnamesandtypes} - -[RowBinaryWithNamesAndTypes](/interfaces/formats/RowBinaryWithNamesAndTypes)を参照してください。 - -### RowBinaryWithDefaults {#rowbinarywithdefaults} - -[RowBinaryWithDefaults](/interfaces/formats/RowBinaryWithDefaults)を参照してください。 - -### Values {#data-format-values} - -[Values](/interfaces/formats/Values)を参照してください。 - -### Vertical {#vertical} - -[Vertical](/interfaces/formats/Vertical)を参照してください。 - -### XML {#xml} - -[XML](/interfaces/formats/XML)を参照してください。 - -### CapnProto {#capnproto} - -[CapnProto](/interfaces/formats/CapnProto)を参照してください。 - -### Prometheus {#prometheus} - -[Prometheus](/interfaces/formats/Prometheus)を参照してください。 - -### Protobuf {#protobuf} - -[Protobuf](/interfaces/formats/Protobuf)を参照してください。 - -### ProtobufSingle {#protobufsingle} - -[ProtobufSingle](/interfaces/formats/ProtobufSingle)を参照してください。 - -### ProtobufList {#protobuflist} - -[ProtobufList](/interfaces/formats/ProtobufList)を参照してください。 - -### Avro {#data-format-avro} - -[Avro](/interfaces/formats/Avro)を参照してください。 - -### AvroConfluent {#data-format-avro-confluent} - -[AvroConfluent](/interfaces/formats/AvroConfluent)を参照してください。 - -### Parquet {#data-format-parquet} - -[Parquet](/interfaces/formats/Parquet)を参照してください。 - 
-### ParquetMetadata {#data-format-parquet-metadata} - -[ParquetMetadata](/interfaces/formats/ParquetMetadata)を参照してください。 - -### Arrow {#data-format-arrow} - -[Arrow](/interfaces/formats/ArrowStream)を参照してください。 - -### ArrowStream {#data-format-arrow-stream} - -[ArrowStream](/interfaces/formats/ArrowStream)を参照してください。 - -### ORC {#data-format-orc} - -[ORC](/interfaces/formats/ORC)を参照してください。 - -### One {#data-format-one} - -[One](/interfaces/formats/One)を参照してください。 - -### Npy {#data-format-npy} - -[Npy](/interfaces/formats/Npy)を参照してください。 - -### LineAsString {#lineasstring} - -次を参照してください: -- [LineAsString](/interfaces/formats/LineAsString) -- [LineAsStringWithNames](/interfaces/formats/LineAsStringWithNames) -- [LineAsStringWithNamesAndTypes](/interfaces/formats/LineAsStringWithNamesAndTypes) - -### Regexp {#data-format-regexp} - -[Regexp](/interfaces/formats/Regexp)を参照してください。 - -### RawBLOB {#rawblob} - -[RawBLOB](/interfaces/formats/RawBLOB)を参照してください。 - -### Markdown {#markdown} - -[Markdown](/interfaces/formats/Markdown)を参照してください。 - -### MsgPack {#msgpack} - -[MsgPack](/interfaces/formats/MsgPack)を参照してください。 - -### MySQLDump {#mysqldump} - -[MySQLDump](/interfaces/formats/MySQLDump)を参照してください。 - -### DWARF {#dwarf} - -[Dwarf](/interfaces/formats/DWARF)を参照してください。 - -### Form {#form} - -[Form](/interfaces/formats/Form)を参照してください。 - -## 形式スキーマ {#formatschema} - -形式スキーマを含むファイル名は、`format_schema`設定によって設定されます。 -この設定を設定する必要があるのは、`Cap'n Proto`および`Protobuf`形式の1つが使用される場合です。 -形式スキーマは、ファイル名とこのファイル内のメッセージ型の名前の組み合わせで、コロンで区切られます(例:`schemafile.proto:MessageType`)。 -ファイルが形式に対して標準の拡張子を持っている場合(例えば、`Protobuf`の場合は`.proto`)、省略可能であり、この場合、形式スキーマは`schemafile:MessageType`のようになります。 - -[client](/interfaces/cli.md)を介して対話モードでデータを入力または出力する場合、形式スキーマで指定されたファイル名には、絶対パスまたはクライアントの現在のディレクトリに対する相対パスを含めることができます。 -[バッチモード](/interfaces/cli.md/#batch-mode)でクライアントを使用する場合、スキーマへのパスは、セキュリティ上の理由から相対的である必要があります。 - -[HTTPインターフェース](/interfaces/http.md)を介してデータを入力または出力する場合、形式スキーマで指定されたファイル名は、サーバー構成の[format_schema_path](/operations/server-configuration-parameters/settings.md/#format_schema_path)で指定されたディレクトリに存在する必要があります。 - -## エラーをスキップ {#skippingerrors} - -`CSV`、`TabSeparated`、`TSKV`、`JSONEachRow`、`Template`、`CustomSeparated`、および`Protobuf`などの一部の形式は、解析エラーが発生した場合に壊れた行をスキップし、次の行の先頭から解析を続行できます。 [input_format_allow_errors_num](/operations/settings/settings-formats.md/#input_format_allow_errors_num)および[input_format_allow_errors_ratio](/operations/settings/settings-formats.md/#input_format_allow_errors_ratio)の設定を参照してください。 -制約: -- 解析エラーが発生した場合、`JSONEachRow`は新しい行(またはEOF)までのすべてのデータをスキップするため、行は正しくエラーをカウントするために`\n`で区切る必要があります。 -- `Template`と`CustomSeparated`は、次の行の先頭を見つけるために、最後のカラム後のデリミタと行間のデリミタを使用するため、エラーをスキップするのは、少なくとも一方が空でない場合のみ機能します。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats.md.hash deleted file mode 100644 index 5a0111293da..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats.md.hash +++ /dev/null @@ -1 +0,0 @@ -f20b705c208b4ad2 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Arrow/Arrow.md b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Arrow/Arrow.md deleted file mode 100644 index 221bdde366e..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Arrow/Arrow.md +++ /dev/null @@ -1,97 +0,0 @@ ---- -alias: [] -description: 'Arrow形式のドキュメント' -input_format: true -keywords: -- 'Arrow' -output_format: true -slug: '/interfaces/formats/Arrow' 
-title: 'Arrow' ---- - - - -| Input | Output | Alias | -|-------|--------|-------| -| ✔ | ✔ | | - -## 説明 {#description} - -[Apache Arrow](https://arrow.apache.org/) には、2 つの組み込みの列指向ストレージフォーマットがあります。 ClickHouse は、これらのフォーマットの読み取りおよび書き込み操作をサポートしています。 -`Arrow` は Apache Arrow の「ファイルモード」フォーマットです。これは、インメモリのランダムアクセス向けに設計されています。 - -## データ型の対応 {#data-types-matching} - -以下の表は、サポートされているデータ型と、それらが `INSERT` および `SELECT` クエリにおける ClickHouse の [データ型](/sql-reference/data-types/index.md) にどのように対応するかを示しています。 - -| Arrow データ型 (`INSERT`) | ClickHouse データ型 | Arrow データ型 (`SELECT`) | -|---------------------------------------------|---------------------------------------------------------------------------------------------------------|----------------------------| -| `BOOL` | [Bool](/sql-reference/data-types/boolean.md) | `BOOL` | -| `UINT8`, `BOOL` | [UInt8](/sql-reference/data-types/int-uint.md) | `UINT8` | -| `INT8` | [Int8](/sql-reference/data-types/int-uint.md)/[Enum8](/sql-reference/data-types/enum.md) | `INT8` | -| `UINT16` | [UInt16](/sql-reference/data-types/int-uint.md) | `UINT16` | -| `INT16` | [Int16](/sql-reference/data-types/int-uint.md)/[Enum16](/sql-reference/data-types/enum.md) | `INT16` | -| `UINT32` | [UInt32](/sql-reference/data-types/int-uint.md) | `UINT32` | -| `INT32` | [Int32](/sql-reference/data-types/int-uint.md) | `INT32` | -| `UINT64` | [UInt64](/sql-reference/data-types/int-uint.md) | `UINT64` | -| `INT64` | [Int64](/sql-reference/data-types/int-uint.md) | `INT64` | -| `FLOAT`, `HALF_FLOAT` | [Float32](/sql-reference/data-types/float.md) | `FLOAT32` | -| `DOUBLE` | [Float64](/sql-reference/data-types/float.md) | `FLOAT64` | -| `DATE32` | [Date32](/sql-reference/data-types/date32.md) | `UINT16` | -| `DATE64` | [DateTime](/sql-reference/data-types/datetime.md) | `UINT32` | -| `TIMESTAMP`, `TIME32`, `TIME64` | [DateTime64](/sql-reference/data-types/datetime64.md) | `TIMESTAMP` | -| `STRING`, `BINARY` | [String](/sql-reference/data-types/string.md) | `BINARY` | -| `STRING`, `BINARY`, `FIXED_SIZE_BINARY` | [FixedString](/sql-reference/data-types/fixedstring.md) | `FIXED_SIZE_BINARY` | -| `DECIMAL` | [Decimal](/sql-reference/data-types/decimal.md) | `DECIMAL` | -| `DECIMAL256` | [Decimal256](/sql-reference/data-types/decimal.md) | `DECIMAL256` | -| `LIST` | [Array](/sql-reference/data-types/array.md) | `LIST` | -| `STRUCT` | [Tuple](/sql-reference/data-types/tuple.md) | `STRUCT` | -| `MAP` | [Map](/sql-reference/data-types/map.md) | `MAP` | -| `UINT32` | [IPv4](/sql-reference/data-types/ipv4.md) | `UINT32` | -| `FIXED_SIZE_BINARY`, `BINARY` | [IPv6](/sql-reference/data-types/ipv6.md) | `FIXED_SIZE_BINARY` | -| `FIXED_SIZE_BINARY`, `BINARY` | [Int128/UInt128/Int256/UInt256](/sql-reference/data-types/int-uint.md) | `FIXED_SIZE_BINARY` | - -配列はネストでき、`Nullable` 型の値を引数として持つことができます。 `Tuple` と `Map` 型もネスト可能です。 - -`DICTIONARY` 型は `INSERT` クエリでサポートされており、`SELECT` クエリには [`output_format_arrow_low_cardinality_as_dictionary`](/operations/settings/formats#output_format_arrow_low_cardinality_as_dictionary) 設定があり、[LowCardinality](/sql-reference/data-types/lowcardinality.md) 型を `DICTIONARY` 型として出力することができます。 - -サポートされていない Arrow データ型: -- `FIXED_SIZE_BINARY` -- `JSON` -- `UUID` -- `ENUM`. 
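To see how these mappings are applied to a concrete file, the inferred schema can be inspected before defining a table. This is a hedged sketch: it assumes a local file named `data.arrow` exists (the name is only a placeholder) and uses `clickhouse-local`.

```bash
# Inspect which ClickHouse types are inferred from the Arrow schema of a file.
# 'data.arrow' is a placeholder file name.
clickhouse-local --query "DESCRIBE file('data.arrow', 'Arrow')"
```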
- -ClickHouse テーブルカラムのデータ型は、対応する Arrow データフィールドと一致する必要はありません。 データを挿入するとき、ClickHouse は上記の表に従ってデータ型を解釈し、その後 [casts](/sql-reference/functions/type-conversion-functions#cast) して ClickHouse テーブルカラムに設定されたデータ型にデータを変換します。 - -## 使用例 {#example-usage} - -### データの挿入 {#inserting-data} - -ファイルから Arrow データを ClickHouse テーブルに挿入するには、次のコマンドを使用します: - -```bash -$ cat filename.arrow | clickhouse-client --query="INSERT INTO some_table FORMAT Arrow" -``` - -### データの選択 {#selecting-data} - -ClickHouse テーブルからデータを選択し、Arrow フォーマットのいずれかのファイルに保存するには、次のコマンドを使用します: - -```bash -$ clickhouse-client --query="SELECT * FROM {some_table} FORMAT Arrow" > {filename.arrow} -``` - -## フォーマット設定 {#format-settings} - -| 設定 | 説明 | デフォルト | -|-----------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------|--------------| -| `input_format_arrow_allow_missing_columns` | Arrow 入力フォーマットを読み込む際に欠損しているカラムを許可する | `1` | -| `input_format_arrow_case_insensitive_column_matching` | Arrow カラムと CH カラムの一致の際に大文字小文字を無視する。 | `0` | -| `input_format_arrow_import_nested` | 廃止された設定、何もしない。 | `0` | -| `input_format_arrow_skip_columns_with_unsupported_types_in_schema_inference` | フォーマット Arrow のスキーマ推論中にサポートされていない型のカラムをスキップする | `0` | -| `output_format_arrow_compression_method` | Arrow 出力フォーマット用の圧縮方法。サポートされているコーデック:lz4_frame、zstd、none(未圧縮) | `lz4_frame` | -| `output_format_arrow_fixed_string_as_fixed_byte_array` | FixedString カラムに対して Arrow FIXED_SIZE_BINARY 型を使用する。 | `1` | -| `output_format_arrow_low_cardinality_as_dictionary` | LowCardinality 型を Arrow 型の Dictionary として出力するのを有効にする | `0` | -| `output_format_arrow_string_as_string` | String カラムに対して Arrow String 型を使用する。 | `1` | -| `output_format_arrow_use_64_bit_indexes_for_dictionary` | Arrow フォーマットの辞書インデックスに対して常に 64 ビット整数を使用する | `0` | -| `output_format_arrow_use_signed_indexes_for_dictionary` | Arrow フォーマットの辞書インデックスに対して符号付き整数を使用する | `1` | diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Arrow/Arrow.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Arrow/Arrow.md.hash deleted file mode 100644 index 14c6683d98a..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Arrow/Arrow.md.hash +++ /dev/null @@ -1 +0,0 @@ -11c06aa798481ca2 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Arrow/ArrowStream.md b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Arrow/ArrowStream.md deleted file mode 100644 index 55bcd7db4a3..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Arrow/ArrowStream.md +++ /dev/null @@ -1,24 +0,0 @@ ---- -alias: [] -description: 'ArrowStream フォーマットのドキュメント' -input_format: true -keywords: -- 'ArrowStream' -output_format: true -slug: '/interfaces/formats/ArrowStream' -title: 'ArrowStream' ---- - - - -| Input | Output | Alias | -|-------|--------|-------| -| ✔ | ✔ | | - -## 説明 {#description} - -`ArrowStream` は Apache Arrow の「ストリームモード」フォーマットです。このフォーマットは、メモリ内ストリーム処理のために設計されています。 - -## 使用例 {#example-usage} - -## フォーマット設定 {#format-settings} diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Arrow/ArrowStream.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Arrow/ArrowStream.md.hash deleted file mode 100644 index b81184bf4e4..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Arrow/ArrowStream.md.hash +++ /dev/null @@ -1 +0,0 @@ 
-2ee21a81e88df178 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Avro/Avro.md b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Avro/Avro.md deleted file mode 100644 index 8a4fe6636f0..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Avro/Avro.md +++ /dev/null @@ -1,98 +0,0 @@ ---- -alias: [] -description: 'Avroフォーマットのドキュメント' -input_format: true -keywords: -- 'Avro' -output_format: true -slug: '/interfaces/formats/Avro' -title: 'Avro' ---- - -import DataTypesMatching from './_snippets/data-types-matching.md' - -| Input | Output | Alias | -|-------|--------|-------| -| ✔ | ✔ | | - -## 説明 {#description} - -[Apache Avro](https://avro.apache.org/) は、Apache の Hadoop プロジェクト内で開発された行指向のデータシリアル化フレームワークです。 -ClickHouse の `Avro` フォーマットは、[Avro データファイル](https://avro.apache.org/docs/current/spec.html#Object+Container+Files)の読み書きをサポートしています。 - -## データ型の対応 {#data-types-matching} - - - -## 使用例 {#example-usage} - -### データの挿入 {#inserting-data} - -Avro ファイルから ClickHouse テーブルにデータを挿入するには: - -```bash -$ cat file.avro | clickhouse-client --query="INSERT INTO {some_table} FORMAT Avro" -``` - -取り込む Avro ファイルのルートスキーマは `record` タイプでなければなりません。 - -テーブルのカラムと Avro スキーマのフィールドの対応を見つけるために、ClickHouse はそれらの名前を比較します。 -この比較は大文字小文字を区別し、未使用のフィールドはスキップされます。 - -ClickHouse テーブルカラムのデータ型は、挿入される Avro データの対応するフィールドと異なる場合があります。データを挿入する際、ClickHouse は上記のテーブルに従ってデータ型を解釈し、その後に[キャスト](/sql-reference/functions/type-conversion-functions#cast)して対応するカラムタイプに変換します。 - -データをインポートする際に、スキーマにフィールドが見つからず、設定 [`input_format_avro_allow_missing_fields`](/operations/settings/settings-formats.md/#input_format_avro_allow_missing_fields) が有効になっている場合、エラーをスローするのではなく、デフォルト値が使用されます。 - -### データの選択 {#selecting-data} - -ClickHouse テーブルから Avro ファイルにデータを選択するには: - -```bash -$ clickhouse-client --query="SELECT * FROM {some_table} FORMAT Avro" > file.avro -``` - -カラム名は以下の条件を満たさなければなりません: - -- `[A-Za-z_]` で始まる -- 続けて `[A-Za-z0-9_]` のみが使用される - -出力 Avro ファイルの圧縮と同期間隔は、設定 [`output_format_avro_codec`](/operations/settings/settings-formats.md/#output_format_avro_codec) および [`output_format_avro_sync_interval`](/operations/settings/settings-formats.md/#output_format_avro_sync_interval) によってそれぞれ構成できます。 - -### 例データ {#example-data} - -ClickHouse の [`DESCRIBE`](/sql-reference/statements/describe-table) 関数を使用することで、次の例のように Avro ファイルの推測フォーマットを迅速に表示できます。 -この例には、ClickHouse S3 パブリックバケットにある公開アクセス可能な Avro ファイルの URL が含まれています: - -```sql title="クエリ" -DESCRIBE url('https://clickhouse-public-datasets.s3.eu-central-1.amazonaws.com/hits.avro','Avro); -``` -```response title="レスポンス" -┌─name───────────────────────┬─type────────────┬─default_type─┬─default_expression─┬─comment─┬─codec_expression─┬─ttl_expression─┐ -│ WatchID │ Int64 │ │ │ │ │ │ -│ JavaEnable │ Int32 │ │ │ │ │ │ -│ Title │ String │ │ │ │ │ │ -│ GoodEvent │ Int32 │ │ │ │ │ │ -│ EventTime │ Int32 │ │ │ │ │ │ -│ EventDate │ Date32 │ │ │ │ │ │ -│ CounterID │ Int32 │ │ │ │ │ │ -│ ClientIP │ Int32 │ │ │ │ │ │ -│ ClientIP6 │ FixedString(16) │ │ │ │ │ │ -│ RegionID │ Int32 │ │ │ │ │ │ -... 
-│ IslandID │ FixedString(16) │ │ │ │ │ │ -│ RequestNum │ Int32 │ │ │ │ │ │ -│ RequestTry │ Int32 │ │ │ │ │ │ -└────────────────────────────┴─────────────────┴──────────────┴────────────────────┴─────────┴──────────────────┴────────────────┘ -``` - -## フォーマット設定 {#format-settings} - -| 設定 | 説明 | デフォルト | -|------------------------------------------|----------------------------------------------------------------------------------------------|-----------| -| `input_format_avro_allow_missing_fields` | Avro/AvroConfluent フォーマット用:スキーマにフィールドが見つからない場合、エラーの代わりにデフォルト値を使用 | `0` | -| `input_format_avro_null_as_default` | Avro/AvroConfluent フォーマット用:null と非 Nullable カラムの場合にデフォルトを挿入する | `0` | -| `format_avro_schema_registry_url` | AvroConfluent フォーマット用:Confluent スキーマレジストリ URL。 | | -| `output_format_avro_codec` | 出力に使用される圧縮コーデック。可能な値:'null', 'deflate', 'snappy', 'zstd'。 | | -| `output_format_avro_sync_interval` | バイト単位の同期間隔。 | `16384` | -| `output_format_avro_string_column_pattern`| Avro フォーマット用:AVRO 文字列として選択する String カラムの正規表現。 | | -| `output_format_avro_rows_in_file` | ファイル内の最大行数(ストレージが許可する場合) | `1` | diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Avro/Avro.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Avro/Avro.md.hash deleted file mode 100644 index ac008808273..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Avro/Avro.md.hash +++ /dev/null @@ -1 +0,0 @@ -d772ce7317c21166 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Avro/AvroConfluent.md b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Avro/AvroConfluent.md deleted file mode 100644 index d8efccebcbc..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Avro/AvroConfluent.md +++ /dev/null @@ -1,73 +0,0 @@ ---- -alias: [] -description: 'AvroConfluent フォーマットのドキュメント' -input_format: true -keywords: -- 'AvroConfluent' -output_format: false -slug: '/interfaces/formats/AvroConfluent' -title: 'AvroConfluent' ---- - -import DataTypesMatching from './_snippets/data-types-matching.md' - -| Input | Output | Alias | -|-------|--------|-------| -| ✔ | ✗ | | - -## 説明 {#description} - -AvroConfluentは、[Kafka](https://kafka.apache.org/)および[Confluent Schema Registry](https://docs.confluent.io/current/schema-registry/index.html)で一般的に使用される単一オブジェクトのAvroメッセージのデコードをサポートしています。 -各AvroメッセージはスキーマIDを埋め込んでおり、スキーマレジストリの助けを借りて実際のスキーマに解決できます。 -スキーマは、一度解決されるとキャッシュされます。 - -## データ型の一致 {#data_types-matching-1} - - - -## 使用例 {#example-usage} - -スキーマの解決を迅速に確認するために、[kafkacat](https://github.com/edenhill/kafkacat)を[clickhouse-local](/operations/utilities/clickhouse-local.md)と一緒に使用することができます。 - -```bash -$ kafkacat -b kafka-broker -C -t topic1 -o beginning -f '%s' -c 3 | clickhouse-local --input-format AvroConfluent --format_avro_schema_registry_url 'http://schema-registry' -S "field1 Int64, field2 String" -q 'select * from table' -1 a -2 b -3 c -``` - -`AvroConfluent`を[Kafka](/engines/table-engines/integrations/kafka.md)と一緒に使用するには: - -```sql -CREATE TABLE topic1_stream -( - field1 String, - field2 String -) -ENGINE = Kafka() -SETTINGS -kafka_broker_list = 'kafka-broker', -kafka_topic_list = 'topic1', -kafka_group_name = 'group1', -kafka_format = 'AvroConfluent'; - --- デバッグ目的でセッション内にformat_avro_schema_registry_urlを設定できます。 --- この方法は本番環境では使用できません -SET format_avro_schema_registry_url = 'http://schema-registry'; - -SELECT * FROM topic1_stream; -``` - -## フォーマット設定 {#format-settings} - 
-スキーマレジストリURLは[`format_avro_schema_registry_url`](/operations/settings/settings-formats.md/#format_avro_schema_registry_url)で設定されます。 - -:::note -`format_avro_schema_registry_url`を設定することで、再起動後もその値を維持するために`users.xml`に設定する必要があります。また、`Kafka`テーブルエンジンの`format_avro_schema_registry_url`設定を使用することもできます。 -::: - -| 設定 | 説明 | デフォルト | -|---------------------------------------------|--------------------------------------------------------------------------------------------------|-----------| -| `input_format_avro_allow_missing_fields` | Avro/AvroConfluent形式の場合:スキーマにフィールドが見つからないとき、エラーの代わりにデフォルト値を使用する | `0` | -| `input_format_avro_null_as_default` | Avro/AvroConfluent形式の場合:nullおよび非Nullableカラムの場合にデフォルトを挿入する | `0` | -| `format_avro_schema_registry_url` | AvroConfluent形式の場合:Confluent Schema RegistryのURL。 | | diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Avro/AvroConfluent.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Avro/AvroConfluent.md.hash deleted file mode 100644 index fbdd753f9f9..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Avro/AvroConfluent.md.hash +++ /dev/null @@ -1 +0,0 @@ -e8f398a279f5bb5b diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Avro/_snippets/data-types-matching.md b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Avro/_snippets/data-types-matching.md deleted file mode 100644 index c17c962bed0..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Avro/_snippets/data-types-matching.md +++ /dev/null @@ -1,45 +0,0 @@ ---- -{} ---- - - - -以下のテーブルは、Apache Avro フォーマットがサポートするすべてのデータ型と、それに対応する ClickHouse の[data types](/sql-reference/data-types/index.md) を `INSERT` と `SELECT` クエリに示しています。 - -| Avro データ型 `INSERT` | ClickHouse データ型 | Avro データ型 `SELECT` | -|---------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------|---------------------------------| -| `boolean`, `int`, `long`, `float`, `double` | [Int(8\16\32)](/sql-reference/data-types/int-uint.md), [UInt(8\16\32)](/sql-reference/data-types/int-uint.md) | `int` | -| `boolean`, `int`, `long`, `float`, `double` | [Int64](/sql-reference/data-types/int-uint.md), [UInt64](/sql-reference/data-types/int-uint.md) | `long` | -| `boolean`, `int`, `long`, `float`, `double` | [Float32](/sql-reference/data-types/float.md) | `float` | -| `boolean`, `int`, `long`, `float`, `double` | [Float64](/sql-reference/data-types/float.md) | `double` | -| `bytes`, `string`, `fixed`, `enum` | [String](/sql-reference/data-types/string.md) | `bytes` または `string` \* | -| `bytes`, `string`, `fixed` | [FixedString(N)](/sql-reference/data-types/fixedstring.md) | `fixed(N)` | -| `enum` | [Enum(8\16)](/sql-reference/data-types/enum.md) | `enum` | -| `array(T)` | [Array(T)](/sql-reference/data-types/array.md) | `array(T)` | -| `map(V, K)` | [Map(V, K)](/sql-reference/data-types/map.md) | `map(string, K)` | -| `union(null, T)`, `union(T, null)` | [Nullable(T)](/sql-reference/data-types/date.md) | `union(null, T)` | -| `union(T1, T2, …)` \** | [Variant(T1, T2, …)](/sql-reference/data-types/variant.md) | `union(T1, T2, …)` \** | -| `null` | [Nullable(Nothing)](/sql-reference/data-types/special-data-types/nothing.md) | `null` | -| `int (date)` \**\* | [Date](/sql-reference/data-types/date.md), [Date32](/sql-reference/data-types/date32.md) | `int (date)` \**\* | -| `long (timestamp-millis)` \**\* 
| [DateTime64(3)](/sql-reference/data-types/datetime.md) | `long (timestamp-millis)` \**\* | -| `long (timestamp-micros)` \**\* | [DateTime64(6)](/sql-reference/data-types/datetime.md) | `long (timestamp-micros)` \**\* | -| `bytes (decimal)` \**\* | [DateTime64(N)](/sql-reference/data-types/datetime.md) | `bytes (decimal)` \**\* | -| `int` | [IPv4](/sql-reference/data-types/ipv4.md) | `int` | -| `fixed(16)` | [IPv6](/sql-reference/data-types/ipv6.md) | `fixed(16)` | -| `bytes (decimal)` \**\* | [Decimal(P, S)](/sql-reference/data-types/decimal.md) | `bytes (decimal)` \**\* | -| `string (uuid)` \**\* | [UUID](/sql-reference/data-types/uuid.md) | `string (uuid)` \**\* | -| `fixed(16)` | [Int128/UInt128](/sql-reference/data-types/int-uint.md) | `fixed(16)` | -| `fixed(32)` | [Int256/UInt256](/sql-reference/data-types/int-uint.md) | `fixed(32)` | -| `record` | [Tuple](/sql-reference/data-types/tuple.md) | `record` | - -\* `bytes` はデフォルトで、[`output_format_avro_string_column_pattern`](/operations/settings/settings-formats.md/#output_format_avro_string_column_pattern) を設定することで制御されます。 - -\** [Variant type](/sql-reference/data-types/variant) はフィールド値として `null` を暗黙的に受け入れるため、例えば Avro の `union(T1, T2, null)` は `Variant(T1, T2)` に変換されます。 -その結果、ClickHouse から Avro を生成する際には、スキーマ推論中に実際に値が `null` であるかどうかわからないため、常に Avro `union` 型セットに `null` 型を含める必要があります。 - -\**\* [Avro logical types](https://avro.apache.org/docs/current/spec.html#Logical+Types) - -サポートされていない Avro 論理データ型: -- `time-millis` -- `time-micros` -- `duration` diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Avro/_snippets/data-types-matching.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Avro/_snippets/data-types-matching.md.hash deleted file mode 100644 index 26f3b5879a3..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Avro/_snippets/data-types-matching.md.hash +++ /dev/null @@ -1 +0,0 @@ -16d09f130354e941 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/BSONEachRow.md b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/BSONEachRow.md deleted file mode 100644 index 7309c88cdea..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/BSONEachRow.md +++ /dev/null @@ -1,93 +0,0 @@ ---- -alias: [] -description: 'BSONEachRowフォーマットのドキュメント' -input_format: true -keywords: -- 'BSONEachRow' -output_format: true -slug: '/interfaces/formats/BSONEachRow' -title: 'BSONEachRow' ---- - - - - -| Input | Output | Alias | -|-------|--------|-------| -| ✔ | ✔ | | - -## 説明 {#description} - -`BSONEachRow` フォーマットは、区切り文字なしにバイナリ JSON (BSON) 文書のシーケンスとしてデータを解析します。 -各行は単一の文書としてフォーマットされ、各カラムはカラム名をキーとする単一の BSON 文書フィールドとしてフォーマットされます。 - -## データ型の対応 {#data-types-matching} - -出力には、ClickHouse 型と BSON 型の間の次の対応を使用します。 - -| ClickHouse 型 | BSON 型 | -|-------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------------------------------| -| [Bool](/sql-reference/data-types/boolean.md) | `\x08` boolean | -| [Int8/UInt8](/sql-reference/data-types/int-uint.md)/[Enum8](/sql-reference/data-types/enum.md) | `\x10` int32 | -| [Int16/UInt16](/sql-reference/data-types/int-uint.md)/[Enum16](/sql-reference/data-types/enum.md) | `\x10` int32 | -| [Int32](/sql-reference/data-types/int-uint.md) | `\x10` int32 | -| [UInt32](/sql-reference/data-types/int-uint.md) | `\x12` int64 | -| 
[Int64/UInt64](/sql-reference/data-types/int-uint.md) | `\x12` int64 | -| [Float32/Float64](/sql-reference/data-types/float.md) | `\x01` double | -| [Date](/sql-reference/data-types/date.md)/[Date32](/sql-reference/data-types/date32.md) | `\x10` int32 | -| [DateTime](/sql-reference/data-types/datetime.md) | `\x12` int64 | -| [DateTime64](/sql-reference/data-types/datetime64.md) | `\x09` datetime | -| [Decimal32](/sql-reference/data-types/decimal.md) | `\x10` int32 | -| [Decimal64](/sql-reference/data-types/decimal.md) | `\x12` int64 | -| [Decimal128](/sql-reference/data-types/decimal.md) | `\x05` binary, `\x00` binary subtype, size = 16 | -| [Decimal256](/sql-reference/data-types/decimal.md) | `\x05` binary, `\x00` binary subtype, size = 32 | -| [Int128/UInt128](/sql-reference/data-types/int-uint.md) | `\x05` binary, `\x00` binary subtype, size = 16 | -| [Int256/UInt256](/sql-reference/data-types/int-uint.md) | `\x05` binary, `\x00` binary subtype, size = 32 | -| [String](/sql-reference/data-types/string.md)/[FixedString](/sql-reference/data-types/fixedstring.md) | `\x05` binary, `\x00` binary subtype または、設定 output_format_bson_string_as_string が有効な場合は \x02 string | -| [UUID](/sql-reference/data-types/uuid.md) | `\x05` binary, `\x04` uuid subtype, size = 16 | -| [Array](/sql-reference/data-types/array.md) | `\x04` array | -| [Tuple](/sql-reference/data-types/tuple.md) | `\x04` array | -| [Named Tuple](/sql-reference/data-types/tuple.md) | `\x03` document | -| [Map](/sql-reference/data-types/map.md) | `\x03` document | -| [IPv4](/sql-reference/data-types/ipv4.md) | `\x10` int32 | -| [IPv6](/sql-reference/data-types/ipv6.md) | `\x05` binary, `\x00` binary subtype | - -入力には、BSON 型と ClickHouse 型の間の次の対応を使用します。 - -| BSON 型 | ClickHouse 型 | -|----------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| `\x01` double | [Float32/Float64](/sql-reference/data-types/float.md) | -| `\x02` string | [String](/sql-reference/data-types/string.md)/[FixedString](/sql-reference/data-types/fixedstring.md) | -| `\x03` document | [Map](/sql-reference/data-types/map.md)/[Named Tuple](/sql-reference/data-types/tuple.md) | -| `\x04` array | [Array](/sql-reference/data-types/array.md)/[Tuple](/sql-reference/data-types/tuple.md) | -| `\x05` binary, `\x00` binary subtype | [String](/sql-reference/data-types/string.md)/[FixedString](/sql-reference/data-types/fixedstring.md)/[IPv6](/sql-reference/data-types/ipv6.md) | -| `\x05` binary, `\x02` old binary subtype | [String](/sql-reference/data-types/string.md)/[FixedString](/sql-reference/data-types/fixedstring.md) | -| `\x05` binary, `\x03` old uuid subtype | [UUID](/sql-reference/data-types/uuid.md) | -| `\x05` binary, `\x04` uuid subtype | [UUID](/sql-reference/data-types/uuid.md) | -| `\x07` ObjectId | [String](/sql-reference/data-types/string.md)/[FixedString](/sql-reference/data-types/fixedstring.md) | -| `\x08` boolean | [Bool](/sql-reference/data-types/boolean.md) | -| `\x09` datetime | [DateTime64](/sql-reference/data-types/datetime64.md) | -| `\x0A` null value | [NULL](/sql-reference/data-types/nullable.md) | -| `\x0D` JavaScript code | [String](/sql-reference/data-types/string.md)/[FixedString](/sql-reference/data-types/fixedstring.md) | -| `\x0E` symbol | 
[String](/sql-reference/data-types/string.md)/[FixedString](/sql-reference/data-types/fixedstring.md) | -| `\x10` int32 | [Int32/UInt32](/sql-reference/data-types/int-uint.md)/[Decimal32](/sql-reference/data-types/decimal.md)/[IPv4](/sql-reference/data-types/ipv4.md)/[Enum8/Enum16](/sql-reference/data-types/enum.md) | -| `\x12` int64 | [Int64/UInt64](/sql-reference/data-types/int-uint.md)/[Decimal64](/sql-reference/data-types/decimal.md)/[DateTime64](/sql-reference/data-types/datetime64.md) | - -他の BSON 型はサポートされていません。また、異なる整数型の間での変換も行います。 -例えば、BSON `int32` 値を ClickHouse に [`UInt8`](../../sql-reference/data-types/int-uint.md) として挿入することが可能です。 - -`Int128`/`UInt128`/`Int256`/`UInt256`/`Decimal128`/`Decimal256` などの大きな整数と小数は、BSON バイナリ値から `\x00` バイナリサブタイプで解析できます。 -この場合、フォーマットはバイナリデータのサイズが期待される値のサイズに等しいことを検証します。 - -:::note -このフォーマットはビッグエンディアンプラットフォームでは正しく機能しません。 -::: - -## 使用例 {#example-usage} - -## フォーマット設定 {#format-settings} - -| 設定 | 説明 | デフォルト | -|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------------|----------| -| [`output_format_bson_string_as_string`](../../operations/settings/settings-formats.md/#output_format_bson_string_as_string) | 文字列カラムのためにバイナリではなく BSON 文字列型を使用します。 | `false` | -| [`input_format_bson_skip_fields_with_unsupported_types_in_schema_inference`](../../operations/settings/settings-formats.md/#input_format_bson_skip_fields_with_unsupported_types_in_schema_inference) | フォーマット BSONEachRow のスキーマ推論中にサポートされていない型のカラムをスキップできるようにします。 | `false` | diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/BSONEachRow.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/BSONEachRow.md.hash deleted file mode 100644 index 2fbaf1f1e73..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/BSONEachRow.md.hash +++ /dev/null @@ -1 +0,0 @@ -153df8523a4f7e70 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/CSV/CSV.md b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/CSV/CSV.md deleted file mode 100644 index 435212fdcbb..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/CSV/CSV.md +++ /dev/null @@ -1,73 +0,0 @@ ---- -alias: [] -description: 'CSV 形式のドキュメント' -input_format: true -keywords: -- 'CSV' -output_format: true -slug: '/interfaces/formats/CSV' -title: 'CSV' ---- - - - -## 説明 {#description} - -カンマ区切り値形式 ([RFC](https://tools.ietf.org/html/rfc4180))。 -フォーマットの際、行はダブルクォートで囲まれます。文字列内のダブルクォートは、2つのダブルクォートとして出力されます。 -他にエスケープ文字のルールはありません。 - -- 日付と日付時刻はダブルクォートで囲まれます。 -- 数値はダブルクォートなしで出力されます。 -- 値はデリミタ文字によって区切られ、デフォルトでは `,` です。デリミタ文字は設定 [format_csv_delimiter](/operations/settings/settings-formats.md/#format_csv_delimiter) で定義されています。 -- 行はUnix行フィード (LF) で区切られます。 -- 配列はCSVで以下のようにシリアル化されます: - - 最初に、配列はタブ区切り形式で文字列にシリアル化されます - - 結果の文字列はダブルクォートでCSVに出力されます。 -- CSV形式のタプルは、別々のカラムとしてシリアル化されます(つまり、タプル内のネストは失われます)。 - -```bash -$ clickhouse-client --format_csv_delimiter="|" --query="INSERT INTO test.csv FORMAT CSV" < data.csv -``` - -:::note -デフォルトでは、デリミタは `,` です。 -詳細は設定 [format_csv_delimiter](/operations/settings/settings-formats.md/#format_csv_delimiter) を参照してください。 -::: - -解析する際、すべての値はダブルクォートありまたはなしで解析できます。ダブルクォートとシングルクォートの両方がサポートされています。 - -行はクォートなしでも配置できます。この場合、デリミタ文字または行フィード (CRまたはLF) まで解析されます。 
-ただし、RFCに反して、クォートなしで行を解析する場合、先頭と末尾のスペースとタブは無視されます。 -行フィードは、Unix (LF)、Windows (CR LF)、Mac OS Classic (CR LF) タイプをサポートします。 - -`NULL` は設定 [format_csv_null_representation](/operations/settings/settings-formats.md/#format_csv_null_representation) に従ってフォーマットされます(デフォルト値は `\N` です)。 - -入力データでは、`ENUM` 値は名前またはIDとして表現できます。 -最初に、入力値をENUM名にマッチさせようとします。 -失敗した場合、かつ入力値が数値であれば、この数値をENUM IDにマッチさせようとします。 -入力データにENUM IDのみが含まれている場合は、`ENUM` 解析の最適化のために設定 [input_format_csv_enum_as_number](/operations/settings/settings-formats.md/#input_format_csv_enum_as_number) を有効にすることをお勧めします。 - -## 使用例 {#example-usage} - -## フォーマット設定 {#format-settings} - -| 設定 | 説明 | デフォルト | ノート | -|------------------------------------------------------------------------------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------|---------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| [format_csv_delimiter](/operations/settings/settings-formats.md/#format_csv_delimiter) | CSVデータでデリミタと見なされる文字。 | `,` | | -| [format_csv_allow_single_quotes](/operations/settings/settings-formats.md/#format_csv_allow_single_quotes) | シングルクォートで囲まれた文字列を許可します。 | `true` | | -| [format_csv_allow_double_quotes](/operations/settings/settings-formats.md/#format_csv_allow_double_quotes) | ダブルクォートで囲まれた文字列を許可します。 | `true` | | -| [format_csv_null_representation](/operations/settings/settings-formats.md/#format_tsv_null_representation) | CSV形式でのカスタムNULL表現。 | `\N` | | -| [input_format_csv_empty_as_default](/operations/settings/settings-formats.md/#input_format_csv_empty_as_default) | CSV入力の空のフィールドをデフォルト値として扱います。 | `true` | 複雑なデフォルト式の場合は、 [input_format_defaults_for_omitted_fields](/operations/settings/settings-formats.md/#input_format_defaults_for_omitted_fields) も有効にする必要があります。 | -| [input_format_csv_enum_as_number](/operations/settings/settings-formats.md/#input_format_csv_enum_as_number) | CSV形式の挿入されたENUM値をENUMインデックスとして扱います。 | `false` | | -| [input_format_csv_use_best_effort_in_schema_inference](/operations/settings/settings-formats.md/#input_format_csv_use_best_effort_in_schema_inference) | CSV形式でのスキーマ推論にいくつかの微調整とヒューリスティックを使用します。無効にすると、すべてのフィールドは文字列として推論されます。 | `true` | | -| [input_format_csv_arrays_as_nested_csv](/operations/settings/settings-formats.md/#input_format_csv_arrays_as_nested_csv) | CSVから配列を読む際、要素がネストされたCSVでシリアル化されて文字列に挿入されることを期待します。 | `false` | | -| [output_format_csv_crlf_end_of_line](/operations/settings/settings-formats.md/#output_format_csv_crlf_end_of_line) | これがtrueに設定されている場合、CSV出力形式の行の終わりは `\r\n` になります。 | `false` | | -| [input_format_csv_skip_first_lines](/operations/settings/settings-formats.md/#input_format_csv_skip_first_lines) | データの最初の指定行数をスキップします。 | `0` | | -| [input_format_csv_detect_header](/operations/settings/settings-formats.md/#input_format_csv_detect_header) | CSV形式で名前と型を持つヘッダーを自動的に検出します。 | `true` | | -| [input_format_csv_skip_trailing_empty_lines](/operations/settings/settings-formats.md/#input_format_csv_skip_trailing_empty_lines) | データの末尾にあるトレーリング空行をスキップします。 | `false` | | -| [input_format_csv_trim_whitespaces](/operations/settings/settings-formats.md/#input_format_csv_trim_whitespaces) | 非引用のCSV文字列のスペースとタブをトリムします。 | `true` | | -| 
[input_format_csv_allow_whitespace_or_tab_as_delimiter](/operations/settings/settings-formats.md/#input_format_csv_allow_whitespace_or_tab_as_delimiter) | CSV文字列のフィールドデリミタとしてスペースまたはタブの使用を許可します。 | `false` | | -| [input_format_csv_allow_variable_number_of_columns](/operations/settings/settings-formats.md/#input_format_csv_allow_variable_number_of_columns) | CSV形式で列数を可変にし、余分な列を無視し、欠損列にはデフォルト値を使用することを許可します。 | `false` | | -| [input_format_csv_use_default_on_bad_values](/operations/settings/settings-formats.md/#input_format_csv_use_default_on_bad_values) | CSVフィールドのデシリアライズが不正な値で失敗した場合に、カラムにデフォルト値を設定することを許可します。 | `false` | | -| [input_format_csv_try_infer_numbers_from_strings](/operations/settings/settings-formats.md/#input_format_csv_try_infer_numbers_from_strings) | スキーマ推論中に文字列フィールドから数値を推測しようとします。 | `false` | | diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/CSV/CSV.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/CSV/CSV.md.hash deleted file mode 100644 index 5b24839b509..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/CSV/CSV.md.hash +++ /dev/null @@ -1 +0,0 @@ -59a1ef2b0315ada7 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/CSV/CSVWithNames.md b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/CSV/CSVWithNames.md deleted file mode 100644 index d3fa259e77a..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/CSV/CSVWithNames.md +++ /dev/null @@ -1,29 +0,0 @@ ---- -alias: [] -description: 'CSV フォーマットのドキュメント' -input_format: true -keywords: -- 'CSVWithNames' -output_format: true -slug: '/interfaces/formats/CSVWithNames' -title: 'CSVWithNames' ---- - - - -| 入力 | 出力 | エイリアス | -|-----|------|---------| -| ✔ | ✔ | | - -## 説明 {#description} - -カラム名とともにヘッダー行も印刷され、[TabSeparatedWithNames](/interfaces/formats/TabSeparatedWithNames) に類似しています。 - -## 使用例 {#example-usage} - -## フォーマット設定 {#format-settings} - -:::note -[`input_format_with_names_use_header`](../../../operations/settings/settings-formats.md/#input_format_with_names_use_header) の設定が `1` に設定されている場合、入力データのカラムはその名前によってテーブルのカラムにマッピングされ、名前が不明なカラムは[環境設定](../../../operations/settings/settings-formats.md/#input_format_skip_unknown_fields)で `1` に設定されている場合はスキップされます。 -そうでない場合、最初の行はスキップされます。 -::: diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/CSV/CSVWithNames.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/CSV/CSVWithNames.md.hash deleted file mode 100644 index fe31180e0ab..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/CSV/CSVWithNames.md.hash +++ /dev/null @@ -1 +0,0 @@ -81969975bfe91ee0 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/CSV/CSVWithNamesAndTypes.md b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/CSV/CSVWithNamesAndTypes.md deleted file mode 100644 index e0f3dea4298..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/CSV/CSVWithNamesAndTypes.md +++ /dev/null @@ -1,32 +0,0 @@ ---- -alias: [] -description: 'CSVWithNamesAndTypes 形式のドキュメント' -input_format: true -keywords: -- 'CSVWithNamesAndTypes' -output_format: true -slug: '/interfaces/formats/CSVWithNamesAndTypes' -title: 'CSVWithNamesAndTypes' ---- - - - -| Input | Output | Alias | -|-------|--------|-------| -| ✔ | ✔ | | - -## 説明 {#description} - 
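As a small illustrative sketch (the query and values below are made up for this page, not taken from an existing dataset), the first two output rows carry the column names and their types, followed by the data rows:

```sql title="Query"
SELECT 1 AS n, 'hello' AS s
FORMAT CSVWithNamesAndTypes
```

```response title="Response"
"n","s"
"UInt8","String"
1,"hello"
```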
-これは、[TabSeparatedWithNamesAndTypes](../formats/TabSeparatedWithNamesAndTypes)に似たカラム名とタイプを持つ2つのヘッダーロウを印刷します。 - -## 使用例 {#example-usage} - -## フォーマット設定 {#format-settings} - -:::note -設定 [input_format_with_names_use_header](/operations/settings/settings-formats.md/#input_format_with_names_use_header) が `1` に設定されている場合、入力データのカラムは、テーブルのカラムに名前でマッピングされます。未知の名前のカラムは、設定 [input_format_skip_unknown_fields](../../../operations/settings/settings-formats.md/#input_format_skip_unknown_fields) が `1` に設定されている場合、スキップされます。それ以外の場合、最初の行はスキップされます。 -::: - -:::note -設定 [input_format_with_types_use_header](../../../operations/settings/settings-formats.md/#input_format_with_types_use_header) が `1` に設定されている場合、入力データのタイプはテーブルの対応するカラムのタイプと比較されます。それ以外の場合、2行目はスキップされます。 -::: diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/CSV/CSVWithNamesAndTypes.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/CSV/CSVWithNamesAndTypes.md.hash deleted file mode 100644 index d1d603daf20..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/CSV/CSVWithNamesAndTypes.md.hash +++ /dev/null @@ -1 +0,0 @@ -0aef49feac39bc2c diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/CapnProto.md b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/CapnProto.md deleted file mode 100644 index f04e87db9b6..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/CapnProto.md +++ /dev/null @@ -1,119 +0,0 @@ ---- -alias: [] -description: 'Capnprotoのドキュメント' -input_format: true -keywords: -- 'CapnProto' -output_format: true -slug: '/interfaces/formats/CapnProto' -title: 'CapnProto' ---- - -import CloudNotSupportedBadge from '@theme/badges/CloudNotSupportedBadge'; - - - -| 入力 | 出力 | エイリアス | -|-------|--------|-------| -| ✔ | ✔ | | - -## 説明 {#description} - -`CapnProto` フォーマットは、[`Protocol Buffers`](https://developers.google.com/protocol-buffers/) フォーマットや [Thrift](https://en.wikipedia.org/wiki/Apache_Thrift) と似たバイナリメッセージフォーマットですが、[JSON](./JSON/JSON.md) や [MessagePack](https://msgpack.org/) とは異なります。 -CapnProto メッセージは厳密に型付けされており、自己記述的ではないため、外部スキーマ記述が必要です。スキーマはその場で適用され、各クエリに対してキャッシュされます。 - -[フォーマットスキーマ](/interfaces/formats/#formatschema) も参照してください。 - -## データ型の一致 {#data_types-matching-capnproto} - -以下の表は、サポートされているデータ型と、それらが `INSERT` および `SELECT` クエリにおける ClickHouse の [データ型](/sql-reference/data-types/index.md) とどのように一致するかを示しています。 - -| CapnProto データ型(`INSERT`) | ClickHouse データ型 | CapnProto データ型(`SELECT`) | -|------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|------------------------------------------------------| -| `UINT8`, `BOOL` | [UInt8](/sql-reference/data-types/int-uint.md) | `UINT8` | -| `INT8` | [Int8](/sql-reference/data-types/int-uint.md) | `INT8` | -| `UINT16` | [UInt16](/sql-reference/data-types/int-uint.md), [Date](/sql-reference/data-types/date.md) | `UINT16` | -| `INT16` | [Int16](/sql-reference/data-types/int-uint.md) | `INT16` | -| `UINT32` | [UInt32](/sql-reference/data-types/int-uint.md), [DateTime](/sql-reference/data-types/datetime.md) | `UINT32` | -| `INT32` | [Int32](/sql-reference/data-types/int-uint.md), [Decimal32](/sql-reference/data-types/decimal.md) | `INT32` | -| `UINT64` | [UInt64](/sql-reference/data-types/int-uint.md) | `UINT64` | -| `INT64` | [Int64](/sql-reference/data-types/int-uint.md), 
[DateTime64](/sql-reference/data-types/datetime.md), [Decimal64](/sql-reference/data-types/decimal.md) | `INT64` | -| `FLOAT32` | [Float32](/sql-reference/data-types/float.md) | `FLOAT32` | -| `FLOAT64` | [Float64](/sql-reference/data-types/float.md) | `FLOAT64` | -| `TEXT, DATA` | [String](/sql-reference/data-types/string.md), [FixedString](/sql-reference/data-types/fixedstring.md) | `TEXT, DATA` | -| `union(T, Void), union(Void, T)` | [Nullable(T)](/sql-reference/data-types/date.md) | `union(T, Void), union(Void, T)` | -| `ENUM` | [Enum(8/16)](/sql-reference/data-types/enum.md) | `ENUM` | -| `LIST` | [Array](/sql-reference/data-types/array.md) | `LIST` | -| `STRUCT` | [Tuple](/sql-reference/data-types/tuple.md) | `STRUCT` | -| `UINT32` | [IPv4](/sql-reference/data-types/ipv4.md) | `UINT32` | -| `DATA` | [IPv6](/sql-reference/data-types/ipv6.md) | `DATA` | -| `DATA` | [Int128/UInt128/Int256/UInt256](/sql-reference/data-types/int-uint.md) | `DATA` | -| `DATA` | [Decimal128/Decimal256](/sql-reference/data-types/decimal.md) | `DATA` | -| `STRUCT(entries LIST(STRUCT(key Key, value Value)))` | [Map](/sql-reference/data-types/map.md) | `STRUCT(entries LIST(STRUCT(key Key, value Value)))` | - -- 整数型は、入力/出力中に相互に変換できます。 -- CapnProtoフォーマットでの`Enum`の取り扱いには、[format_capn_proto_enum_comparising_mode](/operations/settings/settings-formats.md/#format_capn_proto_enum_comparising_mode) 設定を使用してください。 -- 配列はネスト可能で、`Nullable`型の値を引数として持つことができます。`Tuple`および`Map`型もネストできます。 - -## 使用例 {#example-usage} - -### データの挿入と選択 {#inserting-and-selecting-data-capnproto} - -次のコマンドを使用して、ファイルから ClickHouse テーブルに CapnProto データを挿入できます。 - -```bash -$ cat capnproto_messages.bin | clickhouse-client --query "INSERT INTO test.hits SETTINGS format_schema = 'schema:Message' FORMAT CapnProto" -``` - -ここで、`schema.capnp`は次のようになります。 - -```capnp -struct Message { - SearchPhrase @0 :Text; - c @1 :Uint64; -} -``` - -次のコマンドを使用して、ClickHouse テーブルからデータを選択し、`CapnProto`フォーマットでファイルに保存できます。 - -```bash -$ clickhouse-client --query = "SELECT * FROM test.hits FORMAT CapnProto SETTINGS format_schema = 'schema:Message'" -``` - -### 自動生成スキーマの使用 {#using-autogenerated-capn-proto-schema} - -データに対する外部の `CapnProto` スキーマがない場合でも、自動生成スキーマを使用して `CapnProto` フォーマットでデータを出力/入力できます。 - -例えば: - -```sql -SELECT * FROM test.hits -FORMAT CapnProto -SETTINGS format_capn_proto_use_autogenerated_schema=1 -``` - -この場合、ClickHouse はテーブル構造に基づいて CapnProto スキーマを自動生成し、[structureToCapnProtoSchema](/sql-reference/functions/other-functions.md#structure_to_capn_proto_schema) 関数を使用して、このスキーマを使用して CapnProto フォーマットでデータをシリアライズします。 - -自動生成されたスキーマの CapnProto ファイルを読み取ることもできます(この場合、ファイルは同じスキーマを使用して作成する必要があります): - -```bash -$ cat hits.bin | clickhouse-client --query "INSERT INTO test.hits SETTINGS format_capn_proto_use_autogenerated_schema=1 FORMAT CapnProto" -``` - -## フォーマット設定 {#format-settings} - -設定 [`format_capn_proto_use_autogenerated_schema`](../../operations/settings/settings-formats.md/#format_capn_proto_use_autogenerated_schema) はデフォルトで有効であり、[`format_schema`](/interfaces/formats#formatschema) が設定されていない場合に適用されます。 - -入力/出力中に [`output_format_schema`](/operations/settings/formats#output_format_schema) 設定を使用して、自動生成されたスキーマをファイルに保存することもできます。 - -例えば: - -```sql -SELECT * FROM test.hits -FORMAT CapnProto -SETTINGS - format_capn_proto_use_autogenerated_schema=1, - output_format_schema='path/to/schema/schema.capnp' -``` - -この場合、自動生成された `CapnProto` スキーマはファイル `path/to/schema/schema.capnp` に保存されます。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/CapnProto.md.hash 
b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/CapnProto.md.hash deleted file mode 100644 index 4fb93fa5c71..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/CapnProto.md.hash +++ /dev/null @@ -1 +0,0 @@ -44b7bfbe8857ed27 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/CustomSeparated/CustomSeparated.md b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/CustomSeparated/CustomSeparated.md deleted file mode 100644 index b728b937ab4..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/CustomSeparated/CustomSeparated.md +++ /dev/null @@ -1,45 +0,0 @@ ---- -alias: [] -description: 'CustomSeparated フォーマットのドキュメント' -input_format: true -keywords: -- 'CustomSeparated' -output_format: true -slug: '/interfaces/formats/CustomSeparated' -title: 'CustomSeparated' ---- - - - -| Input | Output | Alias | -|-------|--------|-------| -| ✔ | ✔ | | - -## 説明 {#description} - -[Template](../Template/Template.md) と似ていますが、すべてのカラムの名前とタイプを出力または読み込みし、[format_custom_escaping_rule](../../../operations/settings/settings-formats.md/#format_custom_escaping_rule) 設定からエスケープルールを使用し、以下の設定からデリミタを使用します。 - -- [format_custom_field_delimiter](/operations/settings/settings-formats.md/#format_custom_field_delimiter) -- [format_custom_row_before_delimiter](/operations/settings/settings-formats.md/#format_custom_row_before_delimiter) -- [format_custom_row_after_delimiter](/operations/settings/settings-formats.md/#format_custom_row_after_delimiter) -- [format_custom_row_between_delimiter](/operations/settings/settings-formats.md/#format_custom_row_between_delimiter) -- [format_custom_result_before_delimiter](/operations/settings/settings-formats.md/#format_custom_result_before_delimiter) -- [format_custom_result_after_delimiter](/operations/settings/settings-formats.md/#format_custom_result_after_delimiter) - -note::: -エスケープルール設定やフォーマット文字列からのデリミタは使用されません。 -::: - -[`CustomSeparatedIgnoreSpaces`](../CustomSeparated/CustomSeparatedIgnoreSpaces.md) フォーマットもあり、[TemplateIgnoreSpaces](../Template//TemplateIgnoreSpaces.md) に似ています。 - -## 例の使用法 {#example-usage} - -## フォーマット設定 {#format-settings} - -追加設定: - -| 設定 | 説明 | デフォルト | -|-------------------------------------------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------|-----------| -| [input_format_custom_detect_header](../../../operations/settings/settings-formats.md/#input_format_custom_detect_header) | ある場合は、名前とタイプのヘッダーを自動的に検出するようにします。 | `true` | -| [input_format_custom_skip_trailing_empty_lines](../../../operations/settings/settings-formats.md/#input_format_custom_skip_trailing_empty_lines) | ファイルの終わりにあるトレーリングの空行をスキップします。 | `false` | -| [input_format_custom_allow_variable_number_of_columns](../../../operations/settings/settings-formats.md/#input_format_custom_allow_variable_number_of_columns) | CustomSeparatedフォーマットで可変数のカラムを許可し、余分なカラムを無視し、欠損カラムにはデフォルト値を使用します。 | `false` | diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/CustomSeparated/CustomSeparated.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/CustomSeparated/CustomSeparated.md.hash deleted file mode 100644 index 1f2aa3675ef..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/CustomSeparated/CustomSeparated.md.hash +++ 
/dev/null @@ -1 +0,0 @@ -e97bef7e3b98a88a diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/CustomSeparated/CustomSeparatedIgnoreSpaces.md b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/CustomSeparated/CustomSeparatedIgnoreSpaces.md deleted file mode 100644 index fe343429765..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/CustomSeparated/CustomSeparatedIgnoreSpaces.md +++ /dev/null @@ -1,15 +0,0 @@ ---- -description: 'CustomSeparatedIgnoreSpaces フォーマットのドキュメント' -keywords: -- 'CustomSeparatedIgnoreSpaces' -slug: '/interfaces/formats/CustomSeparatedIgnoreSpaces' -title: 'CustomSeparatedIgnoreSpaces' ---- - - - -## 説明 {#description} - -## 使用例 {#example-usage} - -## フォーマット設定 {#format-settings} diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/CustomSeparated/CustomSeparatedIgnoreSpaces.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/CustomSeparated/CustomSeparatedIgnoreSpaces.md.hash deleted file mode 100644 index dbf67628f65..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/CustomSeparated/CustomSeparatedIgnoreSpaces.md.hash +++ /dev/null @@ -1 +0,0 @@ -8db9ae404395cea1 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/CustomSeparated/CustomSeparatedIgnoreSpacesWithNames.md b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/CustomSeparated/CustomSeparatedIgnoreSpacesWithNames.md deleted file mode 100644 index 06ddea44dd1..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/CustomSeparated/CustomSeparatedIgnoreSpacesWithNames.md +++ /dev/null @@ -1,15 +0,0 @@ ---- -description: 'CustomSeparatedIgnoreSpacesWithNames形式のドキュメント' -keywords: -- 'CustomSeparatedIgnoreSpacesWithNames' -slug: '/interfaces/formats/CustomSeparatedIgnoreSpacesWithNames' -title: 'CustomSeparatedIgnoreSpacesWithNames' ---- - - - -## 説明 {#description} - -## 使用例 {#example-usage} - -## フォーマット設定 {#format-settings} diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/CustomSeparated/CustomSeparatedIgnoreSpacesWithNames.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/CustomSeparated/CustomSeparatedIgnoreSpacesWithNames.md.hash deleted file mode 100644 index a901dc9bfb5..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/CustomSeparated/CustomSeparatedIgnoreSpacesWithNames.md.hash +++ /dev/null @@ -1 +0,0 @@ -86beadfe008b703e diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/CustomSeparated/CustomSeparatedIgnoreSpacesWithNamesAndTypes.md b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/CustomSeparated/CustomSeparatedIgnoreSpacesWithNamesAndTypes.md deleted file mode 100644 index 1a9eef99c53..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/CustomSeparated/CustomSeparatedIgnoreSpacesWithNamesAndTypes.md +++ /dev/null @@ -1,16 +0,0 @@ ---- -description: 'Documentation for the CustomSeparatedIgnoreSpacesWithNamesAndTypes - format' -keywords: -- 'CustomSeparatedIgnoreSpacesWithNamesAndTypes' -slug: '/interfaces/formats/CustomSeparatedIgnoreSpacesWithNamesAndTypes' -title: 'CustomSeparatedIgnoreSpacesWithNamesAndTypes' ---- - - - -## 説明 {#description} - -## 例の使用法 {#example-usage} - -## フォーマット設定 {#format-settings} diff --git 
a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/CustomSeparated/CustomSeparatedIgnoreSpacesWithNamesAndTypes.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/CustomSeparated/CustomSeparatedIgnoreSpacesWithNamesAndTypes.md.hash deleted file mode 100644 index 2efcaee7351..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/CustomSeparated/CustomSeparatedIgnoreSpacesWithNamesAndTypes.md.hash +++ /dev/null @@ -1 +0,0 @@ -fa2339163c66ce36 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/CustomSeparated/CustomSeparatedWithNames.md b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/CustomSeparated/CustomSeparatedWithNames.md deleted file mode 100644 index 585bfd29223..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/CustomSeparated/CustomSeparatedWithNames.md +++ /dev/null @@ -1,31 +0,0 @@ ---- -alias: [] -description: 'CustomSeparatedWithNames formatのドキュメント' -input_format: true -keywords: -- 'CustomSeparatedWithNames' -output_format: true -slug: '/interfaces/formats/CustomSeparatedWithNames' -title: 'CustomSeparatedWithNames' ---- - - - -| Input | Output | Alias | -|-------|--------|-------| -| ✔ | ✔ | | - -## 説明 {#description} - -列名を含むヘッダー行を印刷し、[TabSeparatedWithNames](../TabSeparated/TabSeparatedWithNames.md)に似ています。 - -## 例の使用法 {#example-usage} - -## フォーマット設定 {#format-settings} - -:::note -設定 [`input_format_with_names_use_header`](../../../operations/settings/settings-formats.md/#input_format_with_names_use_header) が `1` に設定されている場合、 -入力データのカラムは、名前によってテーブルのカラムにマッピングされ、 -設定 [`input_format_skip_unknown_fields`](../../../operations/settings/settings-formats.md/#input_format_skip_unknown_fields) が `1` に設定されている場合、未知の名前のカラムはスキップされます。 -そうでなければ、最初の行がスキップされます。 -::: diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/CustomSeparated/CustomSeparatedWithNames.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/CustomSeparated/CustomSeparatedWithNames.md.hash deleted file mode 100644 index b2142d35d1a..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/CustomSeparated/CustomSeparatedWithNames.md.hash +++ /dev/null @@ -1 +0,0 @@ -ddcb3c1231f36151 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/CustomSeparated/CustomSeparatedWithNamesAndTypes.md b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/CustomSeparated/CustomSeparatedWithNamesAndTypes.md deleted file mode 100644 index 0e9e237525d..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/CustomSeparated/CustomSeparatedWithNamesAndTypes.md +++ /dev/null @@ -1,32 +0,0 @@ ---- -alias: [] -description: 'CustomSeparatedWithNamesAndTypesフォーマットのドキュメント' -input_format: true -keywords: -- 'CustomSeparatedWithNamesAndTypes' -output_format: true -slug: '/interfaces/formats/CustomSeparatedWithNamesAndTypes' -title: 'CustomSeparatedWithNamesAndTypes' ---- - - - -| Input | Output | Alias | -|-------|--------|-------| -| ✔ | ✔ | | - -## Description {#description} - -また、[TabSeparatedWithNamesAndTypes](../TabSeparated/TabSeparatedWithNamesAndTypes.md)と同様に、カラム名とタイプのヘッダー行を2つ印刷します。 - -## Example Usage {#example-usage} - -## Format Settings {#format-settings} - -:::note -設定 [`input_format_with_names_use_header`](../../../operations/settings/settings-formats.md/#input_format_with_names_use_header) が `1` に設定されている場合、入力データのカラムは名前によってテーブルのカラムにマッピングされ、未知の名前のカラムは設定 
[`input_format_skip_unknown_fields`](../../../operations/settings/settings-formats.md/#input_format_skip_unknown_fields) が `1` に設定されている場合にスキップされます。それ以外の場合、最初の行はスキップされます。 -::: - -:::note -設定 [`input_format_with_types_use_header`](../../../operations/settings/settings-formats.md/#input_format_with_types_use_header) が `1` に設定されている場合、入力データのタイプはテーブルの対応するカラムのタイプと比較されます。それ以外の場合、2行目はスキップされます。 -::: diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/CustomSeparated/CustomSeparatedWithNamesAndTypes.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/CustomSeparated/CustomSeparatedWithNamesAndTypes.md.hash deleted file mode 100644 index 5ffb02e4710..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/CustomSeparated/CustomSeparatedWithNamesAndTypes.md.hash +++ /dev/null @@ -1 +0,0 @@ -c1b9117a891176ac diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/DWARF.md b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/DWARF.md deleted file mode 100644 index 94e9c6b3a8b..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/DWARF.md +++ /dev/null @@ -1,85 +0,0 @@ ---- -alias: [] -description: 'DWARFフォーマットのドキュメント' -input_format: true -keywords: -- 'DWARF' -output_format: false -slug: '/interfaces/formats/DWARF' -title: 'DWARF' ---- - - - -| Input | Output | Alias | -|-------|---------|-------| -| ✔ | ✗ | | - -## 説明 {#description} - -`DWARF` 形式は ELF ファイル(実行可能ファイル、ライブラリ、またはオブジェクトファイル)から DWARF デバッグシンボルを解析します。 -これは `dwarfdump` に似ていますが、はるかに高速(毎秒数百 MB)で、SQL をサポートしています。 -それは `.debug_info` セクション内の各デバッグ情報エントリ (DIE) に対して 1 行を生成し、DWARF エンコーディングがツリー内の子供のリストを終了するために使用する “null” エントリを含みます。 - -:::info -`.debug_info` は *ユニット* で構成され、これがコンパイルユニットに対応します: -- 各ユニットは *DIE* のツリーであり、ルートとして `compile_unit` DIE を持ちます。 -- 各 DIE には *タグ* と *属性* のリストがあります。 -- 各属性には *名前* と *値* (および *形式* があり、これは値のエンコード方法を指定します)が含まれます。 - -DIE はソースコードからの項目を表しており、その *タグ* はそれが何の種類のものであるかを示します。例えば、以下のものがあります: - -- 関数 (タグ = `subprogram`) -- クラス/構造体/列挙型 (`class_type`/`structure_type`/`enumeration_type`) -- 変数 (`variable`) -- 関数の引数 (`formal_parameter`) - -ツリー構造は、対応するソースコードを反映します。例えば、`class_type` DIE はクラスのメソッドを表す `subprogram` DIE を含むことができます。 -::: - -`DWARF` 形式は以下のカラムを出力します: - -- `offset` - `.debug_info` セクション内の DIE の位置 -- `size` - エンコードされた DIE のバイト数(属性を含む) -- `tag` - DIE の種類; 従来の "DW_TAG_" プレフィックスは省略されます -- `unit_name` - この DIE を含むコンパイルユニットの名前 -- `unit_offset` - `.debug_info` セクション内のこの DIE を含むコンパイルユニットの位置 -- `ancestor_tags` - ツリー内の現在の DIE の先祖のタグの配列で、内側から外側へ順に -- `ancestor_offsets` - 先祖のオフセット、`ancestor_tags` に平行 -- 利便性のために属性の配列から複製された一般的な属性のいくつか: - - `name` - - `linkage_name` - マングルされた完全修飾名; 通常は関数のみが持つ(すべての関数ではないが) - - `decl_file` - このエンティティが宣言されたソースコードファイルの名前 - - `decl_line` - このエンティティが宣言されたソースコード内の行番号 -- 属性を説明する平行配列: - - `attr_name` - 属性の名前; 従来の "DW_AT_" プレフィックスは省略されています - - `attr_form` - 属性がどのようにエンコードされ、解釈されるか; 従来の DW_FORM_ プレフィックスは省略されます - - `attr_int` - 属性の整数値; 属性が数値値を持たない場合は 0 - - `attr_str` - 属性の文字列値; 属性が文字列値を持たない場合は空です - -## 使用例 {#example-usage} - -`DWARF` 形式は、最も多くの関数定義(テンプレートインスタンスや含まれているヘッダーファイルからの関数を含む)を持つコンパイルユニットを見つけるために使用できます: - -```sql title="クエリ" -SELECT - unit_name, - count() AS c -FROM file('programs/clickhouse', DWARF) -WHERE tag = 'subprogram' AND NOT has(attr_name, 'declaration') -GROUP BY unit_name -ORDER BY c DESC -LIMIT 3 -``` -```text title="レスポンス" -┌─unit_name──────────────────────────────────────────────────┬─────c─┐ -│ ./src/Core/Settings.cpp │ 28939 │ -│ 
./src/AggregateFunctions/AggregateFunctionSumMap.cpp │ 23327 │ -│ ./src/AggregateFunctions/AggregateFunctionUniqCombined.cpp │ 22649 │ -└────────────────────────────────────────────────────────────┴───────┘ - -3 行がセットに含まれています。経過時間: 1.487 秒。139.76 百万行を処理しました。1.12 GB (93.97 百万行/秒, 752.77 MB/秒)。 -ピークメモリ使用量: 271.92 MiB。 -``` - -## 形式設定 {#format-settings} diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/DWARF.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/DWARF.md.hash deleted file mode 100644 index cefc11aa96e..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/DWARF.md.hash +++ /dev/null @@ -1 +0,0 @@ -bb4bd09a96d72721 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Form.md b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Form.md deleted file mode 100644 index fe9d37d315c..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Form.md +++ /dev/null @@ -1,45 +0,0 @@ ---- -alias: [] -description: 'Form形式のドキュメント' -input_format: true -keywords: -- 'Form' -output_format: false -slug: '/interfaces/formats/Form' -title: 'フォーム' ---- - - - -| Input | Output | Alias | -|-------|--------|-------| -| ✔ | ✗ | | - - -## 説明 {#description} - -`Form`フォーマットは、データが`key1=value1&key2=value2`の形式でフォーマットされたapplication/x-www-form-urlencoded形式で単一のレコードを読み取るために使用できます。 - -## 使用例 {#example-usage} - -URLエンコードされたデータを含む`user_files`パスに配置されたファイル`data.tmp`があるとします: - -```text title="data.tmp" -t_page=116&c.e=ls7xfkpm&c.tti.m=raf&rt.start=navigation&rt.bmr=390%2C11%2C10 -``` - -```sql title="Query" -SELECT * FROM file(data.tmp, Form) FORMAT vertical; -``` - -```response title="Response" -行 1: -────── -t_page: 116 -c.e: ls7xfkpm -c.tti.m: raf -rt.start: navigation -rt.bmr: 390,11,10 -``` - -## フォーマット設定 {#format-settings} diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Form.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Form.md.hash deleted file mode 100644 index 8432ff9703d..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Form.md.hash +++ /dev/null @@ -1 +0,0 @@ -fe0ac2ab6ad0f84a diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/HiveText.md b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/HiveText.md deleted file mode 100644 index 74c30edd05d..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/HiveText.md +++ /dev/null @@ -1,15 +0,0 @@ ---- -description: 'Documentation for the HiveText format' -keywords: -- 'HiveText' -slug: '/interfaces/formats/HiveText' -title: 'HiveText' ---- - - - -## 説明 {#description} - -## 使用例 {#example-usage} - -## フォーマット設定 {#format-settings} diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/HiveText.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/HiveText.md.hash deleted file mode 100644 index aceb570bec3..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/HiveText.md.hash +++ /dev/null @@ -1 +0,0 @@ -9cc3e702f28b4a93 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSON.md b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSON.md deleted file mode 100644 index 43034e5d600..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSON.md +++ /dev/null @@ -1,111 +0,0 @@ ---- -alias: [] 
-description: 'JSON フォーマットのドキュメント' -input_format: true -keywords: -- 'JSON' -output_format: true -slug: '/interfaces/formats/JSON' -title: 'JSON' ---- - - - -| Input | Output | Alias | -|-------|--------|-------| -| ✔ | ✔ | | - -## 説明 {#description} - -`JSON`フォーマットは、JSONフォーマットでデータを読み込み、出力します。 - -`JSON`フォーマットは以下を返します: - -| パラメーター | 説明 | -|------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| `meta` | カラム名とその型。 | -| `data` | データテーブル | -| `rows` | 出力行の合計数。 | -| `rows_before_limit_at_least` | LIMITがない場合の最小行数。クエリにLIMITが含まれている場合のみ出力されます。クエリに`GROUP BY`が含まれている場合、rows_before_limit_at_leastはLIMITがなかった場合の正確な行数となります。 | -| `statistics` | `elapsed`、`rows_read`、`bytes_read`などの統計情報。 | -| `totals` | 合計値(WITH TOTALSを使用している場合)。 | -| `extremes` | 極値(extremesが1に設定されている場合)。 | - -`JSON`型はJavaScriptと互換性があります。これを確保するために、一部の文字が追加でエスケープされます: -- スラッシュ `/` は `\/` としてエスケープされます。 -- 一部のブラウザを破損させる代替改行 `U+2028` と `U+2029` は `\uXXXX` としてエスケープされます。 -- ASCII制御文字はエスケープされます: バックスペース、フォームフィード、改行、キャリッジリターン、および水平タブはそれぞれ `\b`、`\f`、`\n`、`\r`、`\t` に置き換えられ、00-1F範囲の残りのバイトは `\uXXXX` シーケンスを使用して置き換えられます。 -- 無効なUTF-8シーケンスは置換文字 � に変更されるため、出力テキストは有効なUTF-8シーケンスで構成されます。 - -JavaScriptとの互換性のために、Int64およびUInt64整数はデフォルトでダブルクオートで囲まれます。 -クオートを削除するには、設定パラメーター [`output_format_json_quote_64bit_integers`](/operations/settings/settings-formats.md/#output_format_json_quote_64bit_integers) を `0` に設定します。 - -ClickHouseは [NULL](/sql-reference/syntax.md) をサポートしており、これはJSON出力で `null` と表示されます。出力で `+nan`、`-nan`、`+inf`、`-inf` 値を有効にするには、[output_format_json_quote_denormals](/operations/settings/settings-formats.md/#output_format_json_quote_denormals) を `1` に設定します。 - -## 使用例 {#example-usage} - -例: - -```sql -SELECT SearchPhrase, count() AS c FROM test.hits GROUP BY SearchPhrase WITH TOTALS ORDER BY c DESC LIMIT 5 FORMAT JSON -``` - -```json -{ - "meta": - [ - { - "name": "num", - "type": "Int32" - }, - { - "name": "str", - "type": "String" - }, - { - "name": "arr", - "type": "Array(UInt8)" - } - ], - - "data": - [ - { - "num": 42, - "str": "hello", - "arr": [0,1] - }, - { - "num": 43, - "str": "hello", - "arr": [0,1,2] - }, - { - "num": 44, - "str": "hello", - "arr": [0,1,2,3] - } - ], - - "rows": 3, - - "rows_before_limit_at_least": 3, - - "statistics": - { - "elapsed": 0.001137687, - "rows_read": 3, - "bytes_read": 24 - } -} -``` - -## フォーマット設定 {#format-settings} - -JSON入力フォーマットの場合、設定 [`input_format_json_validate_types_from_metadata`](/operations/settings/settings-formats.md/#input_format_json_validate_types_from_metadata) が `1` に設定されていると、 -入力データのメタデータからの型が、テーブルの対応するカラムの型と比較されます。 - -## 関連項目 {#see-also} - -- [JSONEachRow](/interfaces/formats/JSONEachRow) フォーマット -- [output_format_json_array_of_rows](/operations/settings/settings-formats.md/#output_format_json_array_of_rows) 設定 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSON.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSON.md.hash deleted file mode 100644 index 2d485f52189..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSON.md.hash +++ /dev/null @@ -1 +0,0 @@ -e440f90ee9c59c93 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONAsObject.md b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONAsObject.md deleted file mode 100644 
index 04f9398b27b..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONAsObject.md +++ /dev/null @@ -1,68 +0,0 @@ ---- -alias: [] -description: 'JSONAsObjectフォーマットのドキュメント' -input_format: true -keywords: -- 'JSONAsObject' -output_format: false -slug: '/interfaces/formats/JSONAsObject' -title: 'JSONAsObject' ---- - - - -## 説明 {#description} - -このフォーマットでは、単一のJSONオブジェクトが単一の [JSON](/sql-reference/data-types/newjson.md) 値として解釈されます。入力に複数のJSONオブジェクト(カンマ区切り)が含まれている場合、それらは別々の行として解釈されます。入力データが角括弧で囲まれている場合、それはJSONの配列として解釈されます。 - -このフォーマットは、[JSON](/sql-reference/data-types/newjson.md) タイプの単一フィールドを持つテーブルに対してのみ解析可能です。残りのカラムは [`DEFAULT`](/sql-reference/statements/create/table.md/#default) または [`MATERIALIZED`](/sql-reference/statements/create/view#materialized-view) に設定する必要があります。 - -## 使用例 {#example-usage} - -### 基本的な例 {#basic-example} - -```sql title="Query" -SET enable_json_type = 1; -CREATE TABLE json_as_object (json JSON) ENGINE = Memory; -INSERT INTO json_as_object (json) FORMAT JSONAsObject {"foo":{"bar":{"x":"y"},"baz":1}},{},{"any json stucture":1} -SELECT * FROM json_as_object FORMAT JSONEachRow; -``` - -```response title="Response" -{"json":{"foo":{"bar":{"x":"y"},"baz":"1"}}} -{"json":{}} -{"json":{"any json stucture":"1"}} -``` - -### JSONオブジェクトの配列 {#an-array-of-json-objects} - -```sql title="Query" -SET enable_json_type = 1; -CREATE TABLE json_square_brackets (field JSON) ENGINE = Memory; -INSERT INTO json_square_brackets FORMAT JSONAsObject [{"id": 1, "name": "name1"}, {"id": 2, "name": "name2"}]; -SELECT * FROM json_square_brackets FORMAT JSONEachRow; -``` - -```response title="Response" -{"field":{"id":"1","name":"name1"}} -{"field":{"id":"2","name":"name2"}} -``` - -### デフォルト値を持つカラム {#columns-with-default-values} - -```sql title="Query" -SET enable_json_type = 1; -CREATE TABLE json_as_object (json JSON, time DateTime MATERIALIZED now()) ENGINE = Memory; -INSERT INTO json_as_object (json) FORMAT JSONAsObject {"foo":{"bar":{"x":"y"},"baz":1}}; -INSERT INTO json_as_object (json) FORMAT JSONAsObject {}; -INSERT INTO json_as_object (json) FORMAT JSONAsObject {"any json stucture":1} -SELECT time, json FROM json_as_object FORMAT JSONEachRow -``` - -```response title="Response" -{"time":"2024-09-16 12:18:10","json":{}} -{"time":"2024-09-16 12:18:13","json":{"any json stucture":"1"}} -{"time":"2024-09-16 12:18:08","json":{"foo":{"bar":{"x":"y"},"baz":"1"}}} -``` - -## フォーマット設定 {#format-settings} diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONAsObject.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONAsObject.md.hash deleted file mode 100644 index 67ca257159d..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONAsObject.md.hash +++ /dev/null @@ -1 +0,0 @@ -88f8c847da22eda5 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONAsString.md b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONAsString.md deleted file mode 100644 index 3ffd995e2bf..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONAsString.md +++ /dev/null @@ -1,67 +0,0 @@ ---- -alias: [] -description: 'JSONAsString フォーマットのドキュメント' -input_format: true -keywords: -- 'JSONAsString' -output_format: false -slug: '/interfaces/formats/JSONAsString' -title: 'JSONAsString' ---- - - - -| Input | Output | Alias | -|-------|---------|-------| -| ✔ | ✗ | | - - -## 説明 {#description} - 
-この形式では、単一のJSONオブジェクトは単一の値として解釈されます。 -入力に複数のJSONオブジェクト(カンマ区切り)がある場合、それらは別々の行として解釈されます。 -入力データが角括弧で囲まれている場合、それはJSONオブジェクトの配列として解釈されます。 - -:::note -この形式は、[String](/sql-reference/data-types/string.md)型の単一フィールドを持つテーブルに対してのみ解析可能です。 -残りのカラムは、[`DEFAULT`](/sql-reference/statements/create/table.md/#default)または[`MATERIALIZED`](/sql-reference/statements/create/view#materialized-view)に設定するか、省略する必要があります。 -::: - -JSONオブジェクト全体を文字列に直列化したら、[JSON関数](/sql-reference/functions/json-functions.md)を使用してそれを処理できます。 - -## 使用例 {#example-usage} - -### 基本例 {#basic-example} - -```sql title="クエリ" -DROP TABLE IF EXISTS json_as_string; -CREATE TABLE json_as_string (json String) ENGINE = Memory; -INSERT INTO json_as_string (json) FORMAT JSONAsString {"foo":{"bar":{"x":"y"},"baz":1}},{},{"any json stucture":1} -SELECT * FROM json_as_string; -``` - -```response title="レスポンス" -┌─json──────────────────────────────┐ -│ {"foo":{"bar":{"x":"y"},"baz":1}} │ -│ {} │ -│ {"any json stucture":1} │ -└───────────────────────────────────┘ -``` - -### JSONオブジェクトの配列 {#an-array-of-json-objects} - -```sql title="クエリ" -CREATE TABLE json_square_brackets (field String) ENGINE = Memory; -INSERT INTO json_square_brackets FORMAT JSONAsString [{"id": 1, "name": "name1"}, {"id": 2, "name": "name2"}]; - -SELECT * FROM json_square_brackets; -``` - -```response title="レスポンス" -┌─field──────────────────────┐ -│ {"id": 1, "name": "name1"} │ -│ {"id": 2, "name": "name2"} │ -└────────────────────────────┘ -``` - -## フォーマット設定 {#format-settings} diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONAsString.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONAsString.md.hash deleted file mode 100644 index 7e46a99cc66..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONAsString.md.hash +++ /dev/null @@ -1 +0,0 @@ -60c15fac59cd2bc1 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONColumns.md b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONColumns.md deleted file mode 100644 index 8082df576c0..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONColumns.md +++ /dev/null @@ -1,44 +0,0 @@ ---- -alias: [] -description: 'JSONColumns フォーマットのドキュメント' -input_format: true -keywords: -- 'JSONColumns' -output_format: true -slug: '/interfaces/formats/JSONColumns' -title: 'JSONColumns' ---- - - - -| Input | Output | Alias | -|-------|--------|-------| -| ✔ | ✔ | | - -## 説明 {#description} - -:::tip -JSONColumns* フォーマットの出力は ClickHouse フィールド名を提供し、そのフィールドに対するテーブルの各行の内容を示します。視覚的には、データは左に90度回転しています。 -::: - -このフォーマットでは、すべてのデータが単一の JSON オブジェクトとして表現されます。 - -:::note -`JSONColumns` フォーマットはすべてのデータをメモリにバッファリングし、その後単一のブロックとして出力するため、高いメモリ消費を引き起こす可能性があります。 -::: - -## 使用例 {#example-usage} - -例: - -```json -{ - "num": [42, 43, 44], - "str": ["hello", "hello", "hello"], - "arr": [[0,1], [0,1,2], [0,1,2,3]] -} -``` - -## フォーマット設定 {#format-settings} - -インポート中に、不明な名前のカラムは、設定 [`input_format_skip_unknown_fields`](/operations/settings/settings-formats.md/#input_format_skip_unknown_fields) が `1` に設定されている場合、スキップされます。ブロックに存在しないカラムはデフォルト値で埋められます(ここでは [`input_format_defaults_for_omitted_fields`](/operations/settings/settings-formats.md/#input_format_defaults_for_omitted_fields) 設定を使用できます)。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONColumns.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONColumns.md.hash deleted 
file mode 100644 index d28c35af0b6..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONColumns.md.hash +++ /dev/null @@ -1 +0,0 @@ -7f139ea2713f58da diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONColumnsWithMetadata.md b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONColumnsWithMetadata.md deleted file mode 100644 index baaa7cd4055..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONColumnsWithMetadata.md +++ /dev/null @@ -1,71 +0,0 @@ ---- -alias: [] -description: 'JSONColumnsWithMetadata フォーマットのドキュメント' -input_format: true -keywords: -- 'JSONColumnsWithMetadata' -output_format: true -slug: '/interfaces/formats/JSONColumnsWithMetadata' -title: 'JSONColumnsWithMetadata' ---- - - - -| Input | Output | Alias | -|-------|--------|-------| -| ✔ | ✔ | | - -## 説明 {#description} - -[`JSONColumns`](./JSONColumns.md) フォーマットとは異なり、メタデータと統計情報も含まれており([`JSON`](./JSON.md) フォーマットに似ています)、これが特徴です。 - -:::note -`JSONColumnsWithMetadata` フォーマットは、すべてのデータをメモリにバッファし、その後単一のブロックとして出力するため、高いメモリ消費を引き起こす可能性があります。 -::: - -## 使用例 {#example-usage} - -例: - -```json -{ - "meta": - [ - { - "name": "num", - "type": "Int32" - }, - { - "name": "str", - "type": "String" - }, - - { - "name": "arr", - "type": "Array(UInt8)" - } - ], - - "data": - { - "num": [42, 43, 44], - "str": ["hello", "hello", "hello"], - "arr": [[0,1], [0,1,2], [0,1,2,3]] - }, - - "rows": 3, - - "rows_before_limit_at_least": 3, - - "statistics": - { - "elapsed": 0.000272376, - "rows_read": 3, - "bytes_read": 24 - } -} -``` - -`JSONColumnsWithMetadata` 入力フォーマットに対して、設定 [`input_format_json_validate_types_from_metadata`](/operations/settings/settings-formats.md/#input_format_json_validate_types_from_metadata) が `1` に設定されている場合、入力データのメタデータから取得したタイプが、テーブルの対応するカラムのタイプと比較されます。 - -## フォーマット設定 {#format-settings} diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONColumnsWithMetadata.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONColumnsWithMetadata.md.hash deleted file mode 100644 index 2b22385b249..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONColumnsWithMetadata.md.hash +++ /dev/null @@ -1 +0,0 @@ -4497e336f3ccc798 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompact.md b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompact.md deleted file mode 100644 index 13565683c1d..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompact.md +++ /dev/null @@ -1,62 +0,0 @@ ---- -alias: [] -description: 'JSONCompact形式のドキュメント' -input_format: true -keywords: -- 'JSONCompact' -output_format: true -slug: '/interfaces/formats/JSONCompact' -title: 'JSONCompact' ---- - - - -| Input | Output | Alias | -|-------|--------|-------| -| ✔ | ✔ | | - -## 説明 {#description} - -データ行がオブジェクトではなく配列として出力される点が[JSON](./JSON.md)と異なります。 - -## 使用例 {#example-usage} - -```json -{ - "meta": - [ - { - "name": "num", - "type": "Int32" - }, - { - "name": "str", - "type": "String" - }, - { - "name": "arr", - "type": "Array(UInt8)" - } - ], - - "data": - [ - [42, "hello", [0,1]], - [43, "hello", [0,1,2]], - [44, "hello", [0,1,2,3]] - ], - - "rows": 3, - - "rows_before_limit_at_least": 3, - - "statistics": - { - "elapsed": 0.001222069, - "rows_read": 3, - "bytes_read": 24 - } -} -``` - -## フォーマット設定 {#format-settings} diff --git 
a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompact.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompact.md.hash deleted file mode 100644 index e47eefa11be..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompact.md.hash +++ /dev/null @@ -1 +0,0 @@ -3faa4511cc277042 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactColumns.md b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactColumns.md deleted file mode 100644 index 12be84d7507..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactColumns.md +++ /dev/null @@ -1,38 +0,0 @@ ---- -alias: [] -description: 'JSONCompactColumns フォーマットのドキュメント' -input_format: true -keywords: -- 'JSONCompactColumns' -output_format: true -slug: '/interfaces/formats/JSONCompactColumns' -title: 'JSONCompactColumns' ---- - - - -| Input | Output | Alias | -|-------|--------|-------| -| ✔ | ✔ | | - -## 説明 {#description} - -このフォーマットでは、すべてのデータが単一のJSON配列として表現されます。 - -:::note -`JSONCompactColumns` 出力フォーマットは、すべてのデータをメモリにバッファリングして、単一のブロックとして出力します。これにより、高いメモリ消費が発生する可能性があります。 -::: - -## 使用例 {#example-usage} - -```json -[ - [42, 43, 44], - ["hello", "hello", "hello"], - [[0,1], [0,1,2], [0,1,2,3]] -] -``` - -ブロックに存在しないカラムは、デフォルト値で埋められます(ここでは [`input_format_defaults_for_omitted_fields`](/operations/settings/settings-formats.md/#input_format_defaults_for_omitted_fields) 設定を使用できます) - -## フォーマット設定 {#format-settings} diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactColumns.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactColumns.md.hash deleted file mode 100644 index 48e1053092f..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactColumns.md.hash +++ /dev/null @@ -1 +0,0 @@ -8c9a01212234a03f diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactEachRow.md b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactEachRow.md deleted file mode 100644 index ce4f34a31dd..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactEachRow.md +++ /dev/null @@ -1,32 +0,0 @@ ---- -alias: [] -description: 'JSONCompactEachRow フォーマットのドキュメント' -input_format: true -keywords: -- 'JSONCompactEachRow' -output_format: true -slug: '/interfaces/formats/JSONCompactEachRow' -title: 'JSONCompactEachRow' ---- - - - -| Input | Output | Alias | -|-------|--------|-------| -| ✔ | ✔ | | - -## 説明 {#description} - -[`JSONEachRow`](./JSONEachRow.md) とは異なり、データ行はオブジェクトではなく配列として出力されます。 - -## 例の使い方 {#example-usage} - -例: - -```json -[42, "hello", [0,1]] -[43, "hello", [0,1,2]] -[44, "hello", [0,1,2,3]] -``` - -## フォーマット設定 {#format-settings} diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactEachRow.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactEachRow.md.hash deleted file mode 100644 index 6a43947dd1e..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactEachRow.md.hash +++ /dev/null @@ -1 +0,0 @@ -fa9ced0d7d346ba9 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactEachRowWithNames.md 
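上記の `JSONCompactEachRow` の例のデータは、入力形式としても次のように取り込めます(テーブル名 `events` は説明用の仮の名前です)。`SELECT` に同じフォーマットを指定すれば、上記の例と同じ形で出力されます。

```sql title="クエリ"
CREATE TABLE events (num Int32, str String, arr Array(UInt8)) ENGINE = Memory;
INSERT INTO events FORMAT JSONCompactEachRow [42, "hello", [0,1]] [43, "hello", [0,1,2]] [44, "hello", [0,1,2,3]]
SELECT * FROM events FORMAT JSONCompactEachRow;
```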
b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactEachRowWithNames.md deleted file mode 100644 index cda7bc257de..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactEachRowWithNames.md +++ /dev/null @@ -1,32 +0,0 @@ ---- -alias: [] -description: 'JSONCompactEachRowWithNames形式のドキュメント' -input_format: true -keywords: -- 'JSONCompactEachRowWithNames' -output_format: true -slug: '/interfaces/formats/JSONCompactEachRowWithNames' -title: 'JSONCompactEachRowWithNames' ---- - - - -| Input | Output | Alias | -|-------|--------|-------| -| ✔ | ✔ | | - - -## 説明 {#description} - -[`JSONCompactEachRow`](./JSONCompactEachRow.md) フォーマットと異なり、カラム名を含むヘッダ行も表示され、[`TabSeparatedWithNames`](../TabSeparated/TabSeparatedWithNames.md) フォーマットに似ています。 - - -## 使用例 {#example-usage} - -## フォーマット設定 {#format-settings} - -:::note -設定 [`input_format_with_names_use_header`](/operations/settings/settings-formats.md/#input_format_with_names_use_header) が 1 に設定されている場合、 -入力データからのカラムは、その名前によってテーブルのカラムにマッピングされます。未知の名前のカラムは、設定 [`input_format_skip_unknown_fields`](/operations/settings/settings-formats.md/#input_format_skip_unknown_fields) が 1 に設定されている場合にスキップされます。 -そうでなければ、最初の行はスキップされます。 -::: diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactEachRowWithNames.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactEachRowWithNames.md.hash deleted file mode 100644 index 20a48bc4392..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactEachRowWithNames.md.hash +++ /dev/null @@ -1 +0,0 @@ -28b804a7bc8d1ea9 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactEachRowWithNamesAndTypes.md b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactEachRowWithNamesAndTypes.md deleted file mode 100644 index a84683ebd00..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactEachRowWithNamesAndTypes.md +++ /dev/null @@ -1,32 +0,0 @@ ---- -alias: [] -description: 'JSONCompactEachRowWithNamesAndTypes フォーマットのドキュメント' -input_format: true -keywords: -- 'JSONCompactEachRowWithNamesAndTypes' -output_format: true -slug: '/interfaces/formats/JSONCompactEachRowWithNamesAndTypes' -title: 'JSONCompactEachRowWithNamesAndTypes' ---- - - - -| Input | Output | Alias | -|-------|--------|-------| -| ✔ | ✔ | | - -## 説明 {#description} - -[`JSONCompactEachRow`](./JSONCompactEachRow.md) フォーマットとは異なり、列の名前と型の2つのヘッダ行を出力します。これは、[TabSeparatedWithNamesAndTypes](../TabSeparated/TabSeparatedWithNamesAndTypes.md) フォーマットに似ています。 - -## 使用例 {#example-usage} - -## フォーマット設定 {#format-settings} - -:::note -設定 [`input_format_with_names_use_header`](/operations/settings/settings-formats.md/#input_format_with_names_use_header) が `1` に設定されている場合、 -入力データのカラムは、名前によってテーブルのカラムにマッピングされます。未知の名前のカラムは、設定 [input_format_skip_unknown_fields](/operations/settings/settings-formats.md/#input_format_skip_unknown_fields) が `1` に設定されている場合、スキップされます。 -そうでない場合、最初の行はスキップされます。 -設定 [`input_format_with_types_use_header`](/operations/settings/settings-formats.md/#input_format_with_types_use_header) が `1` に設定されている場合、 -入力データの型は、テーブルの対応するカラムの型と比較されます。そうでない場合、2行目はスキップされます。 -::: diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactEachRowWithNamesAndTypes.md.hash 
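`JSONCompactEachRowWithNamesAndTypes` の出力イメージを補足すると、たとえば次のクエリでは、カラム名の行、型の行、データ行の順に出力されます(値は例示用)。

```sql title="クエリ"
SELECT toInt32(42) AS num, 'hello' AS str, CAST([0, 1], 'Array(UInt8)') AS arr
FORMAT JSONCompactEachRowWithNamesAndTypes
```

```response title="レスポンス"
["num", "str", "arr"]
["Int32", "String", "Array(UInt8)"]
[42, "hello", [0,1]]
```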
b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactEachRowWithNamesAndTypes.md.hash deleted file mode 100644 index e6b650fd687..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactEachRowWithNamesAndTypes.md.hash +++ /dev/null @@ -1 +0,0 @@ -cdbc848900d145df diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactStrings.md b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactStrings.md deleted file mode 100644 index 347da24e9e8..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactStrings.md +++ /dev/null @@ -1,62 +0,0 @@ ---- -alias: [] -description: 'JSONCompactStrings フォーマットのドキュメント' -input_format: false -keywords: -- 'JSONCompactStrings' -output_format: true -slug: '/interfaces/formats/JSONCompactStrings' -title: 'JSONCompactStrings' ---- - - - -| Input | Output | Alias | -|-------|--------|-------| -| ✗ | ✔ | | - -## 説明 {#description} - -`JSONCompactStrings` フォーマットは、データ行がオブジェクトではなく配列として出力される点のみが [JSONStrings](./JSONStrings.md) と異なります。 - -## 使用例 {#example-usage} - -```json -{ - "meta": - [ - { - "name": "num", - "type": "Int32" - }, - { - "name": "str", - "type": "String" - }, - { - "name": "arr", - "type": "Array(UInt8)" - } - ], - - "data": - [ - ["42", "hello", "[0,1]"], - ["43", "hello", "[0,1,2]"], - ["44", "hello", "[0,1,2,3]"] - ], - - "rows": 3, - - "rows_before_limit_at_least": 3, - - "statistics": - { - "elapsed": 0.001572097, - "rows_read": 3, - "bytes_read": 24 - } -} -``` - -## フォーマット設定 {#format-settings} diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactStrings.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactStrings.md.hash deleted file mode 100644 index e0472180e4f..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactStrings.md.hash +++ /dev/null @@ -1 +0,0 @@ -bb9fe09ac0646b0f diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactStringsEachRow.md b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactStringsEachRow.md deleted file mode 100644 index 104819bcf65..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactStringsEachRow.md +++ /dev/null @@ -1,32 +0,0 @@ ---- -alias: [] -description: 'JSONCompactStringsEachRowフォーマットのドキュメント' -input_format: true -keywords: -- 'JSONCompactStringsEachRow' -output_format: true -slug: '/interfaces/formats/JSONCompactStringsEachRow' -title: 'JSONCompactStringsEachRow' ---- - - - -| Input | Output | Alias | -|-------|--------|-------| -| ✔ | ✔ | | - -## 説明 {#description} - -データフィールドが型付きJSON値ではなく文字列として出力される点を除いて、 [`JSONCompactEachRow`](./JSONCompactEachRow.md) とは異なります。 - -## 使用例 {#example-usage} - -例: - -```json -["42", "hello", "[0,1]"] -["43", "hello", "[0,1,2]"] -["44", "hello", "[0,1,2,3]"] -``` - -## フォーマット設定 {#format-settings} diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactStringsEachRow.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactStringsEachRow.md.hash deleted file mode 100644 index 4c0cfcc8a56..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactStringsEachRow.md.hash +++ /dev/null @@ -1 +0,0 @@ -74331893124d9788 diff --git 
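`JSONCompactStringsEachRow` は入力にも使用でき、文字列化された値が各カラムの型に合わせて解析されます。以下は一例です(テーブル名 `t_strings` は説明用の仮の名前です)。

```sql title="クエリ"
CREATE TABLE t_strings (num Int32, str String) ENGINE = Memory;
INSERT INTO t_strings FORMAT JSONCompactStringsEachRow ["42", "hello"] ["43", "world"]
SELECT * FROM t_strings;
```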
a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactStringsEachRowWithNames.md b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactStringsEachRowWithNames.md deleted file mode 100644 index fd040e30a99..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactStringsEachRowWithNames.md +++ /dev/null @@ -1,28 +0,0 @@ ---- -alias: [] -description: 'JSONCompactStringsEachRowWithNames形式のドキュメント' -input_format: true -keywords: -- 'JSONCompactStringsEachRowWithNames' -output_format: true -slug: '/interfaces/formats/JSONCompactStringsEachRowWithNames' -title: 'JSONCompactStringsEachRowWithNames' ---- - - - -| Input | Output | Alias | -|-------|--------|-------| -| ✔ | ✔ | | - -## 説明 {#description} - -[`JSONCompactEachRow`](./JSONCompactEachRow.md) 形式とは異なり、[TabSeparatedWithNames](../TabSeparated/TabSeparatedWithNames.md) 形式と同様に、カラム名を含むヘッダー行を出力します。 - -## 使用例 {#example-usage} - -## 形式設定 {#format-settings} - -:::note -[`input_format_with_names_use_header`](/operations/settings/settings-formats.md/#input_format_with_names_use_header) の設定が `1` に設定されている場合、入力データのカラムは、その名前によってテーブルのカラムにマッピングされ、未知の名前のカラムは [`input_format_skip_unknown_fields`](/operations/settings/settings-formats.md/#input_format_skip_unknown_fields) の設定が `1` に設定されている場合にはスキップされます。それ以外の場合、最初の行はスキップされます。 -::: diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactStringsEachRowWithNames.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactStringsEachRowWithNames.md.hash deleted file mode 100644 index 67da3161753..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactStringsEachRowWithNames.md.hash +++ /dev/null @@ -1 +0,0 @@ -e16aa50bd65cd9d7 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactStringsEachRowWithNamesAndTypes.md b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactStringsEachRowWithNamesAndTypes.md deleted file mode 100644 index b88c643d3fa..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactStringsEachRowWithNamesAndTypes.md +++ /dev/null @@ -1,25 +0,0 @@ ---- -description: 'JSONCompactStringsEachRowWithNamesAndTypes フォーマットのドキュメント' -keywords: -- 'JSONCompactStringsEachRowWithNamesAndTypes' -slug: '/interfaces/formats/JSONCompactStringsEachRowWithNamesAndTypes' -title: 'JSONCompactStringsEachRowWithNamesAndTypes' ---- - - - -## 説明 {#description} - -`JSONCompactEachRow` フォーマットとは異なり、カラム名とタイプの2つのヘッダ行を印刷します。これは、[TabSeparatedWithNamesAndTypes](/interfaces/formats/TabSeparatedRawWithNamesAndTypes) に似ています。 - -## 使用例 {#example-usage} - -## フォーマット設定 {#format-settings} - -:::note -設定 [input_format_with_names_use_header](/operations/settings/settings-formats.md/#input_format_with_names_use_header) が 1 に設定されている場合、入力データのカラムはその名前によってテーブルのカラムにマッピングされます。カラム名が不明なものは、設定 [input_format_skip_unknown_fields](/operations/settings/settings-formats.md/#input_format_skip_unknown_fields) が 1 に設定されている場合、スキップされます。そうでない場合、最初の行はスキップされます。 -::: - -:::note -設定 [input_format_with_types_use_header](/operations/settings/settings-formats.md/#input_format_with_types_use_header) が 1 に設定されている場合、入力データのタイプは、テーブルの対応するカラムのタイプと比較されます。そうでない場合、2行目はスキップされます。 -::: diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactStringsEachRowWithNamesAndTypes.md.hash 
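`JSONCompactStringsEachRowWithNames` の出力では、先頭にカラム名のヘッダー行が付き、その後に文字列化されたデータ行が続きます。たとえば次のようになります(値は例示用)。

```sql title="クエリ"
SELECT toInt32(42) AS num, 'hello' AS str
FORMAT JSONCompactStringsEachRowWithNames
```

```response title="レスポンス"
["num", "str"]
["42", "hello"]
```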
b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactStringsEachRowWithNamesAndTypes.md.hash deleted file mode 100644 index a5ec0068b06..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactStringsEachRowWithNamesAndTypes.md.hash +++ /dev/null @@ -1 +0,0 @@ -4d467ebbb48f6baa diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONEachRow.md b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONEachRow.md deleted file mode 100644 index 7438116f16c..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONEachRow.md +++ /dev/null @@ -1,27 +0,0 @@ ---- -description: 'JSONEachRowフォーマットのドキュメント' -keywords: -- 'JSONEachRow' -slug: '/interfaces/formats/JSONEachRow' -title: 'JSONEachRow' ---- - - - -## 説明 {#description} - -このフォーマットでは、ClickHouseは各行を別々の、改行区切りのJSONオブジェクトとして出力します。別名: `JSONLines`, `NDJSON`. - -## 使用例 {#example-usage} - -例: - -```json -{"num":42,"str":"hello","arr":[0,1]} -{"num":43,"str":"hello","arr":[0,1,2]} -{"num":44,"str":"hello","arr":[0,1,2,3]} -``` - -データをインポートする際に、設定が [input_format_skip_unknown_fields](/operations/settings/settings-formats.md/#input_format_skip_unknown_fields) を 1 に設定されている場合、名前が不明なカラムはスキップされます。 - -## フォーマット設定 {#format-settings} diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONEachRow.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONEachRow.md.hash deleted file mode 100644 index 7d790010144..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONEachRow.md.hash +++ /dev/null @@ -1 +0,0 @@ -98bc6a02bb597212 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONEachRowWithProgress.md b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONEachRowWithProgress.md deleted file mode 100644 index b633ba50554..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONEachRowWithProgress.md +++ /dev/null @@ -1,31 +0,0 @@ ---- -alias: [] -description: 'JSONEachRowWithProgress フォーマットのドキュメント' -input_format: false -keywords: -- 'JSONEachRowWithProgress' -output_format: true -slug: '/interfaces/formats/JSONEachRowWithProgress' -title: 'JSONEachRowWithProgress' ---- - - - -| Input | Output | Alias | -|-------|--------|-------| -| ✗ | ✔ | | - -## 説明 {#description} - -[`JSONEachRow`](./JSONEachRow.md)/[`JSONStringsEachRow`](./JSONStringsEachRow.md) と異なり、ClickHouse は JSON 値として進捗情報も提供します。 - -## 例の使い方 {#example-usage} - -```json -{"row":{"num":42,"str":"hello","arr":[0,1]}} -{"row":{"num":43,"str":"hello","arr":[0,1,2]}} -{"row":{"num":44,"str":"hello","arr":[0,1,2,3]}} -{"progress":{"read_rows":"3","read_bytes":"24","written_rows":"0","written_bytes":"0","total_rows_to_read":"3"}} -``` - -## フォーマット設定 {#format-settings} diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONEachRowWithProgress.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONEachRowWithProgress.md.hash deleted file mode 100644 index 6d9d5648aea..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONEachRowWithProgress.md.hash +++ /dev/null @@ -1 +0,0 @@ -32cf9fd3f9ff7fa8 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONLines.md 
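上記の `JSONEachRow` の例のデータは、たとえば次のように挿入と出力の両方で利用できます(テーブル名 `jer_demo` は説明用の仮の名前です)。

```sql title="クエリ"
CREATE TABLE jer_demo (num Int32, str String, arr Array(UInt8)) ENGINE = Memory;
INSERT INTO jer_demo FORMAT JSONEachRow {"num":42,"str":"hello","arr":[0,1]} {"num":43,"str":"hello","arr":[0,1,2]}
SELECT * FROM jer_demo FORMAT JSONEachRow;
```

```response title="レスポンス"
{"num":42,"str":"hello","arr":[0,1]}
{"num":43,"str":"hello","arr":[0,1,2]}
```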
b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONLines.md deleted file mode 100644 index 9a2c7d1038d..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONLines.md +++ /dev/null @@ -1,15 +0,0 @@ ---- -description: 'JSONLines フォーマットのドキュメント' -keywords: -- 'JSONLines' -slug: '/interfaces/formats/JSONLines' -title: 'JSONLines' ---- - - - -## 説明 {#description} - -## 使用例 {#example-usage} - -## フォーマット設定 {#format-settings} diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONLines.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONLines.md.hash deleted file mode 100644 index 75af51a059b..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONLines.md.hash +++ /dev/null @@ -1 +0,0 @@ -c60c10423a11d5a4 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONObjectEachRow.md b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONObjectEachRow.md deleted file mode 100644 index e94fda9a626..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONObjectEachRow.md +++ /dev/null @@ -1,240 +0,0 @@ ---- -alias: [] -description: 'JSONObjectEachRowフォーマットのドキュメント' -input_format: true -keywords: -- 'JSONObjectEachRow' -output_format: true -slug: '/interfaces/formats/JSONObjectEachRow' -title: 'JSONObjectEachRow' ---- - - - -| Input | Output | Alias | -|-------|--------|-------| -| ✔ | ✔ | | - -## 説明 {#description} - -このフォーマットでは、すべてのデータが単一の JSON オブジェクトとして表現され、各行はこのオブジェクトの別々のフィールドとして表されます。これは、[`JSONEachRow`](./JSONEachRow.md) フォーマットに似ています。 - -## 使用例 {#example-usage} - -### 基本例 {#basic-example} - -以下の JSON があるとします: - -```json -{ - "row_1": {"num": 42, "str": "hello", "arr": [0,1]}, - "row_2": {"num": 43, "str": "hello", "arr": [0,1,2]}, - "row_3": {"num": 44, "str": "hello", "arr": [0,1,2,3]} -} -``` - -オブジェクト名をカラム値として使用するには、特別な設定である [`format_json_object_each_row_column_for_object_name`](/operations/settings/settings-formats.md/#format_json_object_each_row_column_for_object_name) を使用できます。 -この設定の値は、結果のオブジェクト内の行に対して JSON キーとして使用されるカラムの名前に設定されます。 - -#### 出力 {#output} - -テーブル `test` が次の二つのカラムを持っているとしましょう: - -```text -┌─object_name─┬─number─┐ -│ first_obj │ 1 │ -│ second_obj │ 2 │ -│ third_obj │ 3 │ -└─────────────┴────────┘ -``` - -これを `JSONObjectEachRow` フォーマットで出力し、`format_json_object_each_row_column_for_object_name` 設定を使用しましょう: - -```sql title="Query" -SELECT * FROM test SETTINGS format_json_object_each_row_column_for_object_name='object_name' -``` - -```json title="Response" -{ - "first_obj": {"number": 1}, - "second_obj": {"number": 2}, - "third_obj": {"number": 3} -} -``` - -#### 入力 {#input} - -前の例からの出力を `data.json` という名前のファイルに保存したとしましょう: - -```sql title="Query" -SELECT * FROM file('data.json', JSONObjectEachRow, 'object_name String, number UInt64') SETTINGS format_json_object_each_row_column_for_object_name='object_name' -``` - -```response title="Response" -┌─object_name─┬─number─┐ -│ first_obj │ 1 │ -│ second_obj │ 2 │ -│ third_obj │ 3 │ -└─────────────┴────────┘ -``` - -スキーマ推論にも対応しています: - -```sql title="Query" -DESCRIBE file('data.json', JSONObjectEachRow) SETTING format_json_object_each_row_column_for_object_name='object_name' -``` - -```response title="Response" -┌─name────────┬─type────────────┐ -│ object_name │ String │ -│ number │ Nullable(Int64) │ -└─────────────┴─────────────────┘ -``` - - -### データの挿入 {#json-inserting-data} - -```sql 
title="Query" -INSERT INTO UserActivity FORMAT JSONEachRow {"PageViews":5, "UserID":"4324182021466249494", "Duration":146,"Sign":-1} {"UserID":"4324182021466249494","PageViews":6,"Duration":185,"Sign":1} -``` - -ClickHouse では以下が可能です: - -- オブジェクト内のキーと値のペアの順序に制限がない。 -- 一部の値を省略すること。 - -ClickHouse は、要素間の空白やオブジェクト後のカンマを無視します。すべてのオブジェクトを1行で渡すことができます。行の改行で区切る必要はありません。 - -#### 省略値の処理 {#omitted-values-processing} - -ClickHouse は省略された値を対応する [データ型](/sql-reference/data-types/index.md) のデフォルト値で置き換えます。 - -もし `DEFAULT expr` が指定されている場合、ClickHouse は [input_format_defaults_for_omitted_fields](/operations/settings/settings-formats.md/#input_format_defaults_for_omitted_fields) 設定に応じて異なる置き換えルールを使用します。 - -以下のテーブルを考えましょう: - -```sql title="Query" -CREATE TABLE IF NOT EXISTS example_table -( - x UInt32, - a DEFAULT x * 2 -) ENGINE = Memory; -``` - -- `input_format_defaults_for_omitted_fields = 0` の場合、`x` と `a` のデフォルト値は `0` です(`UInt32` データ型のデフォルト値として)。 -- `input_format_defaults_for_omitted_fields = 1` の場合、`x` のデフォルト値は `0` ですが、`a` のデフォルト値は `x * 2` になります。 - -:::note -`input_format_defaults_for_omitted_fields = 1` の設定でデータを挿入する場合、ClickHouse は `input_format_defaults_for_omitted_fields = 0` の設定での挿入と比べて、より多くの計算リソースを消費します。 -::: - -### データの選択 {#json-selecting-data} - -`UserActivity` テーブルを例にとりましょう: - -```response -┌──────────────UserID─┬─PageViews─┬─Duration─┬─Sign─┐ -│ 4324182021466249494 │ 5 │ 146 │ -1 │ -│ 4324182021466249494 │ 6 │ 185 │ 1 │ -└─────────────────────┴───────────┴──────────┴──────┘ -``` - -クエリ `SELECT * FROM UserActivity FORMAT JSONEachRow` は以下を返します: - -```response -{"UserID":"4324182021466249494","PageViews":5,"Duration":146,"Sign":-1} -{"UserID":"4324182021466249494","PageViews":6,"Duration":185,"Sign":1} -``` - -[JSON](/interfaces/formats/JSON) フォーマットとは異なり、無効な UTF-8 シーケンスの置き換えは行われません。値は `JSON` と同様にエスケープされます。 - -:::info -任意のバイトセットを文字列として出力できます。テーブル内のデータが情報を失うことなく JSON 形式でフォーマットできると確信している場合は、[`JSONEachRow`](./JSONEachRow.md) フォーマットを使用してください。 -::: - -### 入れ子構造の使用 {#jsoneachrow-nested} - -[`Nested`](/sql-reference/data-types/nested-data-structures/index.md) データ型のカラムを持つテーブルがある場合、同じ構造の JSON データを挿入することができます。この機能は、[input_format_import_nested_json](/operations/settings/settings-formats.md/#input_format_import_nested_json) 設定を有効にして使用します。 - -例えば、以下のテーブルを考えましょう: - -```sql -CREATE TABLE json_each_row_nested (n Nested (s String, i Int32) ) ENGINE = Memory -``` - -`Nested` データ型の説明で示されているように、ClickHouse は入れ子構造の各コンポーネントを別々のカラム(私たちのテーブルに対しては `n.s` と `n.i`)として扱います。以下の方法でデータを挿入できます: - -```sql -INSERT INTO json_each_row_nested FORMAT JSONEachRow {"n.s": ["abc", "def"], "n.i": [1, 23]} -``` - -階層的な JSON オブジェクトとしてデータを挿入するには、[`input_format_import_nested_json=1`](/operations/settings/settings-formats.md/#input_format_import_nested_json) を設定します。 - -```json -{ - "n": { - "s": ["abc", "def"], - "i": [1, 23] - } -} -``` - -この設定がない場合、ClickHouse は例外をスローします。 - -```sql title="Query" -SELECT name, value FROM system.settings WHERE name = 'input_format_import_nested_json' -``` - -```response title="Response" -┌─name────────────────────────────┬─value─┐ -│ input_format_import_nested_json │ 0 │ -└─────────────────────────────────┴───────┘ -``` - -```sql title="Query" -INSERT INTO json_each_row_nested FORMAT JSONEachRow {"n": {"s": ["abc", "def"], "i": [1, 23]}} -``` - -```response title="Response" -Code: 117. 
DB::Exception: Unknown field found while parsing JSONEachRow format: n: (at row 1) -``` - -```sql title="Query" -SET input_format_import_nested_json=1 -INSERT INTO json_each_row_nested FORMAT JSONEachRow {"n": {"s": ["abc", "def"], "i": [1, 23]}} -SELECT * FROM json_each_row_nested -``` - -```response title="Response" -┌─n.s───────────┬─n.i────┐ -│ ['abc','def'] │ [1,23] │ -└───────────────┴────────┘ -``` - -## フォーマット設定 {#format-settings} - -| 設定 | 説明 | デフォルト | メモ | -|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------|------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| [`input_format_import_nested_json`](/operations/settings/settings-formats.md/#input_format_import_nested_json) | 入れ子の JSON データを入れ子のテーブルにマッピングします(JSONEachRow フォーマットで機能します)。 | `false` | | -| [`input_format_json_read_bools_as_numbers`](/operations/settings/settings-formats.md/#input_format_json_read_bools_as_numbers) | JSON 入力フォーマットでブール値を数値として解析できるようにします。 | `true` | | -| [`input_format_json_read_bools_as_strings`](/operations/settings/settings-formats.md/#input_format_json_read_bools_as_strings) | JSON 入力フォーマットでブール値を文字列として解析できるようにします。 | `true` | | -| [`input_format_json_read_numbers_as_strings`](/operations/settings/settings-formats.md/#input_format_json_read_numbers_as_strings) | JSON 入力フォーマットで数値を文字列として解析できるようにします。 | `true` | | -| [`input_format_json_read_arrays_as_strings`](/operations/settings/settings-formats.md/#input_format_json_read_arrays_as_strings) | JSON 入力フォーマットで JSON 配列を文字列として解析できるようにします。 | `true` | | -| [`input_format_json_read_objects_as_strings`](/operations/settings/settings-formats.md/#input_format_json_read_objects_as_strings) | JSON 入力フォーマットで JSON オブジェクトを文字列として解析できるようにします。 | `true` | | -| [`input_format_json_named_tuples_as_objects`](/operations/settings/settings-formats.md/#input_format_json_named_tuples_as_objects) | 名前付きタプルカラムを JSON オブジェクトとして解析します。 | `true` | | -| [`input_format_json_try_infer_numbers_from_strings`](/operations/settings/settings-formats.md/#input_format_json_try_infer_numbers_from_strings) | スキーマ推論中に文字列フィールドから数値を推測しようとします。 | `false` | | -| [`input_format_json_try_infer_named_tuples_from_objects`](/operations/settings/settings-formats.md/#input_format_json_try_infer_named_tuples_from_objects) | スキーマ推論中に JSON オブジェクトから名前付きタプルを推測しようとします。 | `true` | | -| [`input_format_json_infer_incomplete_types_as_strings`](/operations/settings/settings-formats.md/#input_format_json_infer_incomplete_types_as_strings) | JSON 入力フォーマットでスキーマ推論中に Nill または空のオブジェクト/配列のみを含むキーに対して型 String を使用します。 | `true` | | -| [`input_format_json_defaults_for_missing_elements_in_named_tuple`](/operations/settings/settings-formats.md/#input_format_json_defaults_for_missing_elements_in_named_tuple) | 名前付きタプルを解析中に JSON オブジェクトの欠落している要素にデフォルト値を挿入します。 | `true` | | -| [`input_format_json_ignore_unknown_keys_in_named_tuple`](/operations/settings/settings-formats.md/#input_format_json_ignore_unknown_keys_in_named_tuple) | 名前付きタプルの JSON オブジェクト内の未知のキーを無視します。 | `false` | | -| 
[`input_format_json_compact_allow_variable_number_of_columns`](/operations/settings/settings-formats.md/#input_format_json_compact_allow_variable_number_of_columns) | JSONCompact/JSONCompactEachRow フォーマットで変数数のカラムを許可し、追加のカラムを無視し欠落したカラムにデフォルト値を使用します。 | `false` | | -| [`input_format_json_throw_on_bad_escape_sequence`](/operations/settings/settings-formats.md/#input_format_json_throw_on_bad_escape_sequence) | JSON 文字列に不正なエスケープシーケンスが含まれている場合、例外をスローします。無効にした場合、不正なエスケープシーケンスはデータ内でそのまま残ります。 | `true` | | -| [`input_format_json_empty_as_default`](/operations/settings/settings-formats.md/#input_format_json_empty_as_default) | JSON 入力の空のフィールドをデフォルト値として扱います。 | `false` | 複雑なデフォルト式の場合、[`input_format_defaults_for_omitted_fields`](/operations/settings/settings-formats.md/#input_format_defaults_for_omitted_fields) も有効にする必要があります。 | -| [`output_format_json_quote_64bit_integers`](/operations/settings/settings-formats.md/#output_format_json_quote_64bit_integers) | JSON 出力フォーマットで 64 ビット整数の引用を制御します。 | `true` | | -| [`output_format_json_quote_64bit_floats`](/operations/settings/settings-formats.md/#output_format_json_quote_64bit_floats) | JSON 出力フォーマットで 64 ビット浮動小数点数の引用を制御します。 | `false` | | -| [`output_format_json_quote_denormals`](/operations/settings/settings-formats.md/#output_format_json_quote_denormals) | JSON 出力フォーマットで '+nan'、'-nan'、'+inf'、'-inf' の出力を有効にします。 | `false` | | -| [`output_format_json_quote_decimals`](/operations/settings/settings-formats.md/#output_format_json_quote_decimals) | JSON 出力フォーマットで小数の引用を制御します。 | `false` | | -| [`output_format_json_escape_forward_slashes`](/operations/settings/settings-formats.md/#output_format_json_escape_forward_slashes) | JSON 出力フォーマットで文字列出力のスラッシュをエスケープするかどうかを制御します。 | `true` | | -| [`output_format_json_named_tuples_as_objects`](/operations/settings/settings-formats.md/#output_format_json_named_tuples_as_objects) | 名前付きタプルカラムを JSON オブジェクトとしてシリアライズします。 | `true` | | -| [`output_format_json_array_of_rows`](/operations/settings/settings-formats.md/#output_format_json_array_of_rows) | JSONEachRow(Compact) フォーマットで全行の JSON 配列を出力します。 | `false` | | -| [`output_format_json_validate_utf8`](/operations/settings/settings-formats.md/#output_format_json_validate_utf8) | JSON 出力フォーマットで UTF-8 シーケンスの検証を有効にします(JSON/JSONCompact/JSONColumnsWithMetadata フォーマットには影響しないため、常に utf8 の検証が行われます)。 | `false` | | diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONObjectEachRow.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONObjectEachRow.md.hash deleted file mode 100644 index a38841c4bf1..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONObjectEachRow.md.hash +++ /dev/null @@ -1 +0,0 @@ -b88790c5bcafea3c diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONStrings.md b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONStrings.md deleted file mode 100644 index f94f86b784e..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONStrings.md +++ /dev/null @@ -1,76 +0,0 @@ ---- -alias: [] -description: 'JSONStrings形式のドキュメント' -input_format: true -keywords: -- 'JSONStrings' -output_format: true -slug: '/interfaces/formats/JSONStrings' -title: 'JSONStrings' ---- - - - -| Input | Output | Alias | -|-------|--------|-------| -| ✔ | ✔ | | - -## 説明 {#description} - -データフィールドが型付きのJSON値ではなく文字列として出力される以外は、[JSON](./JSON.md)フォーマットと異なります。 - -## 使用例 {#example-usage} - -例: - -```json -{ - "meta": - [ - { - 
"name": "num", - "type": "Int32" - }, - { - "name": "str", - "type": "String" - }, - { - "name": "arr", - "type": "Array(UInt8)" - } - ], - - "data": - [ - { - "num": "42", - "str": "hello", - "arr": "[0,1]" - }, - { - "num": "43", - "str": "hello", - "arr": "[0,1,2]" - }, - { - "num": "44", - "str": "hello", - "arr": "[0,1,2,3]" - } - ], - - "rows": 3, - - "rows_before_limit_at_least": 3, - - "statistics": - { - "elapsed": 0.001403233, - "rows_read": 3, - "bytes_read": 24 - } -} -``` - -## フォーマット設定 {#format-settings} diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONStrings.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONStrings.md.hash deleted file mode 100644 index d496ad72899..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONStrings.md.hash +++ /dev/null @@ -1 +0,0 @@ -5c847410f39da479 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONStringsEachRow.md b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONStringsEachRow.md deleted file mode 100644 index b400e2115a1..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONStringsEachRow.md +++ /dev/null @@ -1,30 +0,0 @@ ---- -alias: [] -description: 'JSONStringsEachRow フォーマットのドキュメント' -input_format: false -keywords: -- 'JSONStringsEachRow' -output_format: true -slug: '/interfaces/formats/JSONStringsEachRow' -title: 'JSONStringsEachRow' ---- - - - -| Input | Output | Alias | -|-------|--------|-------| -| ✗ | ✔ | | - -## 説明 {#description} - -[`JSONEachRow`](./JSONEachRow.md) との違いは、データフィールドが型付きの JSON 値ではなく、文字列で出力される点です。 - -## 例の使用法 {#example-usage} - -```json -{"num":"42","str":"hello","arr":"[0,1]"} -{"num":"43","str":"hello","arr":"[0,1,2]"} -{"num":"44","str":"hello","arr":"[0,1,2,3]"} -``` - -## フォーマット設定 {#format-settings} diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONStringsEachRow.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONStringsEachRow.md.hash deleted file mode 100644 index 1902f6bc8a3..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONStringsEachRow.md.hash +++ /dev/null @@ -1 +0,0 @@ -e5f565036cc49dc4 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONStringsEachRowWithProgress.md b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONStringsEachRowWithProgress.md deleted file mode 100644 index 95e651abe54..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONStringsEachRowWithProgress.md +++ /dev/null @@ -1,24 +0,0 @@ ---- -description: 'JSONStringsEachRowWithProgress フォーマットのドキュメント' -keywords: -- 'JSONStringsEachRowWithProgress' -slug: '/interfaces/formats/JSONStringsEachRowWithProgress' -title: 'JSONStringsEachRowWithProgress' ---- - - - -## 説明 {#description} - -`JSONEachRow`/`JSONStringsEachRow` とは異なり、ClickHouse は進行状況情報を JSON 値としても出力します。 - -## 使用例 {#example-usage} - -```json -{"row":{"num":42,"str":"hello","arr":[0,1]}} -{"row":{"num":43,"str":"hello","arr":[0,1,2]}} -{"row":{"num":44,"str":"hello","arr":[0,1,2,3]}} -{"progress":{"read_rows":"3","read_bytes":"24","written_rows":"0","written_bytes":"0","total_rows_to_read":"3"}} -``` - -## フォーマット設定 {#format-settings} diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONStringsEachRowWithProgress.md.hash 
b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONStringsEachRowWithProgress.md.hash deleted file mode 100644 index 604a5ceb05b..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONStringsEachRowWithProgress.md.hash +++ /dev/null @@ -1 +0,0 @@ -94c7cfc6539cfdad diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/PrettyJSONEachRow.md b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/PrettyJSONEachRow.md deleted file mode 100644 index 16107c840e8..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/PrettyJSONEachRow.md +++ /dev/null @@ -1,56 +0,0 @@ ---- -alias: -- 'PrettyJSONLines' -- 'PrettyNDJSON' -description: 'PrettyJSONEachRow フォーマットのドキュメント' -input_format: false -keywords: -- 'PrettyJSONEachRow' -- 'PrettyJSONLines' -- 'PrettyNDJSON' -output_format: true -slug: '/interfaces/formats/PrettyJSONEachRow' -title: 'PrettyJSONEachRow' ---- - - - -| Input | Output | Alias | -|-------|--------|-----------------------------------| -| ✗ | ✔ | `PrettyJSONLines`, `PrettyNDJSON` | - -## 説明 {#description} - -[JSONEachRow](./JSONEachRow.md) とは異なり、JSONが新しい行区切りと4つのスペースのインデントできれいにフォーマットされています。 - -## 使用例 {#example-usage} - -```json -{ - "num": "42", - "str": "hello", - "arr": [ - "0", - "1" - ], - "tuple": { - "num": 42, - "str": "world" - } -} -{ - "num": "43", - "str": "hello", - "arr": [ - "0", - "1", - "2" - ], - "tuple": { - "num": 43, - "str": "world" - } -} -``` - -## フォーマット設定 {#format-settings} diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/PrettyJSONEachRow.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/PrettyJSONEachRow.md.hash deleted file mode 100644 index 766f94091f5..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/PrettyJSONEachRow.md.hash +++ /dev/null @@ -1 +0,0 @@ -500c14d3bd726710 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/format-settings.md b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/format-settings.md deleted file mode 100644 index 3312e959a08..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/format-settings.md +++ /dev/null @@ -1,40 +0,0 @@ ---- -description: 'JSONフォーマットの設定のリスト' -keywords: -- 'Format Settings' -- 'JSON' -slug: '/interfaces/formats/JSON/format-settings' -title: 'JSON用フォーマット設定' ---- - - - -このページでは、すべてのJSON形式に共通するフォーマット設定を見つけることができます。 - - - -| 設定 | 説明 | デフォルト | 注 | -|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------|---------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| [`input_format_import_nested_json`](/operations/settings/settings-formats.md/#input_format_import_nested_json) | ネストされたJSONデータをネストされたテーブルにマッピングします(JSONEachRow形式で機能します)。 | `false` | | -| [`input_format_json_read_bools_as_numbers`](/operations/settings/settings-formats.md/#input_format_json_read_bools_as_numbers) | JSON入力形式でブール値を数値として解析できるようにします。 | `true` | | -| 
[`input_format_json_read_bools_as_strings`](/operations/settings/settings-formats.md/#input_format_json_read_bools_as_strings) | JSON入力形式でブール値を文字列として解析できるようにします。 | `true` | | -| [`input_format_json_read_numbers_as_strings`](/operations/settings/settings-formats.md/#input_format_json_read_numbers_as_strings) | JSON入力形式で数値を文字列として解析できるようにします。 | `true` | | -| [`input_format_json_read_arrays_as_strings`](/operations/settings/settings-formats.md/#input_format_json_read_arrays_as_strings) | JSON入力形式でJSON配列を文字列として解析できるようにします。 | `true` | | -| [`input_format_json_read_objects_as_strings`](/operations/settings/settings-formats.md/#input_format_json_read_objects_as_strings) | JSON入力形式でJSONオブジェクトを文字列として解析できるようにします。 | `true` | | -| [`input_format_json_named_tuples_as_objects`](/operations/settings/settings-formats.md/#input_format_json_named_tuples_as_objects) | 名前付きタプルカラムをJSONオブジェクトとして解析します。 | `true` | | -| [`input_format_json_try_infer_numbers_from_strings`](/operations/settings/settings-formats.md/#input_format_json_try_infer_numbers_from_strings) | スキーマ推論中に文字列フィールドから数値を推測しようとします。 | `false` | | -| [`input_format_json_try_infer_named_tuples_from_objects`](/operations/settings/settings-formats.md/#input_format_json_try_infer_named_tuples_from_objects) | スキーマ推論中にJSONオブジェクトから名前付きタプルを推測しようとします。 | `true` | | -| [`input_format_json_infer_incomplete_types_as_strings`](/operations/settings/settings-formats.md/#input_format_json_infer_incomplete_types_as_strings) | JSON入力形式で、Nullまたは空のオブジェクト/配列のみを含むキーには、文字列型を使用します。 | `true` | | -| [`input_format_json_defaults_for_missing_elements_in_named_tuple`](/operations/settings/settings-formats.md/#input_format_json_defaults_for_missing_elements_in_named_tuple) | 名前付きタプルの解析中に、JSONオブジェクト内の欠落した要素にデフォルト値を挿入します。 | `true` | | -| [`input_format_json_ignore_unknown_keys_in_named_tuple`](/operations/settings/settings-formats.md/#input_format_json_ignore_unknown_keys_in_named_tuple) | 名前付きタプルのためにJSONオブジェクト内の未知のキーを無視します。 | `false` | | -| [`input_format_json_compact_allow_variable_number_of_columns`](/operations/settings/settings-formats.md/#input_format_json_compact_allow_variable_number_of_columns) | JSONCompact/JSONCompactEachRow形式で可変数のカラムを許可し、余分なカラムを無視し、欠落したカラムにはデフォルト値を使用します。 | `false` | | -| [`input_format_json_throw_on_bad_escape_sequence`](/operations/settings/settings-formats.md/#input_format_json_throw_on_bad_escape_sequence) | JSON文字列に不正なエスケープシーケンスが含まれている場合、例外をスローします。無効にすると、不正なエスケープシーケンスはデータ内にそのまま残ります。 | `true` | | -| [`input_format_json_empty_as_default`](/operations/settings/settings-formats.md/#input_format_json_empty_as_default) | JSON入力内の空のフィールドをデフォルト値と見なします。 | `false` | 複雑なデフォルト式については、[input_format_defaults_for_omitted_fields](/operations/settings/settings-formats.md/#input_format_defaults_for_omitted_fields)も有効にする必要があります。 | -| [`output_format_json_quote_64bit_integers`](/operations/settings/settings-formats.md/#output_format_json_quote_64bit_integers) | JSON出力形式での64ビット整数の引用を制御します。 | `true` | | -| [`output_format_json_quote_64bit_floats`](/operations/settings/settings-formats.md/#output_format_json_quote_64bit_floats) | JSON出力形式での64ビット浮動小数点数の引用を制御します。 | `false` | | -| [`output_format_json_quote_denormals`](/operations/settings/settings-formats.md/#output_format_json_quote_denormals) | JSON出力形式での'+nan', '-nan', '+inf', '-inf'出力を有効にします。 | `false` | | -| [`output_format_json_quote_decimals`](/operations/settings/settings-formats.md/#output_format_json_quote_decimals) | JSON出力形式での小数点の引用を制御します。 | `false` | | -| 
[`output_format_json_escape_forward_slashes`](/operations/settings/settings-formats.md/#output_format_json_escape_forward_slashes) | JSON出力形式での文字列出力に対するスラッシュのエスケープを制御します。 | `true` | | -| [`output_format_json_named_tuples_as_objects`](/operations/settings/settings-formats.md/#output_format_json_named_tuples_as_objects) | 名前付きタプルカラムをJSONオブジェクトとしてシリアライズします。 | `true` | | -| [`output_format_json_array_of_rows`](/operations/settings/settings-formats.md/#output_format_json_array_of_rows) | JSONEachRow(Compact)形式で、すべての行を含むJSON配列を出力します。 | `false` | | -| [`output_format_json_validate_utf8`](/operations/settings/settings-formats.md/#output_format_json_validate_utf8) | JSON出力形式におけるUTF-8シーケンスの検証を有効にします。 | `false` | JSON/JSONCompact/JSONColumnsWithMetadata形式には影響しないことに注意してください。これらは常にUTF-8を検証します。 | diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/format-settings.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/format-settings.md.hash deleted file mode 100644 index e8b7d489af6..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/format-settings.md.hash +++ /dev/null @@ -1 +0,0 @@ -cf015bc00b9988ea diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/LineAsString/LineAsString.md b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/LineAsString/LineAsString.md deleted file mode 100644 index 9c8ae6c3b75..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/LineAsString/LineAsString.md +++ /dev/null @@ -1,39 +0,0 @@ ---- -alias: [] -description: 'LineAsString フォーマットのドキュメント' -input_format: true -keywords: -- 'LineAsString' -output_format: true -slug: '/interfaces/formats/LineAsString' -title: 'LineAsString' ---- - - - -| Input | Output | Alias | -|-------|--------|-------| -| ✔ | ✔ | | - -## 説明 {#description} - -`LineAsString` フォーマットは、入力データの各行を単一の文字列値として解釈します。 -このフォーマットは、[String](/sql-reference/data-types/string.md) 型の単一フィールドを持つテーブルに対してのみ解析可能です。 -残りのカラムは [`DEFAULT`](/sql-reference/statements/create/table.md/#default)、[`MATERIALIZED`](/sql-reference/statements/create/view#materialized-view) に設定するか、省略する必要があります。 - -## 使用例 {#example-usage} - -```sql title="Query" -DROP TABLE IF EXISTS line_as_string; -CREATE TABLE line_as_string (field String) ENGINE = Memory; -INSERT INTO line_as_string FORMAT LineAsString "I love apple", "I love banana", "I love orange"; -SELECT * FROM line_as_string; -``` - -```text title="Response" -┌─field─────────────────────────────────────────────┐ -│ "I love apple", "I love banana", "I love orange"; │ -└───────────────────────────────────────────────────┘ -``` - -## フォーマット設定 {#format-settings} diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/LineAsString/LineAsString.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/LineAsString/LineAsString.md.hash deleted file mode 100644 index 69d061d73d6..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/LineAsString/LineAsString.md.hash +++ /dev/null @@ -1 +0,0 @@ -67ff64b0772354ae diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/LineAsString/LineAsStringWithNames.md b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/LineAsString/LineAsStringWithNames.md deleted file mode 100644 index 3c3f639cdf9..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/LineAsString/LineAsStringWithNames.md +++ /dev/null @@ -1,43 +0,0 
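`LineAsString` は、ログのような非構造化テキストを 1 行 1 レコードとして読み込む用途にも使えます。次は `clickhouse-local` などでの一例で、ファイル名 `access.log` は説明用の仮の名前です。

```sql title="クエリ"
SELECT line
FROM file('access.log', LineAsString, 'line String')
LIMIT 3
```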
@@ ---- -alias: [] -description: 'LineAsStringWithNames フォーマットのドキュメント' -input_format: true -keywords: -- 'LineAsStringWithNames' -output_format: true -slug: '/interfaces/formats/LineAsStringWithNames' -title: 'LineAsStringWithNames' ---- - - - -| Input | Output | Alias | -|-------|--------|-------| -| ✗ | ✔ | | - -## 説明 {#description} - -`LineAsStringWithNames`形式は、[`LineAsString`](./LineAsString.md)形式に似ていますが、カラム名を含むヘッダ行を出力します。 - -## 使用例 {#example-usage} - -```sql title="クエリ" -CREATE TABLE example ( - name String, - value Int32 -) -ENGINE = Memory; - -INSERT INTO example VALUES ('John', 30), ('Jane', 25), ('Peter', 35); - -SELECT * FROM example FORMAT LineAsStringWithNames; -``` - -```response title="レスポンス" -name value -John 30 -Jane 25 -Peter 35 -``` - -## フォーマット設定 {#format-settings} diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/LineAsString/LineAsStringWithNames.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/LineAsString/LineAsStringWithNames.md.hash deleted file mode 100644 index ba8d6f0e655..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/LineAsString/LineAsStringWithNames.md.hash +++ /dev/null @@ -1 +0,0 @@ -54ed29f79368ee94 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/LineAsString/LineAsStringWithNamesAndTypes.md b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/LineAsString/LineAsStringWithNamesAndTypes.md deleted file mode 100644 index 9dcb5c77625..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/LineAsString/LineAsStringWithNamesAndTypes.md +++ /dev/null @@ -1,44 +0,0 @@ ---- -alias: [] -description: 'LineAsStringWithNamesAndTypes フォーマットのドキュメント' -input_format: false -keywords: -- 'LineAsStringWithNamesAndTypes' -output_format: true -slug: '/interfaces/formats/LineAsStringWithNamesAndTypes' -title: 'LineAsStringWithNamesAndTypes' ---- - - - -| Input | Output | Alias | -|-------|--------|-------| -| ✗ | ✔ | | - -## 説明 {#description} - -`LineAsStringWithNames` フォーマットは、[`LineAsString`](./LineAsString.md) フォーマットに似ていますが、2つのヘッダ行を印刷します:1つはカラム名、もう1つはタイプです。 - -## 例の使用法 {#example-usage} - -```sql -CREATE TABLE example ( - name String, - value Int32 -) -ENGINE = Memory; - -INSERT INTO example VALUES ('John', 30), ('Jane', 25), ('Peter', 35); - -SELECT * FROM example FORMAT LineAsStringWithNamesAndTypes; -``` - -```response title="応答" -name value -String Int32 -John 30 -Jane 25 -Peter 35 -``` - -## フォーマット設定 {#format-settings} diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/LineAsString/LineAsStringWithNamesAndTypes.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/LineAsString/LineAsStringWithNamesAndTypes.md.hash deleted file mode 100644 index 6d565a976a6..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/LineAsString/LineAsStringWithNamesAndTypes.md.hash +++ /dev/null @@ -1 +0,0 @@ -63747cd4d06633b5 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Markdown.md b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Markdown.md deleted file mode 100644 index 50e563301ee..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Markdown.md +++ /dev/null @@ -1,36 +0,0 @@ ---- -description: 'Markdown formatの文書' -keywords: -- 'Markdown' -slug: '/interfaces/formats/Markdown' -title: 'Markdown' ---- - - - -## 説明 {#description} - 
-[Markdown](https://en.wikipedia.org/wiki/Markdown) 形式を使用して結果をエクスポートすることで、`.md` ファイルに貼り付ける準備が整った出力を生成できます。 - -マークダウンテーブルは自動的に生成され、Github のようなマークダウン対応プラットフォームで使用できます。この形式は出力専用です。 - -## 使用例 {#example-usage} - -```sql -SELECT - number, - number * 2 -FROM numbers(5) -FORMAT Markdown -``` -```results -| number | multiply(number, 2) | -|-:|-:| -| 0 | 0 | -| 1 | 2 | -| 2 | 4 | -| 3 | 6 | -| 4 | 8 | -``` - -## フォーマット設定 {#format-settings} diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Markdown.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Markdown.md.hash deleted file mode 100644 index d91166f901a..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Markdown.md.hash +++ /dev/null @@ -1 +0,0 @@ -25b675b39b49d7f5 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/MsgPack.md b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/MsgPack.md deleted file mode 100644 index 00371300b14..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/MsgPack.md +++ /dev/null @@ -1,62 +0,0 @@ ---- -alias: [] -description: 'MsgPack形式のドキュメント' -input_format: true -keywords: -- 'MsgPack' -output_format: true -slug: '/interfaces/formats/MsgPack' -title: 'MsgPack' ---- - - - -| Input | Output | Alias | -|-------|--------|-------| -| ✔ | ✔ | | - -## 説明 {#description} - -ClickHouseは、[MessagePack](https://msgpack.org/)データファイルの読み取りと書き込みをサポートしています。 - -## データ型の対応 {#data-types-matching} - -| MessagePackデータ型(`INSERT`) | ClickHouseデータ型 | MessagePackデータ型(`SELECT`) | -|-----------------------------------------------------------------------|---------------------------------------------------------------------------------------------------------|-----------------------------------| -| `uint N`、`positive fixint` | [`UIntN`](/sql-reference/data-types/int-uint.md) | `uint N` | -| `int N`、`negative fixint` | [`IntN`](/sql-reference/data-types/int-uint.md) | `int N` | -| `bool` | [`UInt8`](/sql-reference/data-types/int-uint.md) | `uint 8` | -| `fixstr`、`str 8`、`str 16`、`str 32`、`bin 8`、`bin 16`、`bin 32` | [`String`](/sql-reference/data-types/string.md) | `bin 8`、`bin 16`、`bin 32` | -| `fixstr`、`str 8`、`str 16`、`str 32`、`bin 8`、`bin 16`、`bin 32` | [`FixedString`](/sql-reference/data-types/fixedstring.md) | `bin 8`、`bin 16`、`bin 32` | -| `float 32` | [`Float32`](/sql-reference/data-types/float.md) | `float 32` | -| `float 64` | [`Float64`](/sql-reference/data-types/float.md) | `float 64` | -| `uint 16` | [`Date`](/sql-reference/data-types/date.md) | `uint 16` | -| `int 32` | [`Date32`](/sql-reference/data-types/date32.md) | `int 32` | -| `uint 32` | [`DateTime`](/sql-reference/data-types/datetime.md) | `uint 32` | -| `uint 64` | [`DateTime64`](/sql-reference/data-types/datetime.md) | `uint 64` | -| `fixarray`、`array 16`、`array 32` | [`Array`](/sql-reference/data-types/array.md)/[`Tuple`](/sql-reference/data-types/tuple.md) | `fixarray`、`array 16`、`array 32` | -| `fixmap`、`map 16`、`map 32` | [`Map`](/sql-reference/data-types/map.md) | `fixmap`、`map 16`、`map 32` | -| `uint 32` | [`IPv4`](/sql-reference/data-types/ipv4.md) | `uint 32` | -| `bin 8` | [`String`](/sql-reference/data-types/string.md) | `bin 8` | -| `int 8` | [`Enum8`](/sql-reference/data-types/enum.md) | `int 8` | -| `bin 8` | [`(U)Int128`/`(U)Int256`](/sql-reference/data-types/int-uint.md) | `bin 8` | -| `int 32` | [`Decimal32`](/sql-reference/data-types/decimal.md) | `int 32` | -| `int 64` | 
[`Decimal64`](/sql-reference/data-types/decimal.md) | `int 64` | -| `bin 8` | [`Decimal128`/`Decimal256`](/sql-reference/data-types/decimal.md) | `bin 8 ` | - -## 例示使用法 {#example-usage} - -ファイル ".msgpk" への書き込み: - -```sql -$ clickhouse-client --query="CREATE TABLE msgpack (array Array(UInt8)) ENGINE = Memory;" -$ clickhouse-client --query="INSERT INTO msgpack VALUES ([0, 1, 2, 3, 42, 253, 254, 255]), ([255, 254, 253, 42, 3, 2, 1, 0])"; -$ clickhouse-client --query="SELECT * FROM msgpack FORMAT MsgPack" > tmp_msgpack.msgpk; -``` - -## フォーマット設定 {#format-settings} - -| 設定 | 説明 | デフォルト | -|-------------------------------------------------------------------------------------------------------------------------------------------------|------------------------------------------------------------------------------------------------|-------------| -| [`input_format_msgpack_number_of_columns`](/operations/settings/settings-formats.md/#input_format_msgpack_number_of_columns) | 挿入されたMsgPackデータのカラム数。データから自動的にスキーマを推測するために使用されます。 | `0` | -| [`output_format_msgpack_uuid_representation`](/operations/settings/settings-formats.md/#output_format_msgpack_uuid_representation) | MsgPack形式でUUIDを出力する方法。 | `EXT` | diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/MsgPack.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/MsgPack.md.hash deleted file mode 100644 index 5916f3b8477..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/MsgPack.md.hash +++ /dev/null @@ -1 +0,0 @@ -0bd67919de3290bf diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/MySQLDump.md b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/MySQLDump.md deleted file mode 100644 index 4832878f275..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/MySQLDump.md +++ /dev/null @@ -1,89 +0,0 @@ ---- -alias: [] -description: 'MySQLDump形式のドキュメント' -input_format: true -keywords: -- 'MySQLDump' -output_format: false -slug: '/interfaces/formats/MySQLDump' -title: 'MySQLDump' ---- - - - -| Input | Output | Alias | -|-------|---------|-------| -| ✔ | ✗ | | - -## 説明 {#description} - -ClickHouse は MySQL の [ダンプ](https://dev.mysql.com/doc/refman/8.0/en/mysqldump.html) を読み込むことをサポートしています。 - -ダンプ内の単一テーブルに属する `INSERT` クエリからすべてのデータを読み込みます。 -複数のテーブルが存在する場合、デフォルトでは最初のテーブルからデータを読み込みます。 - -:::note -このフォーマットはスキーマ推論をサポートします:ダンプに指定されたテーブルに対する `CREATE` クエリが含まれている場合、その構造がそこから推論されます。それ以外の場合、`INSERT` クエリのデータからスキーマが推論されます。 -::: - -## 使用例 {#example-usage} - -以下の SQL ダンプファイルが与えられた場合: - -```sql title="dump.sql" -/*!40101 SET @saved_cs_client = @@character_set_client */; -/*!50503 SET character_set_client = utf8mb4 */; -CREATE TABLE `test` ( - `x` int DEFAULT NULL, - `y` int DEFAULT NULL -) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci; -/*!40101 SET character_set_client = @saved_cs_client */; -INSERT INTO `test` VALUES (1,NULL),(2,NULL),(3,NULL),(3,NULL),(4,NULL),(5,NULL),(6,7); -/*!40101 SET @saved_cs_client = @@character_set_client */; -/*!50503 SET character_set_client = utf8mb4 */; -CREATE TABLE `test 3` ( - `y` int DEFAULT NULL -) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci; -/*!40101 SET character_set_client = @saved_cs_client */; -INSERT INTO `test 3` VALUES (1); -/*!40101 SET @saved_cs_client = @@character_set_client */; -/*!50503 SET character_set_client = utf8mb4 */; -CREATE TABLE `test2` ( - `x` int DEFAULT NULL -) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 
COLLATE=utf8mb4_0900_ai_ci; -/*!40101 SET character_set_client = @saved_cs_client */; -INSERT INTO `test2` VALUES (1),(2),(3); -``` - -次のクエリを実行できます: - -```sql title="Query" -DESCRIBE TABLE file(dump.sql, MySQLDump) -SETTINGS input_format_mysql_dump_table_name = 'test2' -``` - -```response title="Response" -┌─name─┬─type────────────┬─default_type─┬─default_expression─┬─comment─┬─codec_expression─┬─ttl_expression─┐ -│ x │ Nullable(Int32) │ │ │ │ │ │ -└──────┴─────────────────┴──────────────┴────────────────────┴─────────┴──────────────────┴────────────────┘ -``` - -```sql title="Query" -SELECT * -FROM file(dump.sql, MySQLDump) -SETTINGS input_format_mysql_dump_table_name = 'test2' -``` - -```response title="Response" -┌─x─┐ -│ 1 │ -│ 2 │ -│ 3 │ -└───┘ -``` - -## フォーマット設定 {#format-settings} - -データを読み込むテーブルの名前を [`input_format_mysql_dump_table_name`](/operations/settings/settings-formats.md/#input_format_mysql_dump_table_name) 設定を使用して指定できます。 -設定 `input_format_mysql_dump_map_columns` が `1` に設定されていて、ダンプに指定されたテーブルに対する `CREATE` クエリまたは `INSERT` クエリ内のカラム名が含まれている場合、入力データのカラムがテーブルのカラムに名前でマッピングされます。 -未知の名前のカラムは、設定 [`input_format_skip_unknown_fields`](/operations/settings/settings-formats.md/#input_format_skip_unknown_fields) が `1` に設定されている場合はスキップされます。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/MySQLDump.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/MySQLDump.md.hash deleted file mode 100644 index 4f886f102a7..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/MySQLDump.md.hash +++ /dev/null @@ -1 +0,0 @@ -c00070bf0366a02c diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/MySQLWire.md b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/MySQLWire.md deleted file mode 100644 index 9a54d1c6488..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/MySQLWire.md +++ /dev/null @@ -1,15 +0,0 @@ ---- -description: 'MySQLWireフォーマットのドキュメント' -keywords: -- 'MySQLWire' -slug: '/interfaces/formats/MySQLWire' -title: 'MySQLWire' ---- - - - -## 説明 {#description} - -## 使用例 {#example-usage} - -## フォーマット設定 {#format-settings} diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/MySQLWire.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/MySQLWire.md.hash deleted file mode 100644 index 39fa1f17150..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/MySQLWire.md.hash +++ /dev/null @@ -1 +0,0 @@ -cfb14249e79a842a diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Native.md b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Native.md deleted file mode 100644 index ead5303be94..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Native.md +++ /dev/null @@ -1,33 +0,0 @@ ---- -alias: [] -description: 'Nativeフォーマットのドキュメント' -input_format: true -keywords: -- 'Native' -output_format: true -slug: '/interfaces/formats/Native' -title: 'Native' ---- - - - -| Input | Output | Alias | -|-------|--------|-------| -| ✔ | ✔ | | - -## 説明 {#description} - -`Native` フォーマットは、ClickHouse の最も効率的なフォーマットです。なぜなら、これは本当に「列指向」であり、カラムを行に変換しないからです。 - -このフォーマットでは、データは [ブロック](/development/architecture#block) にバイナリフォーマットで書き込まれ、読み取られます。各ブロックについて、行数、カラム数、カラム名およびタイプ、ブロック内のカラムの部分が次々と記録されます。 - -これはサーバー間のインターフェイス、コマンドラインクライアントの使用、および C++ クライアントとのインタラクションに使用されるフォーマットです。 - -:::tip -このフォーマットを使用すると、ClickHouse DBMS だけが読み取ることができるダンプを迅速に生成できます。 
-自分でこのフォーマットで作業するのは実用的ではないかもしれません。 -::: - -## 例の使用法 {#example-usage} - -## フォーマット設定 {#format-settings} diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Native.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Native.md.hash deleted file mode 100644 index f09b14a8471..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Native.md.hash +++ /dev/null @@ -1 +0,0 @@ -505a099951498b12 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Npy.md b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Npy.md deleted file mode 100644 index 6b310a84bcc..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Npy.md +++ /dev/null @@ -1,76 +0,0 @@ ---- -alias: [] -description: 'Npyフォーマットのドキュメント' -input_format: true -keywords: -- 'Npy' -output_format: true -slug: '/interfaces/formats/Npy' -title: 'Npy' ---- - - - -| Input | Output | Alias | -|-------|--------|-------| -| ✔ | ✔ | | - -## 説明 {#description} - -`Npy` 形式は、`.npy` ファイルから NumPy 配列を ClickHouse に読み込むために設計されています。 -NumPy ファイル形式は、数値データの配列を効率的に保存するためのバイナリ形式です。 -インポート時、ClickHouse は最上位の次元を単一カラムの行の配列として扱います。 - -下の表には、サポートされている Npy データ型と、ClickHouse 内での対応する型が示されています。 - -## データ型の対応 {#data_types-matching} - - -| Npy データ型 (`INSERT`) | ClickHouse データ型 | Npy データ型 (`SELECT`) | -|--------------------------|-----------------------------------------------------------------|-------------------------| -| `i1` | [Int8](/sql-reference/data-types/int-uint.md) | `i1` | -| `i2` | [Int16](/sql-reference/data-types/int-uint.md) | `i2` | -| `i4` | [Int32](/sql-reference/data-types/int-uint.md) | `i4` | -| `i8` | [Int64](/sql-reference/data-types/int-uint.md) | `i8` | -| `u1`, `b1` | [UInt8](/sql-reference/data-types/int-uint.md) | `u1` | -| `u2` | [UInt16](/sql-reference/data-types/int-uint.md) | `u2` | -| `u4` | [UInt32](/sql-reference/data-types/int-uint.md) | `u4` | -| `u8` | [UInt64](/sql-reference/data-types/int-uint.md) | `u8` | -| `f2`, `f4` | [Float32](/sql-reference/data-types/float.md) | `f4` | -| `f8` | [Float64](/sql-reference/data-types/float.md) | `f8` | -| `S`, `U` | [String](/sql-reference/data-types/string.md) | `S` | -| | [FixedString](/sql-reference/data-types/fixedstring.md) | `S` | - -## 使用例 {#example-usage} - -### Python を使用して .npy 形式で配列を保存する {#saving-an-array-in-npy-format-using-python} - -```Python -import numpy as np -arr = np.array([[[1],[2],[3]],[[4],[5],[6]]]) -np.save('example_array.npy', arr) -``` - -### ClickHouse で NumPy ファイルを読み込む {#reading-a-numpy-file-in-clickhouse} - -```sql title="クエリ" -SELECT * -FROM file('example_array.npy', Npy) -``` - -```response title="レスポンス" -┌─array─────────┐ -│ [[1],[2],[3]] │ -│ [[4],[5],[6]] │ -└───────────────┘ -``` - -### データの選択 {#selecting-data} - -ClickHouse テーブルからデータを選択し、以下のコマンドを使用して Npy 形式のファイルに保存できます。clickhouse-client を使用します: - -```bash -$ clickhouse-client --query="SELECT {column} FROM {some_table} FORMAT Npy" > {filename.npy} -``` - -## 形式設定 {#format-settings} diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Npy.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Npy.md.hash deleted file mode 100644 index 4a369e1041d..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Npy.md.hash +++ /dev/null @@ -1 +0,0 @@ -ac3af3340ab19abe diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Null.md 
b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Null.md deleted file mode 100644 index 31a4055c569..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Null.md +++ /dev/null @@ -1,53 +0,0 @@ ---- -alias: [] -description: 'Nullフォーマットのドキュメント' -input_format: false -keywords: -- 'Null' -- 'format' -output_format: true -slug: '/interfaces/formats/Null' -title: 'Null' ---- - - - -| Input | Output | Alias | -|-------|--------|-------| -| ✗ | ✔ | | - -## 説明 {#description} - -`Null`フォーマットでは、何も出力されません。 -最初は奇妙に思えるかもしれませんが、何も出力しないにもかかわらず、クエリは依然として処理されることが重要です。 -コマンドラインクライアントを使用する際には、データがクライアントに送信されます。 - -:::tip -`Null`フォーマットは、性能テストに役立つ場合があります。 -::: - -## 使用例 {#example-usage} - -clickhouseクライアントで`play.clickhouse.com`に接続します: - -```bash -clickhouse client --secure --host play.clickhouse.com --user explorer -``` - -次のクエリを実行します: - -```sql title="クエリ" -SELECT town -FROM uk_price_paid -LIMIT 1000 -FORMAT `Null` -``` - -```response title="レスポンス" -0 rows in set. Elapsed: 0.002 sec. Processed 1.00 thousand rows, 2.00 KB (506.97 thousand rows/s., 1.01 MB/s.) -Peak memory usage: 297.74 KiB. -``` - -1000行が処理されたが、結果セットには0行が出力されたことに注意してください。 - -## フォーマット設定 {#format-settings} diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Null.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Null.md.hash deleted file mode 100644 index b94a94b5f2c..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Null.md.hash +++ /dev/null @@ -1 +0,0 @@ -61c9ee616d9f9933 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/ODBCDriver2.md b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/ODBCDriver2.md deleted file mode 100644 index e95542f1b28..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/ODBCDriver2.md +++ /dev/null @@ -1,15 +0,0 @@ ---- -description: 'ODBCDriver2フォーマットのドキュメント' -keywords: -- 'ODBCDriver2' -slug: '/interfaces/formats/ODBCDriver2' -title: 'ODBCDriver2' ---- - - - -## 説明 {#description} - -## 使用例 {#example-usage} - -## フォーマット設定 {#format-settings} diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/ODBCDriver2.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/ODBCDriver2.md.hash deleted file mode 100644 index c59ea3091a6..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/ODBCDriver2.md.hash +++ /dev/null @@ -1 +0,0 @@ -669605d96cef8cbf diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/ORC.md b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/ORC.md deleted file mode 100644 index 1ac93ff53db..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/ORC.md +++ /dev/null @@ -1,79 +0,0 @@ ---- -alias: [] -description: 'ORC フォーマットのドキュメント' -input_format: true -keywords: -- 'ORC' -output_format: true -slug: '/interfaces/formats/ORC' -title: 'ORC' ---- - - - -| Input | Output | Alias | -|-------|--------|-------| -| ✔ | ✔ | | - -## 説明 {#description} - -[Apache ORC](https://orc.apache.org/) は、[Hadoop](https://hadoop.apache.org/) エコシステムで広く使用されている列指向ストレージ形式です。 - -## データ型の一致 {#data-types-matching-orc} - -下の表は、サポートされている ORC データ型と、それに対応する ClickHouse の [データ型](/sql-reference/data-types/index.md) を `INSERT` および `SELECT` クエリで比較したものです。 - -| ORC データ型 (`INSERT`) | ClickHouse データ型 | ORC データ型 (`SELECT`) | 
-|---------------------------------------|-------------------------------------------------------------------------------------------------------------------|--------------------------| -| `Boolean` | [UInt8](/sql-reference/data-types/int-uint.md) | `Boolean` | -| `Tinyint` | [Int8/UInt8](/sql-reference/data-types/int-uint.md)/[Enum8](/sql-reference/data-types/enum.md) | `Tinyint` | -| `Smallint` | [Int16/UInt16](/sql-reference/data-types/int-uint.md)/[Enum16](/sql-reference/data-types/enum.md) | `Smallint` | -| `Int` | [Int32/UInt32](/sql-reference/data-types/int-uint.md) | `Int` | -| `Bigint` | [Int64/UInt32](/sql-reference/data-types/int-uint.md) | `Bigint` | -| `Float` | [Float32](/sql-reference/data-types/float.md) | `Float` | -| `Double` | [Float64](/sql-reference/data-types/float.md) | `Double` | -| `Decimal` | [Decimal](/sql-reference/data-types/decimal.md) | `Decimal` | -| `Date` | [Date32](/sql-reference/data-types/date32.md) | `Date` | -| `Timestamp` | [DateTime64](/sql-reference/data-types/datetime64.md) | `Timestamp` | -| `String`, `Char`, `Varchar`, `Binary` | [String](/sql-reference/data-types/string.md) | `Binary` | -| `List` | [Array](/sql-reference/data-types/array.md) | `List` | -| `Struct` | [Tuple](/sql-reference/data-types/tuple.md) | `Struct` | -| `Map` | [Map](/sql-reference/data-types/map.md) | `Map` | -| `Int` | [IPv4](/sql-reference/data-types/int-uint.md) | `Int` | -| `Binary` | [IPv6](/sql-reference/data-types/ipv6.md) | `Binary` | -| `Binary` | [Int128/UInt128/Int256/UInt256](/sql-reference/data-types/int-uint.md) | `Binary` | -| `Binary` | [Decimal256](/sql-reference/data-types/decimal.md) | `Binary` | - -- 他の型はサポートされていません。 -- 配列はネスト可能で、引数として `Nullable` 型の値を持つことができます。`Tuple` と `Map` 型もネスト可能です。 -- ClickHouse テーブルカラムのデータ型は、対応する ORC データフィールドに一致する必要はありません。データを挿入する際、ClickHouse は上の表に従ってデータ型を解釈し、その後 [キャスト](/sql-reference/functions/type-conversion-functions#cast) して ClickHouse テーブルカラムに設定されたデータ型に変換します。 - -## 使用例 {#example-usage} - -### データの挿入 {#inserting-data-orc} - -以下のコマンドを使用して、ファイルから ClickHouse テーブルに ORC データを挿入できます: - -```bash -$ cat filename.orc | clickhouse-client --query="INSERT INTO some_table FORMAT ORC" -``` - -### データの選択 {#selecting-data-orc} - -以下のコマンドを使用して、ClickHouse テーブルからデータを選択し、ORC フォーマットのファイルに保存できます: - -```bash -$ clickhouse-client --query="SELECT * FROM {some_table} FORMAT ORC" > {filename.orc} -``` - -## 形式設定 {#format-settings} - -| 設定 | 説明 | デフォルト | -|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------|-----------| -| [`output_format_arrow_string_as_string`](/operations/settings/settings-formats.md/#output_format_arrow_string_as_string) | 文字列カラムのためにバイナリではなく Arrow String 型を使用します。 | `false` | -| [`output_format_orc_compression_method`](/operations/settings/settings-formats.md/#output_format_orc_compression_method) | 出力 ORC 形式で使用される圧縮方法。デフォルト値 | `none` | -| [`input_format_arrow_case_insensitive_column_matching`](/operations/settings/settings-formats.md/#input_format_arrow_case_insensitive_column_matching) | Arrow カラムと ClickHouse カラムの一致を確認する際に大文字と小文字を無視します。 | `false` | -| [`input_format_arrow_allow_missing_columns`](/operations/settings/settings-formats.md/#input_format_arrow_allow_missing_columns) | Arrow データを読み取る際に欠落したカラムを許可します。 | `false` | -| 
[`input_format_arrow_skip_columns_with_unsupported_types_in_schema_inference`](/operations/settings/settings-formats.md/#input_format_arrow_skip_columns_with_unsupported_types_in_schema_inference) | Arrow 形式のスキーマ推論中にサポートされていない型のカラムをスキップすることを許可します。| `false` | - -Hadoop とデータを交換するには、[HDFS テーブルエンジン](/engines/table-engines/integrations/hdfs.md)を使用できます。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/ORC.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/ORC.md.hash deleted file mode 100644 index ca6774d93f7..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/ORC.md.hash +++ /dev/null @@ -1 +0,0 @@ -5918150dbac27713 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/One.md b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/One.md deleted file mode 100644 index c4aac59bb3f..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/One.md +++ /dev/null @@ -1,45 +0,0 @@ ---- -alias: [] -description: 'One formatのドキュメント' -input_format: true -keywords: -- 'One' -output_format: false -slug: '/interfaces/formats/One' -title: 'One' ---- - - - -| Input | Output | Alias | -|-------|--------|-------| -| ✔ | ✗ | | - -## 説明 {#description} - -`One` フォーマットは特別な入力フォーマットで、ファイルからデータを読み込まず、カラムの型が [`UInt8`](../../sql-reference/data-types/int-uint.md) で名前が `dummy`、値が `0` という1行のみを返します(`system.one` テーブルのように)。実際のデータを読み込まずに、すべてのファイルをリストするために仮想カラム `_file/_path` と共に使用できます。 - -## 使用例 {#example-usage} - -例: - -```sql title="クエリ" -SELECT _file FROM file('path/to/files/data*', One); -``` - -```text title="レスポンス" -┌─_file────┐ -│ data.csv │ -└──────────┘ -┌─_file──────┐ -│ data.jsonl │ -└────────────┘ -┌─_file────┐ -│ data.tsv │ -└──────────┘ -┌─_file────────┐ -│ data.parquet │ -└──────────────┘ -``` - -## フォーマット設定 {#format-settings} diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/One.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/One.md.hash deleted file mode 100644 index 5c7b322915a..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/One.md.hash +++ /dev/null @@ -1 +0,0 @@ -3196834f09547acd diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Parquet/Parquet.md b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Parquet/Parquet.md deleted file mode 100644 index db0a9a693d5..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Parquet/Parquet.md +++ /dev/null @@ -1,107 +0,0 @@ ---- -alias: [] -description: 'Parquetフォーマットのドキュメント' -input_format: true -keywords: -- 'Parquet' -output_format: true -slug: '/interfaces/formats/Parquet' -title: 'Parquet' ---- - - - -| Input | Output | Alias | -|-------|--------|-------| -| ✔ | ✔ | | - -## 説明 {#description} - -[Apache Parquet](https://parquet.apache.org/) は、Hadoop エコシステムで広く使用されている列指向ストレージ形式です。ClickHouse は、この形式の読み取りおよび書き込み操作をサポートしています。 - -## データ型の一致 {#data-types-matching-parquet} - -以下の表は、サポートされているデータ型と、それらが ClickHouse の [データ型](/sql-reference/data-types/index.md) にどのように一致するかを示しています。 - -| Parquet データ型 (`INSERT`) | ClickHouse データ型 | Parquet データ型 (`SELECT`) | -|-----------------------------------------------|--------------------------------------------------------------------------------------------------------|-------------------------------| -| `BOOL` | [Bool](/sql-reference/data-types/boolean.md) | `BOOL` | -| `UINT8`, `BOOL` | 
[UInt8](/sql-reference/data-types/int-uint.md) | `UINT8` | -| `INT8` | [Int8](/sql-reference/data-types/int-uint.md)/[Enum8](/sql-reference/data-types/enum.md) | `INT8` | -| `UINT16` | [UInt16](/sql-reference/data-types/int-uint.md) | `UINT16` | -| `INT16` | [Int16](/sql-reference/data-types/int-uint.md)/[Enum16](/sql-reference/data-types/enum.md) | `INT16` | -| `UINT32` | [UInt32](/sql-reference/data-types/int-uint.md) | `UINT32` | -| `INT32` | [Int32](/sql-reference/data-types/int-uint.md) | `INT32` | -| `UINT64` | [UInt64](/sql-reference/data-types/int-uint.md) | `UINT64` | -| `INT64` | [Int64](/sql-reference/data-types/int-uint.md) | `INT64` | -| `FLOAT` | [Float32](/sql-reference/data-types/float.md) | `FLOAT` | -| `DOUBLE` | [Float64](/sql-reference/data-types/float.md) | `DOUBLE` | -| `DATE` | [Date32](/sql-reference/data-types/date.md) | `DATE` | -| `TIME (ms)` | [DateTime](/sql-reference/data-types/datetime.md) | `UINT32` | -| `TIMESTAMP`, `TIME (us, ns)` | [DateTime64](/sql-reference/data-types/datetime64.md) | `TIMESTAMP` | -| `STRING`, `BINARY` | [String](/sql-reference/data-types/string.md) | `BINARY` | -| `STRING`, `BINARY`, `FIXED_LENGTH_BYTE_ARRAY` | [FixedString](/sql-reference/data-types/fixedstring.md) | `FIXED_LENGTH_BYTE_ARRAY` | -| `DECIMAL` | [Decimal](/sql-reference/data-types/decimal.md) | `DECIMAL` | -| `LIST` | [Array](/sql-reference/data-types/array.md) | `LIST` | -| `STRUCT` | [Tuple](/sql-reference/data-types/tuple.md) | `STRUCT` | -| `MAP` | [Map](/sql-reference/data-types/map.md) | `MAP` | -| `UINT32` | [IPv4](/sql-reference/data-types/ipv4.md) | `UINT32` | -| `FIXED_LENGTH_BYTE_ARRAY`, `BINARY` | [IPv6](/sql-reference/data-types/ipv6.md) | `FIXED_LENGTH_BYTE_ARRAY` | -| `FIXED_LENGTH_BYTE_ARRAY`, `BINARY` | [Int128/UInt128/Int256/UInt256](/sql-reference/data-types/int-uint.md) | `FIXED_LENGTH_BYTE_ARRAY` | - -配列は入れ子にでき、引数として `Nullable` 型の値を持つことができます。`Tuple` および `Map` 型も入れ子にできます。 - -サポートされていない Parquet データ型は次のとおりです: -- `FIXED_SIZE_BINARY` -- `JSON` -- `UUID` -- `ENUM`。 - -ClickHouse テーブルカラムのデータ型は、挿入される Parquet データの対応するフィールドとは異なる場合があります。データを挿入する際、ClickHouse は上の表に従ってデータ型を解釈し、その後 [キャスト](/sql-reference/functions/type-conversion-functions#cast) を行って、ClickHouse テーブルカラムに設定されたデータ型にデータを変換します。 - -## 使用例 {#example-usage} - -### データの挿入と選択 {#inserting-and-selecting-data-parquet} - -次のコマンドを使用して、ファイルから ClickHouse テーブルに Parquet データを挿入できます: - -```bash -$ cat {filename} | clickhouse-client --query="INSERT INTO {some_table} FORMAT Parquet" -``` - -次のコマンドを使用して、ClickHouse テーブルからデータを選択し、Parquet 形式のファイルに保存できます: - -```bash -$ clickhouse-client --query="SELECT * FROM {some_table} FORMAT Parquet" > {some_file.pq} -``` - -Hadoop とデータを交換するには、[`HDFS テーブルエンジン`](/engines/table-engines/integrations/hdfs.md) を使用できます。 - -## フォーマット設定 {#format-settings} - -| 設定 | 説明 | デフォルト | -|----------------------------------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-------------| -| `input_format_parquet_case_insensitive_column_matching` | Parquet カラムと CH カラムを照合する際に、大文字小文字を無視します。 | `0` | -| `input_format_parquet_preserve_order` | Parquet ファイルから読み取る際に、行の順序を変更しないようにします。通常、これにより遅くなります。 | `0` | -| `input_format_parquet_filter_push_down` | Parquet ファイルを読み取る際に、WHERE/PREWHERE 式および Parquet メタデータの最小/最大統計情報に基づいて全行グループをスキップします。 | `1` | -| `input_format_parquet_bloom_filter_push_down` | Parquet ファイルを読み取る際に、WHERE 式および 
Parquet メタデータのブルームフィルタに基づいて全行グループをスキップします。 | `0` | -| `input_format_parquet_use_native_reader` | Parquet ファイルを読み取る際に、Arrow リーダーの代わりにネイティブリーダーを使用します。 | `0` | -| `input_format_parquet_allow_missing_columns` | Parquet 入力形式読み取り時に、欠落しているカラムを許可します。 | `1` | -| `input_format_parquet_local_file_min_bytes_for_seek` | Local read (file) においてファイルをシークするために必要な最小バイト数です。これにより、Parquet 入力形式で読み取りと無視を行うことができます。 | `8192` | -| `input_format_parquet_enable_row_group_prefetch` | Parquet パース時に行グループのプリフェッチを有効にします。現在、単一スレッドのパースのみがプリフェッチを行うことができます。 | `1` | -| `input_format_parquet_skip_columns_with_unsupported_types_in_schema_inference` | スキーマ推論時に、サポートされていない型のカラムをスキップします。 | `0` | -| `input_format_parquet_max_block_size` | Parquet リーダーの最大ブロックサイズです。 | `65409` | -| `input_format_parquet_prefer_block_bytes` | Parquet リーダーによって出力される平均ブロックバイト数です。 | `16744704` | -| `output_format_parquet_row_group_size` | 行のターゲット Row Group サイズです。 | `1000000` | -| `output_format_parquet_row_group_size_bytes` | 圧縮前のバイトでのターゲット Row Group サイズです。 | `536870912` | -| `output_format_parquet_string_as_string` | 文字列カラムのために Parquet String 型を使用します。 | `1` | -| `output_format_parquet_fixed_string_as_fixed_byte_array` | 固定文字列カラムのために Parquet FIXED_LENGTH_BYTE_ARRAY 型を使用します。 | `1` | -| `output_format_parquet_version` | 出力フォーマット用の Parquet フォーマットバージョンです。サポートされているバージョン:1.0、2.4、2.6、および2.latest (デフォルト)。 | `2.latest` | -| `output_format_parquet_compression_method` | Parquet 出力フォーマットの圧縮方法です。サポートされているコーデック:snappy、lz4、brotli、zstd、gzip、none (非圧縮)。 | `zstd` | -| `output_format_parquet_compliant_nested_types` | Parquet ファイルスキーマで、リスト要素の名称に 'element' を使用します。これは Arrow ライブラリの実装の履歴的な遺物です。一般的には互換性が向上しますが、古いバージョンの Arrow の一部では例外があります。 | `1` | -| `output_format_parquet_use_custom_encoder` | より高速な Parquet エンコーダー実装を使用します。 | `1` | -| `output_format_parquet_parallel_encoding` | 複数スレッドで Parquet エンコーディングを行います。`output_format_parquet_use_custom_encoder` が必要です。 | `1` | -| `output_format_parquet_data_page_size` | 圧縮前のバイト単位のターゲットページサイズです。 | `1048576` | -| `output_format_parquet_batch_size` | この行数ごとにページサイズを確認します。平均値のサイズが数KBを超えるカラムがある場合は減少を検討してください。 | `1024` | -| `output_format_parquet_write_page_index` | Parquet ファイルにページインデックスを書く機能を追加します。 | `1` | -| `input_format_parquet_import_nested` | 廃止された設定で、何もしません。 | `0` | diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Parquet/Parquet.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Parquet/Parquet.md.hash deleted file mode 100644 index 8b4b4967f23..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Parquet/Parquet.md.hash +++ /dev/null @@ -1 +0,0 @@ -0630b44b7c376bc0 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Parquet/ParquetMetadata.md b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Parquet/ParquetMetadata.md deleted file mode 100644 index ae7f876a4f4..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Parquet/ParquetMetadata.md +++ /dev/null @@ -1,145 +0,0 @@ ---- -description: 'ParquetMetadata フォーマットのドキュメント' -keywords: -- 'ParquetMetadata' -slug: '/interfaces/formats/ParquetMetadata' -title: 'ParquetMetadata' ---- - - - -## 説明 {#description} - -Parquetファイルメタデータを読み取るための特別なフォーマットです (https://parquet.apache.org/docs/file-format/metadata/)。常に次の構造/内容で1行が出力されます: -- `num_columns` - カラムの数 -- `num_rows` - 行の総数 -- `num_row_groups` - 行グループの総数 -- `format_version` - parquetフォーマットバージョン、常に1.0または2.6 -- `total_uncompressed_size` - データの総未圧縮バイトサイズ、すべての行グループのtotal_byte_sizeの合計として計算されます -- 
`total_compressed_size` - データの総圧縮バイトサイズ、すべての行グループのtotal_compressed_sizeの合計として計算されます -- `columns` - 次の構造を持つカラムメタデータのリスト: - - `name` - カラム名 - - `path` - カラムパス(ネストされたカラムの名前とは異なります) - - `max_definition_level` - 最大定義レベル - - `max_repetition_level` - 最大繰り返しレベル - - `physical_type` - カラムの物理タイプ - - `logical_type` - カラムの論理タイプ - - `compression` - このカラムに使用される圧縮 - - `total_uncompressed_size` - カラムの総未圧縮バイトサイズ、すべての行グループのカラムのtotal_uncompressed_sizeの合計として計算されます - - `total_compressed_size` - カラムの総圧縮バイトサイズ、すべての行グループのカラムのtotal_compressed_sizeの合計として計算されます - - `space_saved` - 圧縮によって保存されたスペースのパーセント、(1 - total_compressed_size/total_uncompressed_size)として計算されます - - `encodings` - このカラムに使用されるエンコーディングのリスト -- `row_groups` - 次の構造を持つ行グループメタデータのリスト: - - `num_columns` - 行グループ内のカラム数 - - `num_rows` - 行グループ内の行数 - - `total_uncompressed_size` - 行グループの総未圧縮バイトサイズ - - `total_compressed_size` - 行グループの総圧縮バイトサイズ - - `columns` - 次の構造を持つカラムチャンクメタデータのリスト: - - `name` - カラム名 - - `path` - カラムパス - - `total_compressed_size` - カラムの総圧縮バイトサイズ - - `total_uncompressed_size` - 行グループの総未圧縮バイトサイズ - - `have_statistics` - カラムチャンクメタデータがカラム統計を含むかどうかを示すブールフラグ - - `statistics` - カラムチャンクの統計(have_statistics = falseの場合、すべてのフィールドはNULL)次の構造: - - `num_values` - カラムチャンク内の非NULL値の数 - - `null_count` - カラムチャンク内のNULL値の数 - - `distinct_count` - カラムチャンク内の異なる値の数 - - `min` - カラムチャンクの最小値 - - `max` - カラムチャンクの最大値 - -## 使用例 {#example-usage} - -例: - -```sql -SELECT * -FROM file(data.parquet, ParquetMetadata) -FORMAT PrettyJSONEachRow -``` - -```json -{ - "num_columns": "2", - "num_rows": "100000", - "num_row_groups": "2", - "format_version": "2.6", - "metadata_size": "577", - "total_uncompressed_size": "282436", - "total_compressed_size": "26633", - "columns": [ - { - "name": "number", - "path": "number", - "max_definition_level": "0", - "max_repetition_level": "0", - "physical_type": "INT32", - "logical_type": "Int(bitWidth=16, isSigned=false)", - "compression": "LZ4", - "total_uncompressed_size": "133321", - "total_compressed_size": "13293", - "space_saved": "90.03%", - "encodings": [ - "RLE_DICTIONARY", - "PLAIN", - "RLE" - ] - }, - { - "name": "concat('Hello', toString(modulo(number, 1000)))", - "path": "concat('Hello', toString(modulo(number, 1000)))", - "max_definition_level": "0", - "max_repetition_level": "0", - "physical_type": "BYTE_ARRAY", - "logical_type": "None", - "compression": "LZ4", - "total_uncompressed_size": "149115", - "total_compressed_size": "13340", - "space_saved": "91.05%", - "encodings": [ - "RLE_DICTIONARY", - "PLAIN", - "RLE" - ] - } - ], - "row_groups": [ - { - "num_columns": "2", - "num_rows": "65409", - "total_uncompressed_size": "179809", - "total_compressed_size": "14163", - "columns": [ - { - "name": "number", - "path": "number", - "total_compressed_size": "7070", - "total_uncompressed_size": "85956", - "have_statistics": true, - "statistics": { - "num_values": "65409", - "null_count": "0", - "distinct_count": null, - "min": "0", - "max": "999" - } - }, - { - "name": "concat('Hello', toString(modulo(number, 1000)))", - "path": "concat('Hello', toString(modulo(number, 1000)))", - "total_compressed_size": "7093", - "total_uncompressed_size": "93853", - "have_statistics": true, - "statistics": { - "num_values": "65409", - "null_count": "0", - "distinct_count": null, - "min": "Hello0", - "max": "Hello999" - } - } - ] - }, - ... 
- ] -} -``` diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Parquet/ParquetMetadata.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Parquet/ParquetMetadata.md.hash deleted file mode 100644 index 366722692bc..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Parquet/ParquetMetadata.md.hash +++ /dev/null @@ -1 +0,0 @@ -07b8e56fc24bc278 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/PostgreSQLWire.md b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/PostgreSQLWire.md deleted file mode 100644 index 156d39d806b..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/PostgreSQLWire.md +++ /dev/null @@ -1,15 +0,0 @@ ---- -description: 'PostgreSQLWire formatのドキュメント' -keywords: -- 'PostgreSQLWire' -slug: '/interfaces/formats/PostgreSQLWire' -title: 'PostgreSQLWire' ---- - - - -## 説明 {#description} - -## 使用例 {#example-usage} - -## フォーマット設定 {#format-settings} diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/PostgreSQLWire.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/PostgreSQLWire.md.hash deleted file mode 100644 index 3cdaa912aea..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/PostgreSQLWire.md.hash +++ /dev/null @@ -1 +0,0 @@ -6f7a69ab25773ad8 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Pretty/Pretty.md b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Pretty/Pretty.md deleted file mode 100644 index ccb08fde178..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Pretty/Pretty.md +++ /dev/null @@ -1,98 +0,0 @@ ---- -alias: [] -description: 'Pretty format' -input_format: false -keywords: -- 'Pretty' -output_format: true -slug: '/interfaces/formats/Pretty' -title: 'Pretty' ---- - -import PrettyFormatSettings from './_snippets/common-pretty-format-settings.md'; - -| Input | Output | Alias | -|-------|---------|-------| -| ✗ | ✔ | | - -## 説明 {#description} - -`Pretty` フォーマットは、データを Unicode アートテーブルとして出力し、ターミナルで色を表示するために ANSI エスケープシーケンスを使用します。 -テーブルの全体のグリッドが描画され、各行はターミナルで 2 行を占めます。 -各結果ブロックは別々のテーブルとして出力されます。 -これは、すべての値の可視幅を事前に計算するためにバッファリングなしでブロックを出力できるようにするために必要です(バッファリングが必要になります)。 - -[NULL](/sql-reference/syntax.md) は `ᴺᵁᴸᴸ` として出力されます。 - -## 使用例 {#example-usage} - -例([`PrettyCompact`](./PrettyCompact.md) フォーマットのために示されています): - -```sql title="クエリ" -SELECT * FROM t_null -``` - -```response title="応答" -┌─x─┬────y─┐ -│ 1 │ ᴺᵁᴸᴸ │ -└───┴──────┘ -``` - -行は `Pretty` フォーマットのいずれにおいてもエスケープされません。以下の例は[`PrettyCompact`](./PrettyCompact.md) フォーマットのために示されています: - -```sql title="クエリ" -SELECT 'String with \'quotes\' and \t character' AS Escaping_test -``` - -```response title="応答" -┌─Escaping_test────────────────────────┐ -│ String with 'quotes' and character │ -└──────────────────────────────────────┘ -``` - -ターミナルにあまりにも多くのデータを出力しないように、最初の `10,000` 行のみが出力されます。 -行数が `10,000` 以上の場合、メッセージ "Showed first 10 000" が出力されます。 - -:::note -このフォーマットは、クエリ結果の出力には適していますが、データの解析には適していません。 -::: - -Pretty フォーマットは、合計値(`WITH TOTALS` を使用する場合)や極値(`extremes` が 1 に設定されている場合)の出力をサポートしています。 -これらの場合、合計値と極値は、主なデータの後に別々のテーブルで出力されます。 -これは、[`PrettyCompact`](./PrettyCompact.md) フォーマットを使用した以下の例に示されています: - -```sql title="クエリ" -SELECT EventDate, count() AS c -FROM test.hits -GROUP BY EventDate -WITH TOTALS -ORDER BY EventDate -FORMAT PrettyCompact -``` - -```response title="応答" -┌──EventDate─┬───────c─┐ -│ 
2014-03-17 │ 1406958 │ -│ 2014-03-18 │ 1383658 │ -│ 2014-03-19 │ 1405797 │ -│ 2014-03-20 │ 1353623 │ -│ 2014-03-21 │ 1245779 │ -│ 2014-03-22 │ 1031592 │ -│ 2014-03-23 │ 1046491 │ -└────────────┴─────────┘ - -合計: -┌──EventDate─┬───────c─┐ -│ 1970-01-01 │ 8873898 │ -└────────────┴─────────┘ - -極値: -┌──EventDate─┬───────c─┐ -│ 2014-03-17 │ 1031592 │ -│ 2014-03-23 │ 1406958 │ -└────────────┴─────────┘ -``` - -## フォーマット設定 {#format-settings} - - diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Pretty/Pretty.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Pretty/Pretty.md.hash deleted file mode 100644 index a13396a14ec..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Pretty/Pretty.md.hash +++ /dev/null @@ -1 +0,0 @@ -2851ad3158990089 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Pretty/PrettyCompact.md b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Pretty/PrettyCompact.md deleted file mode 100644 index 7d8c4fb5c40..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Pretty/PrettyCompact.md +++ /dev/null @@ -1,30 +0,0 @@ ---- -alias: [] -description: 'Documentation for the PrettyCompact format' -input_format: false -keywords: -- 'PrettyCompact' -output_format: true -slug: '/interfaces/formats/PrettyCompact' -title: 'PrettyCompact' ---- - -import PrettyFormatSettings from './_snippets/common-pretty-format-settings.md'; - -| Input | Output | Alias | -|-------|---------|-------| -| ✗ | ✔ | | - -## 説明 {#description} - -[`Pretty`](./Pretty.md) 形式とは異なり、行の間にグリッドが描画されたテーブルが表示されます。このため、結果はよりコンパクトになります。 - -:::note -この形式は、インタラクティブモードのコマンドラインクライアントでデフォルトで使用されます。 -::: - -## 使用例 {#example-usage} - -## 形式設定 {#format-settings} - - diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Pretty/PrettyCompact.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Pretty/PrettyCompact.md.hash deleted file mode 100644 index 8a6e881745b..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Pretty/PrettyCompact.md.hash +++ /dev/null @@ -1 +0,0 @@ -5b941ce0938d1d64 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Pretty/PrettyCompactMonoBlock.md b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Pretty/PrettyCompactMonoBlock.md deleted file mode 100644 index 720d73fae0a..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Pretty/PrettyCompactMonoBlock.md +++ /dev/null @@ -1,27 +0,0 @@ ---- -alias: [] -description: 'PrettyCompactMonoBlockフォーマットのドキュメント' -input_format: false -keywords: -- 'PrettyCompactMonoBlock' -output_format: true -slug: '/interfaces/formats/PrettyCompactMonoBlock' -title: 'PrettyCompactMonoBlock' ---- - -import PrettyFormatSettings from './_snippets/common-pretty-format-settings.md'; - -| Input | Output | Alias | -|-------|---------|-------| -| ✗ | ✔ | | - -## 説明 {#description} - -[`PrettyCompact`](./PrettyCompact.md) フォーマットと異なり、最大 `10,000` 行がバッファに格納され、 -単一のテーブルとして出力されます。 [ブロック](/development/architecture#block) ではありません。 - -## 使用例 {#example-usage} - -## フォーマット設定 {#format-settings} - - diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Pretty/PrettyCompactMonoBlock.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Pretty/PrettyCompactMonoBlock.md.hash deleted file mode 100644 index 111ad1b930d..00000000000 --- 
a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Pretty/PrettyCompactMonoBlock.md.hash +++ /dev/null @@ -1 +0,0 @@ -8be838d24ab5f2fc diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Pretty/PrettyCompactNoEscapes.md b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Pretty/PrettyCompactNoEscapes.md deleted file mode 100644 index da418849a0b..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Pretty/PrettyCompactNoEscapes.md +++ /dev/null @@ -1,27 +0,0 @@ ---- -alias: [] -description: 'PrettyCompactNoEscapes 形式のドキュメント' -input_format: false -keywords: -- 'PrettyCompactNoEscapes' -output_format: true -slug: '/interfaces/formats/PrettyCompactNoEscapes' -title: 'PrettyCompactNoEscapes' ---- - -import PrettyFormatSettings from './_snippets/common-pretty-format-settings.md'; - -| Input | Output | Alias | -|-------|---------|-------| -| ✗ | ✔ | | - -## 説明 {#description} - -[`PrettyCompact`](./PrettyCompact.md) 形式と異なり、[ANSIエスケープシーケンス](http://en.wikipedia.org/wiki/ANSI_escape_code) は使用されません。 -これは、ブラウザで形式を表示するため、及び 'watch' コマンドラインユーティリティを使用するために必要です。 - -## 使用例 {#example-usage} - -## 形式設定 {#format-settings} - - diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Pretty/PrettyCompactNoEscapes.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Pretty/PrettyCompactNoEscapes.md.hash deleted file mode 100644 index 1df7ca03181..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Pretty/PrettyCompactNoEscapes.md.hash +++ /dev/null @@ -1 +0,0 @@ -c8732336eaece1bd diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Pretty/PrettyCompactNoEscapesMonoBlock.md b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Pretty/PrettyCompactNoEscapesMonoBlock.md deleted file mode 100644 index 44f36f7d3d1..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Pretty/PrettyCompactNoEscapesMonoBlock.md +++ /dev/null @@ -1,27 +0,0 @@ ---- -alias: [] -description: 'PrettyCompactNoEscapesMonoBlock フォーマットのドキュメント' -input_format: false -keywords: -- 'PrettyCompactNoEscapesMonoBlock' -output_format: true -slug: '/interfaces/formats/PrettyCompactNoEscapesMonoBlock' -title: 'PrettyCompactNoEscapesMonoBlock' ---- - -import PrettyFormatSettings from './_snippets/common-pretty-format-settings.md'; - -| Input | Output | Alias | -|-------|---------|-------| -| ✗ | ✔ | | - -## 説明 {#description} - -[`PrettyCompactNoEscapes`](./PrettyCompactNoEscapes.md) 形式と異なり、最大 `10,000` 行がバッファに格納され、 -単一のテーブルとして出力され、[ブロック](/development/architecture#block) ではありません。 - -## 例の使用法 {#example-usage} - -## 形式設定 {#format-settings} - - diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Pretty/PrettyCompactNoEscapesMonoBlock.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Pretty/PrettyCompactNoEscapesMonoBlock.md.hash deleted file mode 100644 index 767ec0c82f9..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Pretty/PrettyCompactNoEscapesMonoBlock.md.hash +++ /dev/null @@ -1 +0,0 @@ -5aeaad5d79a8e2e3 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Pretty/PrettyMonoBlock.md b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Pretty/PrettyMonoBlock.md deleted file mode 100644 index 939a32aa137..00000000000 --- 
a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Pretty/PrettyMonoBlock.md +++ /dev/null @@ -1,27 +0,0 @@ ---- -alias: [] -description: 'PrettyMonoBlockフォーマットのドキュメント' -input_format: false -keywords: -- 'PrettyMonoBlock' -output_format: true -slug: '/interfaces/formats/PrettyMonoBlock' -title: 'PrettyMonoBlock' ---- - -import PrettyFormatSettings from './_snippets/common-pretty-format-settings.md'; - -| Input | Output | Alias | -|-------|---------|-------| -| ✗ | ✔ | | - -## 説明 {#description} - -[`Pretty`](/interfaces/formats/Pretty) フォーマットとは異なり、最大 `10,000` 行がバッファリングされ、 -単一のテーブルとして出力され、[ブロック](/development/architecture#block) ごとではありません。 - -## 使用例 {#example-usage} - -## フォーマット設定 {#format-settings} - - diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Pretty/PrettyMonoBlock.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Pretty/PrettyMonoBlock.md.hash deleted file mode 100644 index 3ef2eff29a9..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Pretty/PrettyMonoBlock.md.hash +++ /dev/null @@ -1 +0,0 @@ -f8b6c1092dac50bd diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Pretty/PrettyNoEscapes.md b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Pretty/PrettyNoEscapes.md deleted file mode 100644 index cbfc85fe1b2..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Pretty/PrettyNoEscapes.md +++ /dev/null @@ -1,37 +0,0 @@ ---- -alias: [] -description: 'PrettyNoEscapes フォーマットのドキュメント' -input_format: false -keywords: -- 'PrettyNoEscapes' -output_format: true -slug: '/interfaces/formats/PrettyNoEscapes' -title: 'PrettyNoEscapes' ---- - -import PrettyFormatSettings from './_snippets/common-pretty-format-settings.md'; - -| Input | Output | Alias | -|-------|---------|-------| -| ✗ | ✔ | | - -## 説明 {#description} - -[Pretty](/interfaces/formats/Pretty) と異なり、[ANSI-escape sequences](http://en.wikipedia.org/wiki/ANSI_escape_code) が使用されていません。 -これは、ブラウザでフォーマットを表示するため、また 'watch' コマンドラインユーティリティを使用するために必要です。 - -## 使用例 {#example-usage} - -例: - -```bash -$ watch -n1 "clickhouse-client --query='SELECT event, value FROM system.events FORMAT PrettyCompactNoEscapes'" -``` - -:::note -[HTTP interface](../../../interfaces/http.md) を使用して、このフォーマットをブラウザに表示できます。 -::: - -## フォーマット設定 {#format-settings} - - diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Pretty/PrettyNoEscapes.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Pretty/PrettyNoEscapes.md.hash deleted file mode 100644 index b3a2daab7cc..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Pretty/PrettyNoEscapes.md.hash +++ /dev/null @@ -1 +0,0 @@ -c4ffcff084b35378 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Pretty/PrettyNoEscapesMonoBlock.md b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Pretty/PrettyNoEscapesMonoBlock.md deleted file mode 100644 index 789a2b1cb4d..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Pretty/PrettyNoEscapesMonoBlock.md +++ /dev/null @@ -1,27 +0,0 @@ ---- -alias: [] -description: 'PrettyNoEscapesMonoBlock フォーマットのドキュメント' -input_format: false -keywords: -- 'PrettyNoEscapesMonoBlock' -output_format: true -slug: '/interfaces/formats/PrettyNoEscapesMonoBlock' -title: 'PrettyNoEscapesMonoBlock' ---- - -import PrettyFormatSettings from './_snippets/common-pretty-format-settings.md'; 
- -| Input | Output | Alias | -|-------|---------|-------| -| ✗ | ✔ | | - -## 説明 {#description} - -[`PrettyNoEscapes`](./PrettyNoEscapes.md) 形式と異なり、最大 `10,000` 行がバッファリングされ、 -単一のテーブルとして出力され、ブロックで出力されることはありません。 - -## 使用例 {#example-usage} - -## 形式設定 {#format-settings} - - diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Pretty/PrettyNoEscapesMonoBlock.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Pretty/PrettyNoEscapesMonoBlock.md.hash deleted file mode 100644 index 3663d896fa3..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Pretty/PrettyNoEscapesMonoBlock.md.hash +++ /dev/null @@ -1 +0,0 @@ -6a5f053cd2ea34c3 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Pretty/PrettySpace.md b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Pretty/PrettySpace.md deleted file mode 100644 index c726a2c2dec..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Pretty/PrettySpace.md +++ /dev/null @@ -1,26 +0,0 @@ ---- -alias: [] -description: 'PrettySpace フォーマットのドキュメント' -input_format: false -keywords: -- 'PrettySpace' -output_format: true -slug: '/interfaces/formats/PrettySpace' -title: 'PrettySpace' ---- - -import PrettyFormatSettings from './_snippets/common-pretty-format-settings.md'; - -| Input | Output | Alias | -|-------|---------|-------| -| ✗ | ✔ | | - -## 説明 {#description} - -[`PrettyCompact`](./PrettyCompact.md) フォーマットとは異なり、スペース文字を使用してテーブルを表示し、グリッドではなくなっています。 - -## 使用例 {#example-usage} - -## フォーマット設定 {#format-settings} - - diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Pretty/PrettySpace.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Pretty/PrettySpace.md.hash deleted file mode 100644 index 798a33790d9..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Pretty/PrettySpace.md.hash +++ /dev/null @@ -1 +0,0 @@ -fc55fa2287030fdd diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Pretty/PrettySpaceMonoBlock.md b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Pretty/PrettySpaceMonoBlock.md deleted file mode 100644 index 698aa87200a..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Pretty/PrettySpaceMonoBlock.md +++ /dev/null @@ -1,26 +0,0 @@ ---- -alias: [] -description: 'PrettySpaceMonoBlock フォーマットのドキュメント' -input_format: false -keywords: -- 'PrettySpaceMonoBlock' -output_format: true -slug: '/interfaces/formats/PrettySpaceMonoBlock' -title: 'PrettySpaceMonoBlock' ---- - -import PrettyFormatSettings from './_snippets/common-pretty-format-settings.md'; - -| Input | Output | Alias | -|-------|---------|-------| -| ✗ | ✔ | | - -## 説明 {#description} - -[`PrettySpace`](./PrettySpace.md)フォーマットとは異なり、最大`10,000`行がバッファされ、単一のテーブルとして出力されます。 [ブロック](/development/architecture#block)によって出力されることはありません。 - -## 使用例 {#example-usage} - -## フォーマット設定 {#format-settings} - - diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Pretty/PrettySpaceMonoBlock.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Pretty/PrettySpaceMonoBlock.md.hash deleted file mode 100644 index 43e0527d19a..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Pretty/PrettySpaceMonoBlock.md.hash +++ /dev/null @@ -1 +0,0 @@ -fa538cdb02e3664f diff --git 
a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Pretty/PrettySpaceNoEscapes.md b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Pretty/PrettySpaceNoEscapes.md deleted file mode 100644 index 43b93679ad9..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Pretty/PrettySpaceNoEscapes.md +++ /dev/null @@ -1,27 +0,0 @@ ---- -alias: [] -description: 'PrettySpaceNoEscapes フォーマットに関するドキュメント' -input_format: false -keywords: -- 'PrettySpaceNoEscapes' -output_format: true -slug: '/interfaces/formats/PrettySpaceNoEscapes' -title: 'PrettySpaceNoEscapes' ---- - -import PrettyFormatSettings from './_snippets/common-pretty-format-settings.md'; - -| Input | Output | Alias | -|-------|---------|-------| -| ✗ | ✔ | | - -## 説明 {#description} - -[`PrettySpace`](./PrettySpace.md) 形式とは異なり、[ANSIエスケープシーケンス](http://en.wikipedia.org/wiki/ANSI_escape_code) は使用されません。 -これは、この形式をブラウザで表示するため、および 'watch' コマンドラインユーティリティを使用するために必要です。 - -## 使用例 {#example-usage} - -## 形式設定 {#format-settings} - - diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Pretty/PrettySpaceNoEscapes.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Pretty/PrettySpaceNoEscapes.md.hash deleted file mode 100644 index 6b95cd2f997..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Pretty/PrettySpaceNoEscapes.md.hash +++ /dev/null @@ -1 +0,0 @@ -54981db8033a16be diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Pretty/PrettySpaceNoEscapesMonoBlock.md b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Pretty/PrettySpaceNoEscapesMonoBlock.md deleted file mode 100644 index 30b239e6c21..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Pretty/PrettySpaceNoEscapesMonoBlock.md +++ /dev/null @@ -1,27 +0,0 @@ ---- -alias: [] -description: 'PrettySpaceNoEscapesMonoBlock形式のドキュメント' -input_format: false -keywords: -- 'PrettySpaceNoEscapesMonoBlock' -output_format: true -slug: '/interfaces/formats/PrettySpaceNoEscapesMonoBlock' -title: 'PrettySpaceNoEscapesMonoBlock' ---- - -import PrettyFormatSettings from './_snippets/common-pretty-format-settings.md'; - -| Input | Output | Alias | -|-------|---------|-------| -| ✗ | ✔ | | - -## 説明 {#description} - -[`PrettySpaceNoEscapes`](./PrettySpaceNoEscapes.md) フォーマットとは異なり、最大 `10,000` 行がバッファリングされ、 -単一のテーブルとして出力されます。これは [ブロック](/development/architecture#block) によるものではありません。 - -## 使用例 {#example-usage} - -## フォーマット設定 {#format-settings} - - diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Pretty/PrettySpaceNoEscapesMonoBlock.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Pretty/PrettySpaceNoEscapesMonoBlock.md.hash deleted file mode 100644 index c8196f4afa2..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Pretty/PrettySpaceNoEscapesMonoBlock.md.hash +++ /dev/null @@ -1 +0,0 @@ -ac77f66371ab054b diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Pretty/_snippets/common-pretty-format-settings.md b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Pretty/_snippets/common-pretty-format-settings.md deleted file mode 100644 index 8336466210b..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Pretty/_snippets/common-pretty-format-settings.md +++ /dev/null @@ -1,20 +0,0 @@ ---- -{} ---- - - - - - -次の設定はすべての `Pretty` フォーマットに共通しています。 - -| 
設定 | 説明 | デフォルト | -|----------------------------------------------------------------------------------------------------------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|---------| -| [`output_format_pretty_max_rows`](/operations/settings/settings-formats.md/#output_format_pretty_max_rows) | Pretty フォーマットの行数制限。 | `10000` | -| [`output_format_pretty_max_column_pad_width`](/operations/settings/settings-formats.md/#output_format_pretty_max_column_pad_width) | Pretty フォーマットにおけるカラム内の値をパディングする最大幅。 | `250` | -| [`output_format_pretty_max_value_width`](/operations/settings/settings-formats.md/#output_format_pretty_max_value_width) | Pretty フォーマットで表示する値の最大幅。超える場合は切り捨てられます。 | `10000` | -| [`output_format_pretty_color`](/operations/settings/settings-formats.md/#output_format_pretty_color) | Pretty フォーマットで色を表示するために ANSI エスケープシーケンスを使用します。 | `true` | -| [`output_format_pretty_grid_charset`](/operations/settings/settings-formats.md/#output_format_pretty_grid_charset) | グリッドの境界を印刷するためのキャラクタセット。利用可能なキャラクタセット: ASCII, UTF-8。 | `UTF-8` | -| [`output_format_pretty_row_numbers`](/operations/settings/settings-formats.md/#output_format_pretty_row_numbers) | Pretty 出力フォーマットの各行の前に行番号を追加します。 | `true` | -| [`output_format_pretty_display_footer_column_names`](/operations/settings/settings-formats.md/#output_format_pretty_display_footer_column_names) | テーブルに多くの行が含まれている場合、フッターにカラム名を表示します。 | `true` | -| [`output_format_pretty_display_footer_column_names_min_rows`](/operations/settings/settings-formats.md/#output_format_pretty_display_footer_column_names_min_rows) | [`output_format_pretty_display_footer_column_names`](/operations/settings/settings-formats.md/#output_format_pretty_display_footer_column_names) が有効な場合にフッターが表示されるための最小行数を設定します。 | `50` | diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Pretty/_snippets/common-pretty-format-settings.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Pretty/_snippets/common-pretty-format-settings.md.hash deleted file mode 100644 index 0248338e9b3..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Pretty/_snippets/common-pretty-format-settings.md.hash +++ /dev/null @@ -1 +0,0 @@ -0b71e6b6d0e9e47d diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Prometheus.md b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Prometheus.md deleted file mode 100644 index afcc3d43164..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Prometheus.md +++ /dev/null @@ -1,97 +0,0 @@ ---- -alias: [] -description: 'Prometheusフォーマットのドキュメント' -input_format: false -keywords: -- 'Prometheus' -output_format: true -slug: '/interfaces/formats/Prometheus' -title: 'Prometheus' ---- - - - -| Input | Output | Alias | -|-------|--------|-------| -| ✗ | ✔ | | - -## 説明 {#description} - -[Prometheus のテキストベースのエクスポジションフォーマット](https://prometheus.io/docs/instrumenting/exposition_formats/#text-based-format) でメトリクスを公開します。 - -このフォーマットには、出力テーブルが以下のルールに従って正しく構造化されていることが要求されます: - -- `name` ([String](/sql-reference/data-types/string.md)) および `value` (数値) カラムは必須です。 -- 行はオプションで `help` ([String](/sql-reference/data-types/string.md)) および `timestamp` (数値) を含むことができます。 -- `type` ([String](/sql-reference/data-types/string.md)) カラムは 
`counter`、`gauge`、`histogram`、`summary`、`untyped` のいずれか、または空である必要があります。 -- 各メトリクス値には、いくつかの `labels` ([Map(String, String)](/sql-reference/data-types/map.md)) を持つことができます。 -- いくつかの連続する行は、異なるラベルを持つ同じメトリクスを参照することがあります。テーブルはメトリクス名でソートする必要があります(例: `ORDER BY name` を使用)。 - -`histogram` および `summary` ラベルには特別な要件があります - 詳細は [Prometheus doc](https://prometheus.io/docs/instrumenting/exposition_formats/#histograms-and-summaries) を参照してください。 -`{'count':''}` および `{'sum':''}` のラベルを持つ行には特別なルールが適用され、これはそれぞれ `_count` および `_sum` に変換されます。 - -## 使用例 {#example-usage} - -```yaml -┌─name────────────────────────────────┬─type──────┬─help──────────────────────────────────────┬─labels─────────────────────────┬────value─┬─────timestamp─┐ -│ http_request_duration_seconds │ histogram │ リクエストの時間のヒストグラム。 │ {'le':'0.05'} │ 24054 │ 0 │ -│ http_request_duration_seconds │ histogram │ │ {'le':'0.1'} │ 33444 │ 0 │ -│ http_request_duration_seconds │ histogram │ │ {'le':'0.2'} │ 100392 │ 0 │ -│ http_request_duration_seconds │ histogram │ │ {'le':'0.5'} │ 129389 │ 0 │ -│ http_request_duration_seconds │ histogram │ │ {'le':'1'} │ 133988 │ 0 │ -│ http_request_duration_seconds │ histogram │ │ {'le':'+Inf'} │ 144320 │ 0 │ -│ http_request_duration_seconds │ histogram │ │ {'sum':''} │ 53423 │ 0 │ -│ http_requests_total │ counter │ HTTPリクエストの総数 │ {'method':'post','code':'200'} │ 1027 │ 1395066363000 │ -│ http_requests_total │ counter │ │ {'method':'post','code':'400'} │ 3 │ 1395066363000 │ -│ metric_without_timestamp_and_labels │ │ │ {} │ 12.47 │ 0 │ -│ rpc_duration_seconds │ summary │ RPCの時間を秒単位で要約したものです。 │ {'quantile':'0.01'} │ 3102 │ 0 │ -│ rpc_duration_seconds │ summary │ │ {'quantile':'0.05'} │ 3272 │ 0 │ -│ rpc_duration_seconds │ summary │ │ {'quantile':'0.5'} │ 4773 │ 0 │ -│ rpc_duration_seconds │ summary │ │ {'quantile':'0.9'} │ 9001 │ 0 │ -│ rpc_duration_seconds │ summary │ │ {'quantile':'0.99'} │ 76656 │ 0 │ -│ rpc_duration_seconds │ summary │ │ {'count':''} │ 2693 │ 0 │ -│ rpc_duration_seconds │ summary │ │ {'sum':''} │ 17560473 │ 0 │ -│ something_weird │ │ │ {'problem':'division by zero'} │ inf │ -3982045 │ -└─────────────────────────────────────┴───────────┴───────────────────────────────────────────┴────────────────────────────────┴──────────┴───────────────┘ -``` - -次のようにフォーマットされます: - -```text - -# HELP http_request_duration_seconds リクエストの時間のヒストグラム。 - -# TYPE http_request_duration_seconds histogram -http_request_duration_seconds_bucket{le="0.05"} 24054 -http_request_duration_seconds_bucket{le="0.1"} 33444 -http_request_duration_seconds_bucket{le="0.5"} 129389 -http_request_duration_seconds_bucket{le="1"} 133988 -http_request_duration_seconds_bucket{le="+Inf"} 144320 -http_request_duration_seconds_sum 53423 -http_request_duration_seconds_count 144320 - - -# HELP http_requests_total HTTPリクエストの総数 - -# TYPE http_requests_total counter -http_requests_total{code="200",method="post"} 1027 1395066363000 -http_requests_total{code="400",method="post"} 3 1395066363000 - -metric_without_timestamp_and_labels 12.47 - - -# HELP rpc_duration_seconds RPCの時間を秒単位で要約したものです。 - -# TYPE rpc_duration_seconds summary -rpc_duration_seconds{quantile="0.01"} 3102 -rpc_duration_seconds{quantile="0.05"} 3272 -rpc_duration_seconds{quantile="0.5"} 4773 -rpc_duration_seconds{quantile="0.9"} 9001 -rpc_duration_seconds{quantile="0.99"} 76656 -rpc_duration_seconds_sum 17560473 -rpc_duration_seconds_count 2693 - -something_weird{problem="division by zero"} +Inf -3982045 -``` - -## フォーマット設定 {#format-settings} diff --git 
a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Prometheus.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Prometheus.md.hash deleted file mode 100644 index 7f45aa4ac2d..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Prometheus.md.hash +++ /dev/null @@ -1 +0,0 @@ -742d7d5c9b937e1b diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Protobuf/Protobuf.md b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Protobuf/Protobuf.md deleted file mode 100644 index 72a018d0823..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Protobuf/Protobuf.md +++ /dev/null @@ -1,131 +0,0 @@ ---- -alias: [] -description: 'Protobufフォーマットのドキュメント' -input_format: true -keywords: -- 'Protobuf' -output_format: true -slug: '/interfaces/formats/Protobuf' -title: 'Protobuf' ---- - -import CloudNotSupportedBadge from '@theme/badges/CloudNotSupportedBadge'; - - - -| 入力 | 出力 | エイリアス | -|-------|--------|-------| -| ✔ | ✔ | | - -## 説明 {#description} - -`Protobuf`フォーマットは、[Protocol Buffers](https://protobuf.dev/)フォーマットです。 - -このフォーマットは、クエリ間でキャッシュされる外部フォーマットスキーマを必要とします。 - -ClickHouseは以下をサポートします: -- `proto2`および`proto3`構文の両方。 -- `Repeated`/`optional`/`required`フィールド。 - -## 使用例 {#example-usage} - -### 基本的な例 {#basic-examples} - -使用例: - -```sql -SELECT * FROM test.table FORMAT Protobuf SETTINGS format_schema = 'schemafile:MessageType' -``` - -```bash -cat protobuf_messages.bin | clickhouse-client --query "INSERT INTO test.table SETTINGS format_schema='schemafile:MessageType' FORMAT Protobuf" -``` - -ファイル`schemafile.proto`は次のようになります: - -```capnp -syntax = "proto3"; - -message MessageType { - string name = 1; - string surname = 2; - uint32 birthDate = 3; - repeated string phoneNumbers = 4; -}; -``` - -ClickHouseは、テーブルのカラムとProtocol Buffersのメッセージタイプのフィールド間の対応を見つけるために、それらの名前を比較します。 -この比較は大文字と小文字を区別せず、`_`(アンダースコア)と`.`(ドット)の文字は等しいものと見なされます。 -カラムとProtocol Buffersのメッセージのフィールドの型が異なる場合、必要な変換が適用されます。 - -入れ子メッセージもサポートされています。例えば、次のメッセージタイプのフィールド`z`の場合: - -```capnp -message MessageType { - message XType { - message YType { - int32 z; - }; - repeated YType y; - }; - XType x; -}; -``` - -ClickHouseは`x.y.z`(または`x_y_z`、`X.y_Z`など)という名前のカラムを見つけようとします。 - -入れ子メッセージは、[入れ子データ構造](/sql-reference/data-types/nested-data-structures/index.md)の入出力に適しています。 - -次のようなprotobufスキーマで定義されたデフォルト値は適用されず、[テーブルのデフォルト値](/sql-reference/statements/create/table#default_values)が代わりに使用されます: - -```capnp -syntax = "proto2"; - -message MessageType { - optional int32 result_per_page = 3 [default = 10]; -} -``` - -ClickHouseはprotobufメッセージを`length-delimited`形式で入出力します。 -これは、各メッセージの前にその長さが[可変幅整数(varint)](https://developers.google.com/protocol-buffers/docs/encoding#varints)として記述される必要があることを意味します。 - -また、[人気のある言語でlength-delimited protobufメッセージを読み書きする方法](https://cwiki.apache.org/confluence/display/GEODE/Delimiting+Protobuf+Messages)も参照してください。 - -### 自動生成スキーマの使用 {#using-autogenerated-protobuf-schema} - -データの外部Protobufスキーマがない場合でも、自動生成されたスキーマを使用してProtobufフォーマットでデータを出力/入力できます。 - -例えば: - -```sql -SELECT * FROM test.hits format Protobuf SETTINGS format_protobuf_use_autogenerated_schema=1 -``` - -この場合、ClickHouseはテーブル構造に基づいてProtobufスキーマを自動生成し、[`structureToProtobufSchema`](/sql-reference/functions/other-functions.md#structure_to_protobuf_schema)関数を使用します。 -その後、このスキーマを使用してProtobufフォーマットでデータを直列化します。 - -自動生成スキーマを使用したProtobufファイルを読み込むこともできます。この場合、ファイルは同じスキーマを使用して作成される必要があります: - -```bash -$ cat hits.bin | clickhouse-client --query "INSERT INTO 
test.hits SETTINGS format_protobuf_use_autogenerated_schema=1 FORMAT Protobuf" -``` - -設定[`format_protobuf_use_autogenerated_schema`](/operations/settings/settings-formats.md#format_protobuf_use_autogenerated_schema)はデフォルトで有効であり、[`format_schema`](/operations/settings/formats#format_schema)が設定されていない場合に適用されます。 - -また、設定[`output_format_schema`](/operations/settings/formats#output_format_schema)を使用して、入出力中に自動生成されたスキーマをファイルに保存することもできます。例えば: - -```sql -SELECT * FROM test.hits format Protobuf SETTINGS format_protobuf_use_autogenerated_schema=1, output_format_schema='path/to/schema/schema.proto' -``` - -この場合、自動生成されたProtobufスキーマはファイル`path/to/schema/schema.capnp`に保存されます。 - -### Protobufキャッシュの削除 {#drop-protobuf-cache} - -[`format_schema_path`](/operations/server-configuration-parameters/settings.md/#format_schema_path)から読み込まれたProtobufスキーマを再読み込みするには、[`SYSTEM DROP ... FORMAT CACHE`](/sql-reference/statements/system.md/#system-drop-schema-format)ステートメントを使用します。 - -```sql -SYSTEM DROP FORMAT SCHEMA CACHE FOR Protobuf -``` - -## フォーマット設定 {#format-settings} diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Protobuf/Protobuf.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Protobuf/Protobuf.md.hash deleted file mode 100644 index c0fce751f61..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Protobuf/Protobuf.md.hash +++ /dev/null @@ -1 +0,0 @@ -0f070486bd8f40a0 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Protobuf/ProtobufList.md b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Protobuf/ProtobufList.md deleted file mode 100644 index 6e8cd062c22..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Protobuf/ProtobufList.md +++ /dev/null @@ -1,51 +0,0 @@ ---- -alias: [] -description: 'ProtobufList フォーマットのドキュメント' -input_format: true -keywords: -- 'ProtobufList' -output_format: true -slug: '/interfaces/formats/ProtobufList' -title: 'ProtobufList' ---- - -import CloudNotSupportedBadge from '@theme/badges/CloudNotSupportedBadge'; - - - -| 入力 | 出力 | エイリアス | -|-------|--------|-------| -| ✔ | ✔ | | - -## 説明 {#description} - -`ProtobufList` 形式は、[`Protobuf`](./Protobuf.md) 形式と似ていますが、行は「Envelope」という固定名のメッセージに含まれるサブメッセージのシーケンスとして表されます。 - -## 使用例 {#example-usage} - -例えば: - -```sql -SELECT * FROM test.table FORMAT ProtobufList SETTINGS format_schema = 'schemafile:MessageType' -``` - -```bash -cat protobuflist_messages.bin | clickhouse-client --query "INSERT INTO test.table FORMAT ProtobufList SETTINGS format_schema='schemafile:MessageType'" -``` - -ファイル `schemafile.proto` は次のようになります: - -```capnp title="schemafile.proto" -syntax = "proto3"; -message Envelope { - message MessageType { - string name = 1; - string surname = 2; - uint32 birthDate = 3; - repeated string phoneNumbers = 4; - }; - MessageType row = 1; -}; -``` - -## 形式設定 {#format-settings} diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Protobuf/ProtobufList.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Protobuf/ProtobufList.md.hash deleted file mode 100644 index 8ef9ff8db3c..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Protobuf/ProtobufList.md.hash +++ /dev/null @@ -1 +0,0 @@ -1e6043f7aa1b2994 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Protobuf/ProtobufSingle.md b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Protobuf/ProtobufSingle.md 
deleted file mode 100644 index 470808f641a..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Protobuf/ProtobufSingle.md +++ /dev/null @@ -1,26 +0,0 @@ ---- -alias: [] -description: 'ProtobufSingle 形式のドキュメント' -input_format: true -keywords: -- 'ProtobufSingle' -output_format: true -slug: '/interfaces/formats/ProtobufSingle' -title: 'ProtobufSingle' ---- - -import CloudNotSupportedBadge from '@theme/badges/CloudNotSupportedBadge'; - - - -| Input | Output | Alias | -|-------|--------|-------| -| ✔ | ✔ | | - -## 説明 {#description} - -`ProtobufSingle` フォーマットは、[`Protobuf`](./Protobuf.md) フォーマットと同じですが、長さ区切りなしで単一の Protobuf メッセージを保存または解析するために使用されます。 - -## 使用例 {#example-usage} - -## フォーマット設定 {#format-settings} diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Protobuf/ProtobufSingle.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Protobuf/ProtobufSingle.md.hash deleted file mode 100644 index 8b1a4055684..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Protobuf/ProtobufSingle.md.hash +++ /dev/null @@ -1 +0,0 @@ -ddc3ab9a25eaea6b diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/RawBLOB.md b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/RawBLOB.md deleted file mode 100644 index 11dd0241e86..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/RawBLOB.md +++ /dev/null @@ -1,55 +0,0 @@ ---- -description: 'RawBLOB形式のドキュメント' -keywords: -- 'RawBLOB' -slug: '/interfaces/formats/RawBLOB' -title: 'RawBLOB' ---- - - - -## 説明 {#description} - -`RawBLOB` 形式は、すべての入力データを単一の値として読み取ります。単一の [`String`](/sql-reference/data-types/string.md) 型のフィールドを持つテーブルのみを解析することが可能です。結果は、区切り文字やエスケープなしのバイナリ形式で出力されます。複数の値が出力されると形式は曖昧になり、データを読み返すことは不可能になります。 - -### Raw形式の比較 {#raw-formats-comparison} - -以下は `RawBLOB` と [`TabSeparatedRaw`](./TabSeparated/TabSeparatedRaw.md) 形式の比較です。 - -`RawBLOB`: -- データはバイナリ形式で出力され、エスケープなし; -- 値の間に区切り文字はありません; -- 各値の末尾に改行はありません。 - -`TabSeparatedRaw`: -- データはエスケープなしで出力されます; -- 行にはタブで分けられた値が含まれています; -- 各行の最終値の後には改行があります。 - -以下は `RawBLOB` と [RowBinary](./RowBinary/RowBinary.md) 形式の比較です。 - -`RawBLOB`: -- String フィールドは、長さのプレフィックスなしで出力されます。 - -`RowBinary`: -- String フィールドは、長さが varint 形式 (符号なし [LEB128](https://en.wikipedia.org/wiki/LEB128))で表示され、その後に文字列のバイトが続きます。 - -`RawBLOB` 入力に空のデータが渡されると、ClickHouse は例外をスローします: - -```text -Code: 108. 
DB::Exception: No data to insert -``` - -## 使用例 {#example-usage} - -```bash title="クエリ" -$ clickhouse-client --query "CREATE TABLE {some_table} (a String) ENGINE = Memory;" -$ cat {filename} | clickhouse-client --query="INSERT INTO {some_table} FORMAT RawBLOB" -$ clickhouse-client --query "SELECT * FROM {some_table} FORMAT RawBLOB" | md5sum -``` - -```text title="レスポンス" -f9725a22f9191e064120d718e26862a9 - -``` - -## 形式の設定 {#format-settings} diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/RawBLOB.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/RawBLOB.md.hash deleted file mode 100644 index 46422cdd8e5..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/RawBLOB.md.hash +++ /dev/null @@ -1 +0,0 @@ -d2bce19a4514152d diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Regexp.md b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Regexp.md deleted file mode 100644 index e587d7ebb02..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Regexp.md +++ /dev/null @@ -1,80 +0,0 @@ ---- -alias: [] -description: 'Regexp形式のドキュメント' -input_format: true -keywords: -- 'Regexp' -output_format: false -slug: '/interfaces/formats/Regexp' -title: 'Regexp' ---- - - - -| Input | Output | Alias | -|-------|--------|-------| -| ✔ | ✗ | | - -## 説明 {#description} - -`Regex` フォーマットは、提供された正規表現に従ってインポートされたデータの各行を解析します。 - -**使用法** - -[format_regexp](/operations/settings/settings-formats.md/#format_regexp) 設定からの正規表現が、インポートされたデータの各行に適用されます。正規表現のサブパターンの数は、インポートされたデータセットのカラム数と等しくなければなりません。 - -インポートされたデータの行は、改行文字 `'\n'` または DOS スタイルの改行 `"\r\n"` で区切られている必要があります。 - -一致した各サブパターンの内容は、[format_regexp_escaping_rule](/operations/settings/settings-formats.md/#format_regexp_escaping_rule) 設定に従って、対応するデータ型のメソッドで解析されます。 - -正規表現が行に一致しない場合、且つ [format_regexp_skip_unmatched](/operations/settings/settings-formats.md/#format_regexp_escaping_rule) が 1 に設定されている場合、その行は静かにスキップされます。そうでない場合、例外がスローされます。 - -## 使用例 {#example-usage} - -ファイル `data.tsv` を考えてみましょう: - -```text title="data.tsv" -id: 1 array: [1,2,3] string: str1 date: 2020-01-01 -id: 2 array: [1,2,3] string: str2 date: 2020-01-02 -id: 3 array: [1,2,3] string: str3 date: 2020-01-03 -``` -テーブル `imp_regex_table` は次の通りです: - -```sql -CREATE TABLE imp_regex_table (id UInt32, array Array(UInt32), string String, date Date) ENGINE = Memory; -``` - -次のクエリを使用して上記のファイルからデータをテーブルに挿入します: - -```bash -$ cat data.tsv | clickhouse-client --query "INSERT INTO imp_regex_table SETTINGS format_regexp='id: (.+?) array: (.+?) string: (.+?) 
date: (.+?)', format_regexp_escaping_rule='Escaped', format_regexp_skip_unmatched=0 FORMAT Regexp;" -``` - -これで、テーブルからデータを `SELECT` して、`Regex` フォーマットがファイルからのデータをどのように解析したかを確認できます: - -```sql title="クエリ" -SELECT * FROM imp_regex_table; -``` - -```text title="レスポンス" -┌─id─┬─array───┬─string─┬───────date─┐ -│ 1 │ [1,2,3] │ str1 │ 2020-01-01 │ -│ 2 │ [1,2,3] │ str2 │ 2020-01-02 │ -│ 3 │ [1,2,3] │ str3 │ 2020-01-03 │ -└────┴─────────┴────────┴────────────┘ -``` - -## フォーマット設定 {#format-settings} - -`Regexp` フォーマットを使用する場合、次の設定を使用できます: - -- `format_regexp` — [String](/sql-reference/data-types/string.md)。 [re2](https://github.com/google/re2/wiki/Syntax) フォーマットの正規表現を含みます。 -- `format_regexp_escaping_rule` — [String](/sql-reference/data-types/string.md)。次のエスケープルールがサポートされています: - - - CSV([CSV](/interfaces/formats/CSV)に類似) - - JSON([JSONEachRow](/interfaces/formats/JSONEachRow)に類似) - - Escaped([TSV](/interfaces/formats/TabSeparated)に類似) - - Quoted([Values](/interfaces/formats/Values)に類似) - - Raw(サブパターンをまるごと抽出し、エスケープルールなし、[TSVRaw](/interfaces/formats/TabSeparated)に類似) - -- `format_regexp_skip_unmatched` — [UInt8](/sql-reference/data-types/int-uint.md)。 `format_regexp` の式がインポートされたデータに一致しない場合に例外をスローする必要性を定義します。 `0` または `1` に設定できます。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Regexp.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Regexp.md.hash deleted file mode 100644 index ca19deadcd7..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Regexp.md.hash +++ /dev/null @@ -1 +0,0 @@ -e091b80de197749a diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/RowBinary/RowBinary.md b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/RowBinary/RowBinary.md deleted file mode 100644 index e64a93d06be..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/RowBinary/RowBinary.md +++ /dev/null @@ -1,56 +0,0 @@ ---- -alias: [] -description: 'RowBinaryフォーマットのドキュメント' -input_format: true -keywords: -- 'RowBinary' -output_format: true -slug: '/interfaces/formats/RowBinary' -title: 'RowBinary' ---- - -import RowBinaryFormatSettings from './_snippets/common-row-binary-format-settings.md' - -| Input | Output | Alias | -|-------|--------|-------| -| ✔ | ✔ | | - -## 説明 {#description} - -`RowBinary`フォーマットは、バイナリフォーマットで行ごとにデータを解析します。 -行と値は連続してリストされ、区切り文字はありません。 -データがバイナリフォーマットであるため、`FORMAT RowBinary`の後の区切り文字は次のように厳密に指定されます: - -- 任意の数のホワイトスペース: - - `' '`(スペース - コード `0x20`) - - `'\t'`(タブ - コード `0x09`) - - `'\f'`(フォームフィード - コード `0x0C`) -- 正確に1つの改行シーケンスの後: - - Windowsスタイル `"\r\n"` - - またはUnixスタイル `'\n'` -- すぐにバイナリデータが続きます。 - -:::note -このフォーマットは、行ベースであるため、[Native](../Native.md)フォーマットより効率が低いです。 -::: - -次のデータ型については、注意が必要です: - -- [整数](../../../sql-reference/data-types/int-uint.md)は固定長のリトルエンディアン表現を使用します。例えば、`UInt64`は8バイトを使用します。 -- [DateTime](../../../sql-reference/data-types/datetime.md)はUnixタイムスタンプを値として持つ`UInt32`として表現されます。 -- [Date](../../../sql-reference/data-types/date.md)は`1970-01-01`からの日数を値として持つUInt16オブジェクトとして表現されます。 -- [String](../../../sql-reference/data-types/string.md)は可変幅整数(varint)(符号なしの[`LEB128`](https://en.wikipedia.org/wiki/LEB128))として表現され、その後に文字列のバイトが続きます。 -- [FixedString](../../../sql-reference/data-types/fixedstring.md)は、単にバイトの列として表現されます。 -- [配列](../../../sql-reference/data-types/array.md)は可変幅整数(varint)(符号なしの[LEB128](https://en.wikipedia.org/wiki/LEB128))として表現され、その後に配列の要素が続きます。 - 
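Before the notes on `Nullable` values that follow, a small illustrative sketch of the encodings just listed (little-endian fixed-width integers, varint-length-prefixed strings). The table name and column layout are assumptions invented for the example; `Nullable` columns and the other types described here are not covered.

```python
# Hedged sketch: build RowBinary-encoded rows for a hypothetical table
# (user_id UInt64, name String) using only the Python standard library.
import struct

def encode_row(user_id: int, name: str) -> bytes:
    name_bytes = name.encode("utf-8")
    # UInt64 -> 8 bytes, little-endian.
    row = struct.pack("<Q", user_id)
    # String -> varint length followed by the raw bytes; for strings shorter than
    # 128 bytes the varint is a single byte, which keeps this sketch simple.
    assert len(name_bytes) < 128
    row += bytes([len(name_bytes)]) + name_bytes
    return row

payload = encode_row(1, "alice") + encode_row(2, "bob")
with open("rows.bin", "wb") as f:
    f.write(payload)
# rows.bin could then be sent to the server, e.g.:
#   cat rows.bin | clickhouse-client --query "INSERT INTO example (user_id, name) FORMAT RowBinary"
```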
-[NULL](/sql-reference/syntax#null)サポートのために、各[Nullable](/sql-reference/data-types/nullable.md)値の前に`1`または`0`を含む追加のバイトが追加されます。 -- `1`の場合、その値は`NULL`であり、このバイトは別の値として解釈されます。 -- `0`の場合、そのバイトの後の値は`NULL`ではありません。 - -`RowBinary`フォーマットと`RawBlob`フォーマットの比較については、[Raw Formats Comparison](../RawBLOB.md/#raw-formats-comparison)を参照してください。 - -## 使用例 {#example-usage} - -## フォーマット設定 {#format-settings} - - diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/RowBinary/RowBinary.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/RowBinary/RowBinary.md.hash deleted file mode 100644 index 69f2d70b9ef..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/RowBinary/RowBinary.md.hash +++ /dev/null @@ -1 +0,0 @@ -b287c6f91b808560 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/RowBinary/RowBinaryWithDefaults.md b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/RowBinary/RowBinaryWithDefaults.md deleted file mode 100644 index 59dd0cedec1..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/RowBinary/RowBinaryWithDefaults.md +++ /dev/null @@ -1,40 +0,0 @@ ---- -alias: [] -description: 'RowBinaryWithDefaults フォーマットのドキュメント' -input_format: true -keywords: -- 'RowBinaryWithDefaults' -output_format: false -slug: '/interfaces/formats/RowBinaryWithDefaults' -title: 'RowBinaryWithDefaults' ---- - -import RowBinaryFormatSettings from './_snippets/common-row-binary-format-settings.md' - -| Input | Output | Alias | -|-------|--------|-------| -| ✔ | ✗ | | - -## 説明 {#description} - -[`RowBinary`](./RowBinary.md) フォーマットに似ていますが、各カラムの前にデフォルト値を使用する必要があるかどうかを示す追加のバイトがあります。 - -## 使用例 {#example-usage} - -例: - -```sql title="クエリ" -SELECT * FROM FORMAT('RowBinaryWithDefaults', 'x UInt32 default 42, y UInt32', x'010001000000') -``` -```response title="レスポンス" -┌──x─┬─y─┐ -│ 42 │ 1 │ -└────┴───┘ -``` - -- カラム `x` には、デフォルト値を使用する必要があることを示すバイト `01` が1つだけあります。このバイトの後には他のデータは提供されていません。 -- カラム `y` のデータは、カラムに実際の値があることを示すバイト `00` から始まり、後続のデータ `01000000` から読み取る必要があります。 - -## フォーマット設定 {#format-settings} - - diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/RowBinary/RowBinaryWithDefaults.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/RowBinary/RowBinaryWithDefaults.md.hash deleted file mode 100644 index 256733e0300..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/RowBinary/RowBinaryWithDefaults.md.hash +++ /dev/null @@ -1 +0,0 @@ -ee3176d2ee3c633d diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/RowBinary/RowBinaryWithNames.md b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/RowBinary/RowBinaryWithNames.md deleted file mode 100644 index cefba30e43c..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/RowBinary/RowBinaryWithNames.md +++ /dev/null @@ -1,33 +0,0 @@ ---- -description: 'RowBinaryWithNames形式のドキュメント' -input_format: true -keywords: -- 'RowBinaryWithNames' -output_format: true -slug: '/interfaces/formats/RowBinaryWithNames' -title: 'RowBinaryWithNames' ---- - -import RowBinaryFormatSettings from './_snippets/common-row-binary-format-settings.md' - -| Input | Output | Alias | -|-------|--------|-------| -| ✔ | ✔ | | - -## 説明 {#description} - -[`RowBinary`](./RowBinary.md) フォーマットに似ていますが、ヘッダーが追加されています: - -- [`LEB128`](https://en.wikipedia.org/wiki/LEB128) エンコードされたカラム数 (N)。 -- N の `String` がカラム名を指定します。 - -## 使用例 
{#example-usage} - -## フォーマット設定 {#format-settings} - - - -:::note -- 設定 [`input_format_with_names_use_header`](/operations/settings/settings-formats.md/#input_format_with_names_use_header) が `1` に設定されている場合、インプットデータのカラムはその名前によってテーブルのカラムにマッピングされ、名前が不明なカラムはスキップされます。 -- 設定 [`input_format_skip_unknown_fields`](/operations/settings/settings-formats.md/#input_format_skip_unknown_fields) が `1` に設定されている場合、そうでない場合は最初の行がスキップされます。 -::: diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/RowBinary/RowBinaryWithNames.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/RowBinary/RowBinaryWithNames.md.hash deleted file mode 100644 index 00500011a49..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/RowBinary/RowBinaryWithNames.md.hash +++ /dev/null @@ -1 +0,0 @@ -9c27513746d22fd2 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/RowBinary/RowBinaryWithNamesAndTypes.md b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/RowBinary/RowBinaryWithNamesAndTypes.md deleted file mode 100644 index d13e9db4287..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/RowBinary/RowBinaryWithNamesAndTypes.md +++ /dev/null @@ -1,38 +0,0 @@ ---- -alias: [] -description: 'RowBinaryWithNamesAndTypes フォーマットのドキュメント' -input_format: true -keywords: -- 'RowBinaryWithNamesAndTypes' -output_format: true -slug: '/interfaces/formats/RowBinaryWithNamesAndTypes' -title: 'RowBinaryWithNamesAndTypes' ---- - -import RowBinaryFormatSettings from './_snippets/common-row-binary-format-settings.md' - -| Input | Output | Alias | -|-------|--------|-------| -| ✔ | ✔ | | - -## 説明 {#description} - -[RowBinary](./RowBinary.md) 形式に似ていますが、ヘッダーが追加されています: - -- [`LEB128`](https://en.wikipedia.org/wiki/LEB128)エンコードされたカラムの数 (N)。 -- N個の`String`でカラム名を指定。 -- N個の`String`でカラムタイプを指定。 - -## 使用例 {#example-usage} - -## フォーマット設定 {#format-settings} - - - -:::note -設定 [`input_format_with_names_use_header`](/operations/settings/settings-formats.md/#input_format_with_names_use_header) が1に設定されている場合、 -入力データのカラムは、名前によってテーブルのカラムにマッピングされ、未知の名前のカラムは、設定 [input_format_skip_unknown_fields](/operations/settings/settings-formats.md/#input_format_skip_unknown_fields) が1に設定されている場合はスキップされます。 -そうでない場合、最初の行はスキップされます。 -設定 [`input_format_with_types_use_header`](/operations/settings/settings-formats.md/#input_format_with_types_use_header) が`1`に設定されている場合、 -入力データのタイプは、テーブルの対応するカラムのタイプと比較されます。そうでない場合、2行目はスキップされます。 -::: diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/RowBinary/RowBinaryWithNamesAndTypes.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/RowBinary/RowBinaryWithNamesAndTypes.md.hash deleted file mode 100644 index 55c61e9084e..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/RowBinary/RowBinaryWithNamesAndTypes.md.hash +++ /dev/null @@ -1 +0,0 @@ -acd05d245cce69ca diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/RowBinary/_snippets/common-row-binary-format-settings.md b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/RowBinary/_snippets/common-row-binary-format-settings.md deleted file mode 100644 index 108de18473b..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/RowBinary/_snippets/common-row-binary-format-settings.md +++ /dev/null @@ -1,17 +0,0 @@ ---- -{} ---- - - - - - -以下の設定は、すべての `RowBinary` タイプ形式に共通です。 - -| 設定 | 説明 | デフォルト | 
-|--------------------------------------------------------------------------------------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|----------| -| [`format_binary_max_string_size`](/operations/settings/settings-formats.md/#format_binary_max_string_size) | RowBinary形式のStringに対して許可される最大サイズ。 | `1GiB` | -| [`output_format_binary_encode_types_in_binary_format`](/operations/settings/formats#input_format_binary_decode_types_in_binary_format) | [`RowBinaryWithNamesAndTypes`](../RowBinaryWithNamesAndTypes.md) 出力形式で、タイプ名の文字列の代わりに [`binary encoding`](/sql-reference/data-types/data-types-binary-encoding.md) を使用してヘッダーにタイプを記述できるようにします。 | `false` | -| [`input_format_binary_decode_types_in_binary_format`](/operations/settings/formats#input_format_binary_decode_types_in_binary_format) | [`RowBinaryWithNamesAndTypes`](../RowBinaryWithNamesAndTypes.md) 入力形式で、タイプ名の文字列の代わりに [`binary encoding`](/sql-reference/data-types/data-types-binary-encoding.md) を使用してヘッダーにタイプを読み込むことを許可します。 | `false` | -| [`output_format_binary_write_json_as_string`](/operations/settings/settings-formats.md/#output_format_binary_write_json_as_string) | [`RowBinary`](../RowBinary.md) 出力形式で、[`JSON`](/sql-reference/data-types/newjson.md) データ型の値を `JSON` [String](/sql-reference/data-types/string.md) 値として書き込むことを許可します。 | `false` | -| [`input_format_binary_read_json_as_string`](/operations/settings/settings-formats.md/#input_format_binary_read_json_as_string) | [`RowBinary`](../RowBinary.md) 入力形式で、[`JSON`](/sql-reference/data-types/newjson.md) データ型の値を `JSON` [String](/sql-reference/data-types/string.md) 値として読み込むことを許可します。 | `false` | diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/RowBinary/_snippets/common-row-binary-format-settings.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/RowBinary/_snippets/common-row-binary-format-settings.md.hash deleted file mode 100644 index b5d23d59617..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/RowBinary/_snippets/common-row-binary-format-settings.md.hash +++ /dev/null @@ -1 +0,0 @@ -acc9044d4ac76de2 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/SQLInsert.md b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/SQLInsert.md deleted file mode 100644 index 3e3848848ca..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/SQLInsert.md +++ /dev/null @@ -1,48 +0,0 @@ ---- -alias: [] -description: 'SQLInsert フォーマットのドキュメント' -input_format: false -keywords: -- 'SQLInsert' -output_format: true -slug: '/interfaces/formats/SQLInsert' -title: 'SQLInsert' ---- - - - -| Input | Output | Alias | -|-------|--------|-------| -| ✗ | ✔ | | - -## 説明 {#description} - -データを `INSERT INTO table (columns...) VALUES (...), (...) 
...;` ステートメントのシーケンスとして出力します。 - -## 使用例 {#example-usage} - -例: - -```sql -SELECT number AS x, number + 1 AS y, 'Hello' AS z FROM numbers(10) FORMAT SQLInsert SETTINGS output_format_sql_insert_max_batch_size = 2 -``` - -```sql -INSERT INTO table (x, y, z) VALUES (0, 1, 'Hello'), (1, 2, 'Hello'); -INSERT INTO table (x, y, z) VALUES (2, 3, 'Hello'), (3, 4, 'Hello'); -INSERT INTO table (x, y, z) VALUES (4, 5, 'Hello'), (5, 6, 'Hello'); -INSERT INTO table (x, y, z) VALUES (6, 7, 'Hello'), (7, 8, 'Hello'); -INSERT INTO table (x, y, z) VALUES (8, 9, 'Hello'), (9, 10, 'Hello'); -``` - -このフォーマットで出力されたデータを読むには、[MySQLDump](../formats/MySQLDump.md) 入力フォーマットを使用できます。 - -## フォーマット設定 {#format-settings} - -| 設定 | 説明 | デフォルト | -|-------------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------------|--------------| -| [`output_format_sql_insert_max_batch_size`](../../operations/settings/settings-formats.md/#output_format_sql_insert_max_batch_size) | 1つのINSERTステートメントでの最大行数。 | `65505` | -| [`output_format_sql_insert_table_name`](../../operations/settings/settings-formats.md/#output_format_sql_insert_table_name) | 出力INSERTクエリのテーブル名。 | `'table'` | -| [`output_format_sql_insert_include_column_names`](../../operations/settings/settings-formats.md/#output_format_sql_insert_include_column_names) | INSERTクエリにカラム名を含めるか。 | `true` | -| [`output_format_sql_insert_use_replace`](../../operations/settings/settings-formats.md/#output_format_sql_insert_use_replace) | INSERTの代わりにREPLACEステートメントを使用。 | `false` | -| [`output_format_sql_insert_quote_names`](../../operations/settings/settings-formats.md/#output_format_sql_insert_quote_names) | カラム名を「\`」文字で引用符で囲む。 | `true` | diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/SQLInsert.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/SQLInsert.md.hash deleted file mode 100644 index b7d49804b86..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/SQLInsert.md.hash +++ /dev/null @@ -1 +0,0 @@ -5d79fd1f6941969f diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/TabSeparated/TSKV.md b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/TabSeparated/TSKV.md deleted file mode 100644 index 007fb40deb1..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/TabSeparated/TSKV.md +++ /dev/null @@ -1,64 +0,0 @@ ---- -alias: [] -description: 'TSKVフォーマットのドキュメント' -input_format: true -keywords: -- 'TSKV' -output_format: true -slug: '/interfaces/formats/TSKV' -title: 'TSKV' ---- - - - -| Input | Output | Alias | -|-------|--------|-------| -| ✔ | ✔ | | - -## 説明 {#description} - -[`TabSeparated`](./TabSeparated.md) フォーマットに似ていますが、`name=value` 形式で値を出力します。 -名前は [`TabSeparated`](./TabSeparated.md) フォーマットと同様にエスケープされ、`=` シンボルもエスケープされます。 - -```text -SearchPhrase= count()=8267016 -SearchPhrase=bathroom interior design count()=2166 -SearchPhrase=clickhouse count()=1655 -SearchPhrase=2014 spring fashion count()=1549 -SearchPhrase=freeform photos count()=1480 -SearchPhrase=angelina jolie count()=1245 -SearchPhrase=omsk count()=1112 -SearchPhrase=photos of dog breeds count()=1091 -SearchPhrase=curtain designs count()=1064 -SearchPhrase=baku count()=1000 -``` - - -```sql title="クエリ" -SELECT * FROM t_null FORMAT TSKV -``` - -```text title="レスポンス" -x=1 y=\N -``` - -:::note -小さなカラムが多数ある場合、このフォーマットは効果的ではなく、一般的には使用する理由はありません。 -それでも、効率の面では 
[`JSONEachRow`](../JSON/JSONEachRow.md) フォーマットとあまり変わりません。 -::: - -パースには、異なるカラムの値の順序はサポートされています。 -一部の値が省略されることは許可されており、それらはデフォルト値と等しいと見なされます。 -この場合、ゼロと空の行がデフォルト値として使用されます。 -テーブルに指定できる複雑な値はデフォルトとしてサポートされていません。 - -パースでは、`=` シンボルや値なしで追加のフィールド `tskv` を追加することができます。このフィールドは無視されます。 - -インポート時には、未知の名前のカラムはスキップされます。 -[`input_format_skip_unknown_fields`](/operations/settings/settings-formats.md/#input_format_skip_unknown_fields) が `1` に設定されている場合です。 - -[NULL](/sql-reference/syntax.md) は `\N` としてフォーマットされます。 - -## 使用例 {#example-usage} - -## フォーマット設定 {#format-settings} diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/TabSeparated/TSKV.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/TabSeparated/TSKV.md.hash deleted file mode 100644 index 1ccbc772194..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/TabSeparated/TSKV.md.hash +++ /dev/null @@ -1 +0,0 @@ -19ca4f55faba577c diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/TabSeparated/TabSeparated.md b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/TabSeparated/TabSeparated.md deleted file mode 100644 index c2735850f3e..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/TabSeparated/TabSeparated.md +++ /dev/null @@ -1,120 +0,0 @@ ---- -alias: -- 'TSV' -description: 'TSVフォーマットのドキュメント' -input_format: true -keywords: -- 'TabSeparated' -- 'TSV' -output_format: true -slug: '/interfaces/formats/TabSeparated' -title: 'TabSeparated' ---- - - - - -| Input | Output | Alias | -|-------|--------|--------| -| ✔ | ✔ | `TSV` | - -## 説明 {#description} - -TabSeparatedフォーマットでは、データは行単位で書き込まれます。各行はタブで区切られた値を含みます。各値の後にはタブが続きますが、行の最後の値の後には行末が続きます。厳密にUnixの行末がどこでも仮定されます。最後の行にも終了時に行末が含まれている必要があります。値はテキストフォーマットで書かれ、引用符で囲まれることはなく、特殊文字はエスケープされます。 - -このフォーマットは `TSV` という名前でも利用可能です。 - -`TabSeparated` フォーマットは、カスタムプログラムやスクリプトを使用してデータを処理するのに便利です。HTTPインターフェースやコマンドラインクライアントのバッチモードでデフォルトで使用されます。このフォーマットでは、異なるDBMS間でデータを転送することも可能です。例えば、MySQLからダンプを取得し、ClickHouseにアップロードすることができますし、その逆も可能です。 - -`TabSeparated` フォーマットは、合計値を出力すること(WITH TOTALSを使用する場合)や、極端な値を出力すること('extremes' が 1 に設定されている場合)をサポートしています。これらのケースでは、合計値と極端な値がメインデータの後に出力されます。メイン結果、合計値、および極端な値は、空行によって互いに区切られています。例: - -```sql -SELECT EventDate, count() AS c FROM test.hits GROUP BY EventDate WITH TOTALS ORDER BY EventDate FORMAT TabSeparated - -2014-03-17 1406958 -2014-03-18 1383658 -2014-03-19 1405797 -2014-03-20 1353623 -2014-03-21 1245779 -2014-03-22 1031592 -2014-03-23 1046491 - -1970-01-01 8873898 - -2014-03-17 1031592 -2014-03-23 1406958 -``` - -## データフォーマット {#tabseparated-data-formatting} - -整数は10進形式で書かれます。数字には先頭に追加の "+" が含まれることがあります(解析時には無視され、フォーマット時には記録されません)。非負の数字には負の符号を含むことはできません。読み取り時には、空文字列をゼロとして解析したり(符号付き型の場合)、単にマイナス符号だけの文字列をゼロとして解析したりすることが許可されています。対応するデータ型に収まらない数字は、エラーメッセージなしで別の数字として解析される場合があります。 - -浮動小数点数は10進形式で書かれ、点が小数点として使用されます。指数表記がサポートされており、'inf', '+inf', '-inf', 'nan' もサポートされています。浮動小数点数のエントリは、小数点で始まったり終わったりすることがあります。フォーマット時には、浮動小数点数で精度が失われる場合があります。解析時には、最も近いマシンで表現可能な数値を厳密に読む必要はありません。 - -日付はYYYY-MM-DD形式で書かれ、同じ形式で解析されますが、任意の文字が区切りに使用されます。時間を含む日付は `YYYY-MM-DD hh:mm:ss` 形式で書かれ、同じ形式で解析されますが、任意の文字が区切りに使用されます。これらはすべて、クライアントまたはサーバーが起動したときのシステムのタイムゾーンで発生します(どちらがデータをフォーマットするかに依存します)。時間を含む日付については、夏時間は指定されていません。したがって、ダンプが夏時間中の時間を含んでいる場合、ダンプはデータと一意に一致せず、解析は2つの時間のうちの1つを選択します。読み取り操作中、無効な日付や時間を含む日付は自然にオーバーフローとして解析されるか、またはnullの日付および時間として解析され、エラーメッセージは表示されません。 - -例外として、時間を含む日付の解析はUnixタイムスタンプ形式でもサポートされており、その形式はちょうど10桁の10進数から構成される必要があります。結果はタイムゾーンに依存しません。形式 `YYYY-MM-DD 
hh:mm:ss` と `NNNNNNNNNN` は自動的に区別されます。 - -文字列はバックスラッシュでエスケープされた特殊文字で出力されます。出力に使用されるエスケープシーケンスは次のとおりです: `\b`, `\f`, `\r`, `\n`, `\t`, `\0`, `\'`, `\\` 。解析も `\a`, `\v`, および `\xHH`(16進エスケープシーケンス)や任意の `\c` シーケンスをサポートしており、ここで `c` は任意の文字です(これらのシーケンスは `c` に変換されます)。したがって、データの読み取りは、行末を `\n` または `\` として書き込むか、行末としてもサポートされる形式をサポートしています。例えば、単語間にスペースではなく行末がある文字列 `Hello world` は、以下のいずれかのバリエーションで解析可能です: - -```text -Hello\nworld - -Hello\ -world -``` - -2番目のバリエーションは、MySQLがタブ区切りのダンプを作成するときにこれを使用するため、サポートされています。 - -TabSeparatedフォーマットでデータを渡す際にエスケープする必要がある最小限の文字セットは、タブ、行末(LF)、およびバックスラッシュです。 - -エスケープされるシンボルのセットは小さく、出力結果が壊れるような文字列値に遭遇することがあります。 - -配列は、角括弧内のカンマ区切りの値のリストとして書かれます。配列内の数値項目は通常どおりフォーマットされます。`Date` および `DateTime` 型は単一の引用符で書かれます。文字列は、上記と同じエスケープルールに従って単一の引用符で書かれます。 - -[NULL](/sql-reference/syntax.md)は、設定 [format_tsv_null_representation](/operations/settings/settings-formats.md/#format_tsv_null_representation) に従ってフォーマットされます(デフォルト値は `\N` です)。 - -入力データでは、ENUM値は名前またはIDとして表現できます。まず、入力値をENUM名に一致させようとします。失敗した場合、入力値が数値であれば、その数値をENUM IDに一致させようとします。入力データがENUM IDのみを含む場合、ENUMの解析を最適化するために設定 [input_format_tsv_enum_as_number](/operations/settings/settings-formats.md/#input_format_tsv_enum_as_number) を有効にすることを推奨します。 - -[Nested](/sql-reference/data-types/nested-data-structures/index.md) 構造の各要素は配列として表されます。 - -例えば: - -```sql -CREATE TABLE nestedt -( - `id` UInt8, - `aux` Nested( - a UInt8, - b String - ) -) -ENGINE = TinyLog -``` -```sql -INSERT INTO nestedt Values ( 1, [1], ['a']) -``` -```sql -SELECT * FROM nestedt FORMAT TSV -``` - -```response -1 [1] ['a'] -``` - -## 使用例 {#example-usage} - -## フォーマット設定 {#format-settings} - -| 設定 | 説明 | デフォルト | -|------------------------------------------------------------------------------------------------------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|---------| -| [`format_tsv_null_representation`](/operations/settings/settings-formats.md/#format_tsv_null_representation) | TSVフォーマットにおけるカスタムNULL表示。 | `\N` | -| [`input_format_tsv_empty_as_default`](/operations/settings/settings-formats.md/#input_format_tsv_empty_as_default) | TSV入力の空フィールドをデフォルト値として扱います。複雑なデフォルト式には [input_format_defaults_for_omitted_fields](/operations/settings/settings-formats.md/#input_format_defaults_for_omitted_fields) も有効にする必要があります。 | `false` | -| [`input_format_tsv_enum_as_number`](/operations/settings/settings-formats.md/#input_format_tsv_enum_as_number) | TSV形式で挿入されたENUM値をENUMインデックスとして扱います。 | `false` | -| [`input_format_tsv_use_best_effort_in_schema_inference`](/operations/settings/settings-formats.md/#input_format_tsv_use_best_effort_in_schema_inference) | TSVフォーマットでスキーマを推測するために、いくつかの調整とヒューリスティックを使用します。無効にすると、すべてのフィールドが文字列として推測されます。 | `true` | -| [`output_format_tsv_crlf_end_of_line`](/operations/settings/settings-formats.md/#output_format_tsv_crlf_end_of_line) | trueに設定されている場合、TSV出力フォーマットの行の終わりは `\r\n` となり、 `\n` にはなりません。 | `false` | -| [`input_format_tsv_crlf_end_of_line`](/operations/settings/settings-formats.md/#input_format_tsv_crlf_end_of_line) | trueに設定されている場合、TSV入力フォーマットの行の終わりは `\r\n` となり、 `\n` にはなりません。 | `false` | -| [`input_format_tsv_skip_first_lines`](/operations/settings/settings-formats.md/#input_format_tsv_skip_first_lines) | データの先頭で指定した行数をスキップします。 | `0` | -| 
[`input_format_tsv_detect_header`](/operations/settings/settings-formats.md/#input_format_tsv_detect_header) | TSVフォーマットで名称と型を自動的に検出します。 | `true` | -| [`input_format_tsv_skip_trailing_empty_lines`](/operations/settings/settings-formats.md/#input_format_tsv_skip_trailing_empty_lines) | データの末尾でトレーリング空行をスキップします。 | `false` | -| [`input_format_tsv_allow_variable_number_of_columns`](/operations/settings/settings-formats.md/#input_format_tsv_allow_variable_number_of_columns) | TSVフォーマットで変則的なカラム数を許可し、余分なカラムを無視し、不足しているカラムにはデフォルト値を使用します。 | `false` | diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/TabSeparated/TabSeparated.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/TabSeparated/TabSeparated.md.hash deleted file mode 100644 index b3cdf56bc78..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/TabSeparated/TabSeparated.md.hash +++ /dev/null @@ -1 +0,0 @@ -73df08fb18ff0c5f diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/TabSeparated/TabSeparatedRaw.md b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/TabSeparated/TabSeparatedRaw.md deleted file mode 100644 index 82e500b1f84..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/TabSeparated/TabSeparatedRaw.md +++ /dev/null @@ -1,32 +0,0 @@ ---- -alias: -- 'TSVRaw' -- 'Raw' -description: 'TabSeparatedRawフォーマットのドキュメント' -input_format: true -keywords: -- 'TabSeparatedRaw' -output_format: true -slug: '/interfaces/formats/TabSeparatedRaw' -title: 'TabSeparatedRaw' ---- - - - -| Input | Output | Alias | -|-------|--------|-----------------| -| ✔ | ✔ | `TSVRaw`, `Raw` | - -## 説明 {#description} - -[`TabSeparated`](/interfaces/formats/TabSeparated) フォーマットとは異なり、行はエスケープなしで書き込まれます。 - -:::note -このフォーマットで解析する際は、各フィールドにタブや行区切りは許可されません。 -::: - -`TabSeparatedRaw` フォーマットと `RawBlob` フォーマットの比較については、[Raw Formats Comparison](../RawBLOB.md/#raw-formats-comparison)をご覧ください。 - -## 例の使用法 {#example-usage} - -## フォーマット設定 {#format-settings} diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/TabSeparated/TabSeparatedRaw.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/TabSeparated/TabSeparatedRaw.md.hash deleted file mode 100644 index 46e0f664e19..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/TabSeparated/TabSeparatedRaw.md.hash +++ /dev/null @@ -1 +0,0 @@ -d50a06b25fe44b34 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/TabSeparated/TabSeparatedRawWithNames.md b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/TabSeparated/TabSeparatedRawWithNames.md deleted file mode 100644 index 4e3b1c53dea..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/TabSeparated/TabSeparatedRawWithNames.md +++ /dev/null @@ -1,32 +0,0 @@ ---- -alias: -- 'TSVRawWithNames' -- 'RawWithNames' -description: 'TabSeparatedRawWithNamesフォーマットのドキュメント' -input_format: true -keywords: -- 'TabSeparatedRawWithNames' -- 'TSVRawWithNames' -- 'RawWithNames' -output_format: true -slug: '/interfaces/formats/TabSeparatedRawWithNames' -title: 'TabSeparatedRawWithNames' ---- - - - -| Input | Output | Alias | -|-------|--------|-----------------------------------| -| ✔ | ✔ | `TSVRawWithNames`, `RawWithNames` | - -## 説明 {#description} - -[`TabSeparatedWithNames`](./TabSeparatedWithNames.md) 形式とは異なり、行がエスケープなしで書き込まれます。 - -:::note -この形式で解析する際、各フィールド内ではタブや改行が許可されていません。 -::: - -## 使用例 
{#example-usage} - -## フォーマット設定 {#format-settings} diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/TabSeparated/TabSeparatedRawWithNames.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/TabSeparated/TabSeparatedRawWithNames.md.hash deleted file mode 100644 index 0210550f53e..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/TabSeparated/TabSeparatedRawWithNames.md.hash +++ /dev/null @@ -1 +0,0 @@ -fdd3f8cdfa92d35d diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/TabSeparated/TabSeparatedRawWithNamesAndTypes.md b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/TabSeparated/TabSeparatedRawWithNamesAndTypes.md deleted file mode 100644 index eafa09195dd..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/TabSeparated/TabSeparatedRawWithNamesAndTypes.md +++ /dev/null @@ -1,33 +0,0 @@ ---- -alias: -- 'TSVRawWithNamesAndTypes' -- 'RawWithNamesAndTypes' -description: 'TabSeparatedRawWithNamesAndTypes フォーマットのドキュメント' -input_format: true -keywords: -- 'TabSeparatedRawWithNamesAndTypes' -- 'TSVRawWithNamesAndTypes' -- 'RawWithNamesAndTypes' -output_format: true -slug: '/interfaces/formats/TabSeparatedRawWithNamesAndTypes' -title: 'TabSeparatedRawWithNamesAndTypes' ---- - - - -| Input | Output | Alias | -|-------|--------|---------------------------------------------------| -| ✔ | ✔ | `TSVRawWithNamesAndNames`, `RawWithNamesAndNames` | - -## 説明 {#description} - -[`TabSeparatedWithNamesAndTypes`](./TabSeparatedWithNamesAndTypes.md) 形式とは異なり、 -行はエスケープなしで書き込まれます。 - -:::note -この形式で解析する際には、各フィールドにタブや改行を含めることはできません。 -::: - -## 例の利用法 {#example-usage} - -## 形式の設定 {#format-settings} diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/TabSeparated/TabSeparatedRawWithNamesAndTypes.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/TabSeparated/TabSeparatedRawWithNamesAndTypes.md.hash deleted file mode 100644 index ac03612c700..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/TabSeparated/TabSeparatedRawWithNamesAndTypes.md.hash +++ /dev/null @@ -1 +0,0 @@ -62eba132b8afd242 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/TabSeparated/TabSeparatedWithNames.md b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/TabSeparated/TabSeparatedWithNames.md deleted file mode 100644 index a15566e51c9..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/TabSeparated/TabSeparatedWithNames.md +++ /dev/null @@ -1,33 +0,0 @@ ---- -alias: -- 'TSVWithNames' -description: 'TabSeparatedWithNames フォーマットのドキュメント' -input_format: true -keywords: -- 'TabSeparatedWithNames' -output_format: true -slug: '/interfaces/formats/TabSeparatedWithNames' -title: 'TabSeparatedWithNames' ---- - - - -| Input | Output | Alias | -|-------|--------|--------------------------------| -| ✔ | ✔ | `TSVWithNames`, `RawWithNames` | - -## 説明 {#description} - -[`TabSeparated`](./TabSeparated.md) 形式と異なり、最初の行にカラム名が書かれています。 - -解析中、最初の行にはカラム名が含まれていることが期待されます。カラム名を使用して、その位置を特定し、正しさを確認できます。 - -:::note -[`input_format_with_names_use_header`](../../../operations/settings/settings-formats.md/#input_format_with_names_use_header) 設定が `1` に設定されている場合、 -入力データのカラムはその名前によってテーブルのカラムにマッピングされます。未知の名前のカラムは、[`input_format_skip_unknown_fields`](../../../operations/settings/settings-formats.md/#input_format_skip_unknown_fields) 設定が `1` に設定されている場合はスキップされます。 
-そうでなければ、最初の行はスキップされます。 -::: - -## 使用例 {#example-usage} - -## 形式設定 {#format-settings} diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/TabSeparated/TabSeparatedWithNames.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/TabSeparated/TabSeparatedWithNames.md.hash deleted file mode 100644 index 1316c3797a5..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/TabSeparated/TabSeparatedWithNames.md.hash +++ /dev/null @@ -1 +0,0 @@ -79eaea90369eda3f diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/TabSeparated/TabSeparatedWithNamesAndTypes.md b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/TabSeparated/TabSeparatedWithNamesAndTypes.md deleted file mode 100644 index b76a1e85c39..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/TabSeparated/TabSeparatedWithNamesAndTypes.md +++ /dev/null @@ -1,29 +0,0 @@ ---- -description: 'TabSeparatedWithNamesAndTypes形式のドキュメント' -keywords: -- 'TabSeparatedWithNamesAndTypes' -slug: '/interfaces/formats/TabSeparatedWithNamesAndTypes' -title: 'TabSeparatedWithNamesAndTypes' ---- - - - -| Input | Output | Alias | -|-------|--------|------------------------------------------------| -| ✔ | ✔ | `TSVWithNamesAndTypes`, `RawWithNamesAndTypes` | - -## 説明 {#description} - -`TabSeparated`([`TabSeparated`](./TabSeparated.md))フォーマットとは異なり、カラム名が最初の行に書かれ、カラムタイプが二行目に記載されます。 - -:::note -- 設定 [`input_format_with_names_use_header`](../../../operations/settings/settings-formats.md/#input_format_with_names_use_header) が `1` に設定されている場合、 -入力データのカラムは名前によってテーブルのカラムにマッピングされます。未知の名前のカラムは、設定 [`input_format_skip_unknown_fields`](../../../operations/settings/settings-formats.md/#input_format_skip_unknown_fields) が 1 に設定されている場合はスキップされます。 -そうでなければ、最初の行はスキップされます。 -- 設定 [`input_format_with_types_use_header`](../../../operations/settings/settings-formats.md/#input_format_with_types_use_header) が `1` に設定されている場合、 -入力データのタイプはテーブルの対応するカラムのタイプと比較されます。そうでなければ、二行目はスキップされます。 -::: - -## 使用例 {#example-usage} - -## フォーマット設定 {#format-settings} diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/TabSeparated/TabSeparatedWithNamesAndTypes.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/TabSeparated/TabSeparatedWithNamesAndTypes.md.hash deleted file mode 100644 index e14074727e8..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/TabSeparated/TabSeparatedWithNamesAndTypes.md.hash +++ /dev/null @@ -1 +0,0 @@ -d33779e2156773cd diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Template/Template.md b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Template/Template.md deleted file mode 100644 index 8299f1d4a98..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Template/Template.md +++ /dev/null @@ -1,238 +0,0 @@ ---- -alias: [] -description: 'Template フォーマットのドキュメント' -input_format: true -keywords: -- 'Template' -output_format: true -slug: '/interfaces/formats/Template' -title: 'テンプレート' ---- - - - -| Input | Output | Alias | -|-------|--------|-------| -| ✔ | ✔ | | - -## 説明 {#description} - -他の標準形式が提供するよりも多くのカスタマイズが必要な場合、`Template`形式ではユーザーが値のプレースホルダーを含むカスタムフォーマット文字列を指定し、データに対するエスケープルールを定義できます。 - -以下の設定を使用します: - -| 設定 | 説明 | 
-|----------------------------------------------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------| -| [`format_template_row`](#format_template_row) | 行のフォーマット文字列を含むファイルへのパスを指定します。 | -| [`format_template_resultset`](#format_template_resultset) | 行のフォーマット文字列を含むファイルへのパスを指定します。 | -| [`format_template_rows_between_delimiter`](#format_template_rows_between_delimiter) | 行間の区切り文字を指定します。最後の行を除くすべての行の後に印刷(または期待)されます(デフォルトは`\n`)。 | -| `format_template_row_format` | 行のフォーマット文字列を指定します [インライン](#inline_specification)。 | -| `format_template_resultset_format` | 結果セットのフォーマット文字列を指定します [インライン](#inline_specification)。 | -| 他の形式の一部の設定(例:`output_format_json_quote_64bit_integers`を使用する場合の`JSON`エスケープ) | | - -## 設定とエスケープルール {#settings-and-escaping-rules} - -### format_template_row {#format_template_row} - -設定`format_template_row`は、以下の構文を持つ行のフォーマット文字列を含むファイルへのパスを指定します: - -```text -delimiter_1${column_1:serializeAs_1}delimiter_2${column_2:serializeAs_2} ... delimiter_N -``` - -ここで: - -| 構文の一部 | 説明 | -|---------------|-------------------------------------------------------------------------------------------------------------| -| `delimiter_i` | 値の間の区切り文字(`$`記号は`$$`としてエスケープできます) | -| `column_i` | 選択または挿入する値のカラムの名前またはインデックス(空の場合はカラムはスキップされます) | -| `serializeAs_i` | カラム値に対するエスケープルール。 | - -以下のエスケープルールがサポートされています: - -| エスケープルール | 説明 | -|-------------------------|--------------------------------------| -| `CSV`, `JSON`, `XML` | 同名の形式に類似 | -| `Escaped` | `TSV`に類似 | -| `Quoted` | `Values`に類似 | -| `Raw` | エスケープなし、`TSVRaw`に類似 | -| `None` | エスケープルールなし - 以下の注意を参照 | - -:::note -エスケープルールが省略された場合、`None`が使用されます。`XML`は出力のみに適しています。 -::: - -例を見てみましょう。以下のフォーマット文字列が与えられたとします: - -```text -Search phrase: ${s:Quoted}, count: ${c:Escaped}, ad price: $$${p:JSON}; -``` - -この場合、以下の値が印刷されます(`SELECT`を使用する場合)または期待されます(`INPUT`を使用する場合)、それぞれ区切り文字`Search phrase:`, `, count:`, `, ad price: $`および`;`の間に: - -- `s`(エスケープルール`Quoted`を使用) -- `c`(エスケープルール`Escaped`を使用) -- `p`(エスケープルール`JSON`を使用) - -例えば: - -- `INSERT`の場合、以下の行は期待されるテンプレートに一致し、`Search phrase`, `count`, `ad price`の各カラムに`bathroom interior design`, `2166`, `$3`の値を読み込みます。 -- `SELECT`の場合、以下の行が出力されます。これは、`bathroom interior design`, `2166`, `$3`の値がすでにテーブルの`Search phrase`, `count`, `ad price`の各カラムに格納されていると仮定しています。 - -```yaml -Search phrase: 'bathroom interior design', count: 2166, ad price: $3; -``` - -### format_template_rows_between_delimiter {#format_template_rows_between_delimiter} - -設定`format_template_rows_between_delimiter`は、行間の区切り文字を指定します。これは、最後の行を除くすべての行の後に印刷(または期待)されます(デフォルトは`\n`)。 - -### format_template_resultset {#format_template_resultset} - -設定`format_template_resultset`は、結果セットのフォーマット文字列を含むファイルへのパスを指定します。 - -結果セットのフォーマット文字列は行のフォーマット文字列と同じ構文を持っています。 -プレフィックス、サフィックス、追加情報を印刷する方法を指定することができ、以下のプレースホルダーを含みます: - -- `data`は、`format_template_row`形式のデータを持つ行で、`format_template_rows_between_delimiter`で区切られています。このプレースホルダーはフォーマット文字列内で最初のプレースホルダーである必要があります。 -- `totals`は、`format_template_row`形式の合計値を持つ行です(`WITH TOTALS`を使用する場合)。 -- `min`は、最小値を持つ行で、`format_template_row`形式です(極端な値が1に設定されている場合)。 -- `max`は、最大値を持つ行で、`format_template_row`形式です(極端な値が1に設定されている場合)。 -- `rows`は、出力行の総数です。 -- `rows_before_limit`は、LIMITなしで存在したであろう最小行数です。クエリがLIMITを含む場合のみ出力されます。クエリがGROUP BYを含む場合、`rows_before_limit_at_least`は、LIMITなしで存在した正確な行数です。 -- `time`は、リクエストの実行時間(秒単位)です。 -- `rows_read`は、読み込まれた行の数です。 -- `bytes_read`は、読み込まれたバイト数(非圧縮)です。 - -プレースホルダー`data`,`totals`, 
`min`および`max`には、エスケープルールが指定されてはならない(または`None`が明示的に指定されなければならない)。残りのプレースホルダーには、任意のエスケープルールを指定できます。 - -:::note -`format_template_resultset`の設定が空文字列の場合、`${data}`がデフォルト値として使用されます。 -::: - -挿入クエリでは、フォーマットに従って列やフィールドをスキップできます(プレフィックスまたはサフィックスを参照)。 - -### インライン指定 {#inline_specification} - -フォーマット設定(`format_template_row`, `format_template_resultset`で設定された内容)をクラスタ内のすべてのノードにディレクトリとして展開するのが困難または不可能な場合があります。 -さらに、フォーマットが非常に単純であり、ファイルに配置する必要がないこともあります。 - -このような場合には、ファイルへのパスではなく、`format_template_row_format`(`format_template_row`用)および`format_template_resultset_format`(`format_template_resultset`用)を使用して、クエリ内で直接テンプレート文字列を設定できます。 - -:::note -フォーマット文字列とエスケープシーケンスに関するルールは、次の場合と同じです: -- `format_template_row_format`を使用する際の[`format_template_row`](#format_template_row)。 -- `format_template_resultset_format`を使用する際の[`format_template_resultset`](#format_template_resultset)。 -::: - -## 使用例 {#example-usage} - -`Template`形式を使用する2つの例を見てみましょう。まずはデータを選択する場合、次にデータを挿入する場合です。 - -### データの選択 {#selecting-data} - -```sql -SELECT SearchPhrase, count() AS c FROM test.hits GROUP BY SearchPhrase ORDER BY c DESC LIMIT 5 FORMAT Template SETTINGS -format_template_resultset = '/some/path/resultset.format', format_template_row = '/some/path/row.format', format_template_rows_between_delimiter = '\n    ' -``` - -```text title="/some/path/resultset.format" -<!DOCTYPE HTML> -<html> <head> <title>Search phrases</title> </head> - <body> -  <table border="1"> <caption>Search phrases</caption> -    <tr> <th>Search phrase</th> <th>Count</th> </tr> -    ${data} -  </table> -  <table border="1"> <caption>Max</caption> -    ${max} -  </table> -  <b>Processed ${rows_read:XML} rows in ${time:XML} sec</b> - </body> -</html> -``` - -```text title="/some/path/row.format" -<tr> <td>${0:XML}</td> <td>${1:XML}</td> </tr> -``` - -結果: - -```html -<!DOCTYPE HTML> -<html> <head> <title>Search phrases</title> </head> - <body> -  <table border="1"> <caption>Search phrases</caption> -    <tr> <th>Search phrase</th> <th>Count</th> </tr> -    <tr> <td></td> <td>8267016</td> </tr> -    <tr> <td>bathroom interior design</td> <td>2166</td> </tr> -    <tr> <td>clickhouse</td> <td>1655</td> </tr> -    <tr> <td>spring 2014 fashion</td> <td>1549</td> </tr> -    <tr> <td>freeform photos</td> <td>1480</td> </tr> -  </table> -  <table border="1"> <caption>Max</caption> -    <tr> <td>8873898</td> </tr> -  </table> -  <b>Processed 3095973 rows in 0.1569913 sec</b> - </body> -</html> -``` - -### データの挿入 {#inserting-data} - -```text -Some header -Page views: 5, User id: 4324182021466249494, Useless field: hello, Duration: 146, Sign: -1 -Page views: 6, User id: 4324182021466249494, Useless field: world, Duration: 185, Sign: 1 -Total rows: 2 -``` - -```sql -INSERT INTO UserActivity SETTINGS -format_template_resultset = '/some/path/resultset.format', format_template_row = '/some/path/row.format' -FORMAT Template -``` - -```text title="/some/path/resultset.format" -Some header\n${data}\nTotal rows: ${:CSV}\n -``` - -```text title="/some/path/row.format" -Page views: ${PageViews:CSV}, User id: ${UserID:CSV}, Useless field: ${:CSV}, Duration: ${Duration:CSV}, Sign: ${Sign:CSV} -``` - -`PageViews`, `UserID`, `Duration`および`Sign`はカラム名としてプレースホルダー内にあります。行の`Useless field`の後とサフィックスの`\nTotal rows:`の後の値は無視されます。 -入力データ内のすべての区切り文字は、指定されたフォーマット文字列内の区切り文字と厳密に一致する必要があります。 - -### インライン指定 {#in-line-specification} - -手動でMarkdownテーブルをフォーマットするのに疲れましたか? この例では、`Template`形式とインライン指定設定を使用して、`system.formats`テーブルからいくつかのClickHouse形式の名前を`SELECT`し、Markdownテーブルとしてフォーマットするという簡単な作業を達成する方法を見てみましょう。これは、`Template`形式と設定`format_template_row_format`および`format_template_resultset_format`を使用することで簡単に実現できます。 - -前の例では、結果セットと行のフォーマット文字列を別々のファイルで指定し、それらのファイルへのパスをそれぞれ`format_template_resultset`および`format_template_row`設定で指定しました。ここではインラインで指定します。なぜなら、テンプレートが非常に単純であり、わずかに`|`と`-`を使ってMarkdownテーブルを作るだけだからです。設定`format_template_resultset_format`を使用して結果セットテンプレート文字列を指定します。テーブルヘッダーを作るために、`${data}`の前に`|ClickHouse Formats|\n|---|\n`を追加しました。行のテンプレート文字列に対して``|`${0:XML}`|``を指定するために、`format_template_row_format`設定を使います。`Template`形式は、与えられたフォーマットで行をプレースホルダー`${data}`に挿入します。この例ではカラムが1つのみですが、追加したい場合は `${1:XML}`, `${2:XML}`...などを行のテンプレート文字列に追加し、適切なエスケープルールを選択すればよいのです。この例では、エスケープルールを`XML`にしました。 - -```sql title="クエリ" -WITH formats AS -( - SELECT * FROM system.formats - ORDER BY rand() - LIMIT 5 -) -SELECT * FROM formats -FORMAT Template -SETTINGS - format_template_row_format='|`${0:XML}`|', - format_template_resultset_format='|ClickHouse Formats|\n|---|\n${data}\n' -``` - -見てください!
これでMarkdownテーブルを作るために手動でたくさんの`|`や`-`を追加する手間が省けました: - -```response title="レスポンス" -|ClickHouse Formats| -|---| -|`BSONEachRow`| -|`CustomSeparatedWithNames`| -|`Prometheus`| -|`DWARF`| -|`Avro`| -``` diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Template/Template.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Template/Template.md.hash deleted file mode 100644 index b2e9f4d8097..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Template/Template.md.hash +++ /dev/null @@ -1 +0,0 @@ -c38b84fc3b2cd22f diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Template/TemplateIgnoreSpaces.md b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Template/TemplateIgnoreSpaces.md deleted file mode 100644 index 460b4c511b7..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Template/TemplateIgnoreSpaces.md +++ /dev/null @@ -1,47 +0,0 @@ ---- -alias: [] -description: 'TemplateIgnoreSpaces フォーマットのドキュメンテーション' -input_format: true -keywords: -- 'TemplateIgnoreSpaces' -output_format: false -slug: '/interfaces/formats/TemplateIgnoreSpaces' -title: 'TemplateIgnoreSpaces' ---- - - - -| Input | Output | Alias | -|-------|--------|-------| -| ✔ | ✗ | | - -## 説明 {#description} - -[`Template`] に似ていますが、入力ストリーム内の区切り文字と値の間のホワイトスペースをスキップします。ただし、フォーマット文字列にホワイトスペース文字が含まれている場合は、これらの文字が入力ストリームに存在することが期待されます。また、空のプレースホルダー(`${}` または `${:None}`)を指定して、いくつかの区切り文字を別々の部分に分割し、それらの間のスペースを無視させることもできます。これらのプレースホルダーはホワイトスペース文字をスキップするためのみに使用されます。すべての行でカラムの値の順序が同じであれば、このフォーマットを使用して `JSON` を読み込むことも可能です。 - -:::note -このフォーマットは入力にのみ適しています。 -::: - -## 使用例 {#example-usage} - -次のリクエストは、フォーマット [JSON](/interfaces/formats/JSON) の出力例からデータを挿入するために使用できます: - -```sql -INSERT INTO table_name -SETTINGS - format_template_resultset = '/some/path/resultset.format', - format_template_row = '/some/path/row.format', - format_template_rows_between_delimiter = ',' -FORMAT TemplateIgnoreSpaces -``` - -```text title="/some/path/resultset.format" -{${}"meta"${}:${:JSON},${}"data"${}:${}[${data}]${},${}"totals"${}:${:JSON},${}"extremes"${}:${:JSON},${}"rows"${}:${:JSON},${}"rows_before_limit_at_least"${}:${:JSON}${}} -``` - -```text title="/some/path/row.format" -{${}"SearchPhrase"${}:${}${phrase:JSON}${},${}"c"${}:${}${cnt:JSON}${}} -``` - -## フォーマット設定 {#format-settings} diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Template/TemplateIgnoreSpaces.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Template/TemplateIgnoreSpaces.md.hash deleted file mode 100644 index c58662ac29d..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Template/TemplateIgnoreSpaces.md.hash +++ /dev/null @@ -1 +0,0 @@ -88fe04293047c640 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Values.md b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Values.md deleted file mode 100644 index a0a0aebdecc..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Values.md +++ /dev/null @@ -1,46 +0,0 @@ ---- -alias: [] -description: '値の形式のドキュメント' -input_format: true -keywords: -- 'Values' -output_format: true -slug: '/interfaces/formats/Values' -title: 'Values' ---- - - - -| Input | Output | Alias | -|-------|--------|-------| -| ✔ | ✔ | | - -## 説明 {#description} - -`Values` フォーマットは、各行をカッコ内に表示します。 - -- 行はカンマで区切られ、最後の行の後にはカンマが付きません。 -- カッコ内の値もカンマで区切られます。 -- 数値は引用符なしで小数形式で出力されます。 
-- 配列は角括弧内に出力されます。 -- 文字列、日付、及び時間付きの日付は引用符内に出力されます。 -- エスケープルールと解析は [TabSeparated](TabSeparated/TabSeparated.md) フォーマットに似ています。 - -フォーマット中は余分なスペースは挿入されませんが、解析中は許可され、スキップされます(配列の値内のスペースは許可されていません)。 -[`NULL`](/sql-reference/syntax.md) は `NULL` として表されます。 - -`Values` フォーマットでデータを渡す際にエスケープする必要がある最低限の文字セットは次の通りです: -- シングルクォート -- バックスラッシュ - -これは `INSERT INTO t VALUES ...` で使用されるフォーマットですが、クエリ結果のフォーマットにも使用できます。 - -## 使用例 {#example-usage} - -## フォーマット設定 {#format-settings} - -| 設定 | 説明 | デフォルト | -|---------------------------------------------------------------------------------------------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-----------| -| [`input_format_values_interpret_expressions`](../../operations/settings/settings-formats.md/#input_format_values_interpret_expressions) | フィールドがストリーミングパーサーによって解析できなかった場合、SQLパーサーを実行し、SQL式として解釈を試みます。 | `true` | -| [`input_format_values_deduce_templates_of_expressions`](../../operations/settings/settings-formats.md/#input_format_values_deduce_templates_of_expressions) | フィールドがストリーミングパーサーによって解析できなかった場合、SQLパーサーを実行し、SQL式のテンプレートを推測し、そのテンプレートを使用してすべての行を解析し、その後すべての行の式を解釈しようとします。 | `true` | -| [`input_format_values_accurate_types_of_literals`](../../operations/settings/settings-formats.md/#input_format_values_accurate_types_of_literals) | テンプレートを使用して式を解析および解釈する際に、リテラルの実際の型を確認して、オーバーフローや精度の問題を避けます。 | `true` | diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Values.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Values.md.hash deleted file mode 100644 index 820ef73972e..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Values.md.hash +++ /dev/null @@ -1 +0,0 @@ -e26c3713404d1916 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Vertical.md b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Vertical.md deleted file mode 100644 index e1137b9e7d0..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Vertical.md +++ /dev/null @@ -1,53 +0,0 @@ ---- -alias: [] -description: 'Vertical formatのドキュメント' -input_format: false -keywords: -- 'Vertical' -output_format: true -slug: '/interfaces/formats/Vertical' -title: 'Vertical' ---- - - - -| Input | Output | Alias | -|-------|--------|-------| -| ✗ | ✔ | | - -## 説明 {#description} - -指定したカラム名で各値を別々の行に出力します。この形式は、各行が大量のカラムで構成されている場合に、一つまたは少数の行を印刷するのに便利です。 -[`NULL`](/sql-reference/syntax.md)は`ᴺᵁᴸᴸ`として出力されます。 - -## 使用例 {#example-usage} - -例: - -```sql -SELECT * FROM t_null FORMAT Vertical -``` - -```response -Row 1: -────── -x: 1 -y: ᴺᵁᴸᴸ -``` - -Vertical形式では行はエスケープされません: - -```sql -SELECT 'string with \'quotes\' and \t with some special \n characters' AS test FORMAT Vertical -``` - -```response -Row 1: -────── -test: string with 'quotes' and with some special - characters -``` - -この形式はクエリ結果の出力にのみ適しており、データをテーブルに挿入するための解析には適していません。 - -## フォーマット設定 {#format-settings} diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Vertical.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Vertical.md.hash deleted file mode 100644 index dc2d447237b..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Vertical.md.hash +++ /dev/null @@ -1 +0,0 @@ -077775393295b7e3 diff --git 
a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/XML.md b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/XML.md deleted file mode 100644 index 32c5b762f5d..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/XML.md +++ /dev/null @@ -1,97 +0,0 @@ ---- -alias: [] -description: 'XML形式のドキュメント' -input_format: false -keywords: -- 'XML' -output_format: true -slug: '/interfaces/formats/XML' -title: 'XML' ---- - - - -| Input | Output | Alias | -|-------|--------|-------| -| ✗ | ✔ | | - -## 説明 {#description} - -`XML` フォーマットは出力専用であり、解析には適していません。 - -カラム名が許可されているフォーマットでない場合、要素名として 'field' が使用されます。一般的に、XML 構造は JSON 構造に従います。 -JSON と同様に、無効な UTF-8 シーケンスは置換文字 `�` に変更されるため、出力テキストは有効な UTF-8 シーケンスで構成されます。 - -文字列値では、文字 `<` と `&` はそれぞれ `<` と `&` にエスケープされます。 - -配列は `HelloWorld...` として出力され、タプルは `HelloWorld...` として出力されます。 - -## 使用例 {#example-usage} - -例: - -```xml - - - - - - SearchPhrase - String - - - count() - UInt64 - - - - - - - 8267016 - - - 浴室のインテリアデザイン - 2166 - - - clickhouse - 1655 - - - 2014年春のファッション - 1549 - - - 自由形式の写真 - 1480 - - - アンジェリーナ・ジョリー - 1245 - - - オムスク - 1112 - - - 犬種の写真 - 1091 - - - カーテンデザイン - 1064 - - - バクー - 1000 - - - 10 - 141137 - -``` - -## フォーマット設定 {#format-settings} - -## XML {#xml} diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/XML.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/XML.md.hash deleted file mode 100644 index d8fbd419529..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/XML.md.hash +++ /dev/null @@ -1 +0,0 @@ -14d811ee2726c910 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/grpc.md b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/grpc.md deleted file mode 100644 index 751b16df058..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/grpc.md +++ /dev/null @@ -1,104 +0,0 @@ ---- -description: 'ClickHouse における gRPC インターフェースのドキュメント' -sidebar_label: 'gRPC インターフェース' -sidebar_position: 25 -slug: '/interfaces/grpc' -title: 'gRPC Interface' ---- - - - - -# gRPC インターフェース - -## 概要 {#grpc-interface-introduction} - -ClickHouseは[gRPC](https://grpc.io/)インターフェースをサポートしています。これは、HTTP/2と[Protocol Buffers](https://en.wikipedia.org/wiki/Protocol_Buffers)を使用するオープンソースのリモートプロシージャコールシステムです。ClickHouseにおけるgRPCの実装は次の機能をサポートしています: - -- SSL; -- 認証; -- セッション; -- 圧縮; -- 同じチャネルを通じた並列クエリ; -- クエリのキャンセル; -- 進捗とログの取得; -- 外部テーブル。 - -インターフェースの仕様は[clickhouse_grpc.proto](https://github.com/ClickHouse/ClickHouse/blob/master/src/Server/grpc_protos/clickhouse_grpc.proto)に記載されています。 - -## gRPC 設定 {#grpc-interface-configuration} - -gRPCインターフェースを使用するには、主要な[サーバー設定](../operations/configuration-files.md)で`grpc_port`を設定します。その他の設定オプションは以下の例を参照してください: - -```xml -9100 - - false - - - /path/to/ssl_cert_file - /path/to/ssl_key_file - - - false - - - /path/to/ssl_ca_cert_file - - - deflate - - - medium - - - -1 - -1 - - - false - -``` - -## 組み込みクライアント {#grpc-client} - -提供された[仕様](https://github.com/ClickHouse/ClickHouse/blob/master/src/Server/grpc_protos/clickhouse_grpc.proto)を使用して、gRPCがサポートする任意のプログラミング言語でクライアントを作成できます。また、組み込みのPythonクライアントを使用することもできます。これはリポジトリの[utils/grpc-client/clickhouse-grpc-client.py](https://github.com/ClickHouse/ClickHouse/blob/master/utils/grpc-client/clickhouse-grpc-client.py)に配置されています。組み込みクライアントは[ggrpcioとgrpcio-tools](https://grpc.io/docs/languages/python/quickstart)のPythonモジュールを必要とします。 - -クライアントは以下の引数をサポートしています: - -- `--help` – ヘルプメッセージを表示して終了します。 -- `--host HOST, -h HOST` – 
サーバー名。デフォルト値:`localhost`。IPv4またはIPv6アドレスも使用できます。 -- `--port PORT` – 接続するポート。このポートはClickHouseサーバー設定で有効である必要があります(`grpc_port`を参照)。デフォルト値:`9100`。 -- `--user USER_NAME, -u USER_NAME` – ユーザー名。デフォルト値:`default`。 -- `--password PASSWORD` – パスワード。デフォルト値:空文字列。 -- `--query QUERY, -q QUERY` – 非対話モードで処理するクエリ。 -- `--database DATABASE, -d DATABASE` – デフォルトのデータベース。指定されていない場合、サーバー設定で設定された現在のデータベースが使用されます(デフォルトは`default`)。 -- `--format OUTPUT_FORMAT, -f OUTPUT_FORMAT` – 結果出力の[フォーマット](formats.md)。対話モードのデフォルト値:`PrettyCompact`。 -- `--debug` – デバッグ情報を表示することを有効にします。 - -対話モードでクライアントを実行するには、`--query`引数なしで呼び出します。 - -バッチモードでは、データを`stdin`を介して渡すことができます。 - -**クライアント使用例** - -以下の例では、テーブルが作成され、CSVファイルからデータがロードされます。その後、テーブルの内容がクエリされます。 - -```bash -./clickhouse-grpc-client.py -q "CREATE TABLE grpc_example_table (id UInt32, text String) ENGINE = MergeTree() ORDER BY id;" -echo -e "0,Input data for\n1,gRPC protocol example" > a.csv -cat a.csv | ./clickhouse-grpc-client.py -q "INSERT INTO grpc_example_table FORMAT CSV" - -./clickhouse-grpc-client.py --format PrettyCompact -q "SELECT * FROM grpc_example_table;" -``` - -結果: - -```text -┌─id─┬─text──────────────────┐ -│ 0 │ Input data for │ -│ 1 │ gRPC protocol example │ -└────┴───────────────────────┘ -``` diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/grpc.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/grpc.md.hash deleted file mode 100644 index 12703213824..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/grpc.md.hash +++ /dev/null @@ -1 +0,0 @@ -b8ed80b33d6dccec diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/http.md b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/http.md deleted file mode 100644 index a0092b87ee0..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/http.md +++ /dev/null @@ -1,1007 +0,0 @@ ---- -description: 'ClickHouse の HTTP インターフェースに関するドキュメントで、任意のプラットフォームやプログラミング言語から ClickHouse - への REST API アクセスを提供します' -sidebar_label: 'HTTP インターフェース' -sidebar_position: 15 -slug: '/interfaces/http' -title: 'HTTP Interface' ---- - -import PlayUI from '@site/static/images/play.png'; -import Image from '@theme/IdealImage'; - - - -# HTTPインターフェース -## 前提条件 {#prerequisites} - -この記事の例では、次のものが必要です: -- 稼働中のClickHouseサーバーインスタンス -- `curl`がインストールされていること。UbuntuやDebianの場合、`sudo apt install curl`を実行するか、この[ドキュメント](https://curl.se/download.html)を参照してインストール手順を確認してください。 -## 概要 {#overview} - -HTTPインターフェースを使用すると、REST APIの形式で任意のプラットフォームから任意のプログラミング言語でClickHouseを利用できます。HTTPインターフェースはネイティブインターフェースよりも機能が制限されていますが、より良い言語サポートがあります。 - -デフォルトでは、`clickhouse-server`は次のポートでリッスンしています: -- HTTP用のポート8123 -- HTTPS用のポート8443が有効にできます - -パラメータなしで`GET /`リクエストを行うと、ステータスコード200が返され、文字列 "Ok." が付随します。 - -```bash -$ curl 'http://localhost:8123/' -Ok. -``` - -"Ok."は[`http_server_default_response`](../operations/server-configuration-parameters/settings.md#http_server_default_response)で定義されたデフォルト値であり、変更することができます。 - -また、[HTTPレスポンスコードの注意事項](#http_response_codes_caveats)も参照してください。 -## Webユーザーインターフェース {#web-ui} - -ClickHouseにはウェブユーザーインターフェースが含まれており、次のアドレスからアクセスできます: - -```text -http://localhost:8123/play -``` - -ウェブUIは、クエリの実行時の進捗表示、クエリのキャンセル、および結果のストリーミングをサポートしています。 -クエリパイプラインのグラフやチャートを表示する秘密の機能があります。 - -ウェブUIは、あなたのような専門家のために設計されています。 - -ClickHouse Web UIのスクリーンショット - -ヘルスチェックスクリプトでは、`GET /ping`リクエストを使用します。このハンドラーは常に "Ok."(最後に改行あり)を返します。バージョン18.12.13以降で利用可能です。レプリカの遅延を確認するために、`/replicas_status`も参照してください。 - -```bash -$ curl 'http://localhost:8123/ping' -Ok. 
-$ curl 'http://localhost:8123/replicas_status' -Ok. -``` -## HTTP/HTTPS経由でのクエリ実行 {#querying} - -HTTP/HTTPS経由でクエリを実行するには、次の3つのオプションがあります: -- リクエストをURLの 'query' パラメータとして送信 -- POSTメソッドを使用 -- クエリの最初の部分を 'query' パラメータに、残りをPOSTで送信 - -:::note -デフォルトで、URLのサイズは1 MiBに制限されています。これは`http_max_uri_size`設定で変更できます。 -::: - -成功した場合、ステータスコード200とレスポンスボディに結果が返されます。 -エラーが発生した場合、ステータスコード500とレスポンスボディにエラーの説明テキストが返されます。 - -GETメソッドを使用したリクエストは「読み取り専用」です。これは、データを変更するクエリにはPOSTメソッドのみを使用できることを意味します。 -クエリ自体をPOSTボディに送信することも、URLパラメータで送信することもできます。以下にいくつかの例を示します。 - -以下の例では、`SELECT 1`クエリを送信するためにcurlが使用されています。スペースはURLエンコードされた形式であることに注意してください:`%20`。 - -```bash title="command" -curl 'http://localhost:8123/?query=SELECT%201' -``` - -```response title="Response" -1 -``` - -この例では、wgetが`-nv`(非冗長)および`-O-`パラメータを使用して結果をターミナルに出力しています。 -この場合、スペースをURLエンコードする必要はありません: - -```bash title="command" -wget -nv -O- 'http://localhost:8123/?query=SELECT 1' -``` - -```response -1 -``` - -この例では、生のHTTPリクエストをnetcatにパイプしています: - -```bash title="command" -echo -ne 'GET /?query=SELECT%201 HTTP/1.0\r\n\r\n' | nc localhost 8123 -``` - -```response title="response" -HTTP/1.0 200 OK -Date: Wed, 27 Nov 2019 10:30:18 GMT -Connection: Close -Content-Type: text/tab-separated-values; charset=UTF-8 -X-ClickHouse-Server-Display-Name: clickhouse.ru-central1.internal -X-ClickHouse-Query-Id: 5abe861c-239c-467f-b955-8a201abb8b7f -X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0","elapsed_ns":"662334"} - -1 -``` - -ご覧の通り、`curl`コマンドは、スペースをURLでエスケープする必要があるため、やや不便です。 -`wget`はすべてを自動的にエスケープしますが、HTTP 1.1においてkeep-aliveやTransfer-Encoding: chunkedを使用する場合にうまく機能しないため、使用は推奨しません。 - -```bash -$ echo 'SELECT 1' | curl 'http://localhost:8123/' --data-binary @- -1 - -$ echo 'SELECT 1' | curl 'http://localhost:8123/?query=' --data-binary @- -1 - -$ echo '1' | curl 'http://localhost:8123/?query=SELECT' --data-binary @- -1 -``` - -クエリの一部がパラメータで送信され、一部がPOSTで送信される場合、これら二つのデータ部分の間に改行が挿入されます。 -例えば、これは機能しません: - -```bash -$ echo 'ECT 1' | curl 'http://localhost:8123/?query=SEL' --data-binary @- -Code: 59, e.displayText() = DB::Exception: Syntax error: failed at position 0: SEL -ECT 1 -, expected One of: SHOW TABLES, SHOW DATABASES, SELECT, INSERT, CREATE, ATTACH, RENAME, DROP, DETACH, USE, SET, OPTIMIZE., e.what() = DB::Exception -``` - -デフォルトでは、データは[`TabSeparated`](formats.md#tabseparated)形式で返されます。 - -`FORMAT`句をクエリに使用して、他のフォーマットを要求できます。例えば: - -```bash title="command" -wget -nv -O- 'http://localhost:8123/?query=SELECT 1, 2, 3 FORMAT JSON' -``` - -```response title="Response" -{ - "meta": - [ - { - "name": "1", - "type": "UInt8" - }, - { - "name": "2", - "type": "UInt8" - }, - { - "name": "3", - "type": "UInt8" - } - ], - - "data": - [ - { - "1": 1, - "2": 2, - "3": 3 - } - ], - - "rows": 1, - - "statistics": - { - "elapsed": 0.000515, - "rows_read": 1, - "bytes_read": 1 - } -} -``` - -`default_format` URLパラメータまたは`X-ClickHouse-Format`ヘッダーを使用して、`TabSeparated`以外のデフォルトフォーマットを指定できます。 - -```bash -$ echo 'SELECT 1 FORMAT Pretty' | curl 'http://localhost:8123/?' 
--data-binary @- -┏━━━┓ -┃ 1 ┃ -┡━━━┩ -│ 1 │ -└───┘ -``` -## HTTP/HTTPS経由での挿入クエリ {#insert-queries} - -データを転送するのに`POST`メソッドが必要です。この場合、クエリの最初の部分をURLパラメータに記述し、データを送信するのにPOSTを使用します。挿入するデータは、例えばMySQLのタブ区切りダンプであることがあります。このようにして、`INSERT`クエリはMySQLの`LOAD DATA LOCAL INFILE`を置き換えます。 -### 例 {#examples} - -テーブルを作成するには: - -```bash -$ echo 'CREATE TABLE t (a UInt8) ENGINE = Memory' | curl 'http://localhost:8123/' --data-binary @- -``` - -データ挿入のために馴染みのある`INSERT`クエリを使用するには: - -```bash -$ echo 'INSERT INTO t VALUES (1),(2),(3)' | curl 'http://localhost:8123/' --data-binary @- -``` - -クエリとは別にデータを送信するには: - -```bash -$ echo '(4),(5),(6)' | curl 'http://localhost:8123/?query=INSERT%20INTO%20t%20VALUES' --data-binary @- -``` - -任意のデータフォーマットを指定できます。例えば、`INSERT INTO t VALUES`を書くときと同じフォーマットである'Values'フォーマットを指定できます: - -```bash -$ echo '(7),(8),(9)' | curl 'http://localhost:8123/?query=INSERT%20INTO%20t%20FORMAT%20Values' --data-binary @- -``` - -タブ区切りダンプからデータを挿入するには、対応するフォーマットを指定します: - -```bash -$ echo -ne '10\n11\n12\n' | curl 'http://localhost:8123/?query=INSERT%20INTO%20t%20FORMAT%20TabSeparated' --data-binary @- -``` - -テーブルの内容を読み取るには: - -```bash -$ curl 'http://localhost:8123/?query=SELECT%20a%20FROM%20t' -7 -8 -9 -10 -11 -12 -1 -2 -3 -4 -5 -6 -``` - -:::note -並行クエリ処理のため、データはランダムな順序で出力されます -::: - -テーブルを削除するには: - -```bash -$ echo 'DROP TABLE t' | curl 'http://localhost:8123/' --data-binary @- -``` - -データテーブルを返さない成功リクエストの場合、空のレスポンスボディが返されます。 -## 圧縮 {#compression} - -圧縮は、大量のデータを転送する際にネットワークトラフィックを削減するためや、一時的に圧縮されたダンプを作成するために使用できます。 - -データを転送する際に内部ClickHouse圧縮フォーマットを使用できます。圧縮されたデータは非標準フォーマットであり、`clickhouse-compressor`プログラムを使用して取り扱う必要があります。これはデフォルトで`clickhouse-client`パッケージにインストールされています。 - -データ挿入の効率を高めるために、[`http_native_compression_disable_checksumming_on_decompress`](../operations/settings/settings.md#http_native_compression_disable_checksumming_on_decompress)設定を使用して、サーバー側のチェックサム検証を無効にします。 - -URLに `compress=1` を指定すると、サーバーは送信するデータを圧縮します。URLに `decompress=1` を指定すると、サーバーは`POST`メソッドで渡されたデータを解凍します。 - -[HTTP圧縮](https://en.wikipedia.org/wiki/HTTP_compression)を使用することもできます。ClickHouseは次の[圧縮方式](https://en.wikipedia.org/wiki/HTTP_compression#Content-Encoding_tokens)をサポートしています: - -- `gzip` -- `br` -- `deflate` -- `xz` -- `zstd` -- `lz4` -- `bz2` -- `snappy` - -圧縮された`POST`リクエストを送信するには、リクエストヘッダー`Content-Encoding: compression_method`を追加します。 - -ClickHouseがレスポンスを圧縮するためには、[`enable_http_compression`](../operations/settings/settings.md#enable_http_compression)設定を有効にし、リクエストに`Accept-Encoding: compression_method`ヘッダーを追加します。 - -データ圧縮レベルは、[`http_zlib_compression_level`](../operations/settings/settings.md#http_zlib_compression_level)設定を使用してすべての圧縮方法に対して設定できます。 - -:::info -一部のHTTPクライアントは、デフォルトでサーバーからのデータを解凍する可能性があり(`gzip`と`deflate`で)、圧縮設定を正しく使用している場合でも解凍されたデータが返されることがあります。 -::: -## 例 {#examples-compression} - -サーバーに圧縮データを送信するには: - -```bash -echo "SELECT 1" | gzip -c | \ -curl -sS --data-binary @- -H 'Content-Encoding: gzip' 'http://localhost:8123/' -``` - -サーバーから圧縮データアーカイブを受信するには: - -```bash -curl -vsS "http://localhost:8123/?enable_http_compression=1" \ --H 'Accept-Encoding: gzip' --output result.gz -d 'SELECT number FROM system.numbers LIMIT 3' - -zcat result.gz -0 -1 -2 -``` - -サーバーから圧縮データを受信し、gunzipを使用して解凍データを受信するには: - -```bash -curl -sS "http://localhost:8123/?enable_http_compression=1" \ --H 'Accept-Encoding: gzip' -d 'SELECT number FROM system.numbers LIMIT 3' | gunzip - -0 -1 -2 -``` -## デフォルトデータベース {#default-database} - -`database` URLパラメータまたは `X-ClickHouse-Database` ヘッダーを使用して、デフォルトデータベースを指定できます。 - -```bash -echo 'SELECT number FROM 
numbers LIMIT 10' | curl 'http://localhost:8123/?database=system' --data-binary @- -0 -1 -2 -3 -4 -5 -6 -7 -8 -9 -``` - -デフォルトでは、サーバー設定に登録されているデータベースがデフォルトデータベースとして使用されます。初期状態では、これは`default`という名前のデータベースです。あるいは、常にテーブル名の前にドットを付けてデータベースを指定できます。 -## 認証 {#authentication} - -ユーザー名とパスワードは、次の3つの方法のいずれかで指定できます: - -1. HTTP基本認証を使用。 - -例えば: - -```bash -echo 'SELECT 1' | curl 'http://user:password@localhost:8123/' -d @- -``` - -2. `user`および`password` URLパラメータに指定 - -:::warning -この方法は、パラメータがWebプロキシによってログに記録され、ブラウザにキャッシュされる可能性があるため、推奨しません。 -::: - -例えば: - -```bash -echo 'SELECT 1' | curl 'http://localhost:8123/?user=user&password=password' -d @- -``` - -3. 'X-ClickHouse-User'および'X-ClickHouse-Key'ヘッダーを使用 - -例えば: - -```bash -echo 'SELECT 1' | curl -H 'X-ClickHouse-User: user' -H 'X-ClickHouse-Key: password' 'http://localhost:8123/' -d @- -``` - -ユーザー名が指定されていない場合は、`default`名が使用されます。パスワードが指定されていない場合は、空のパスワードが使用されます。 -クエリの処理に対して、任意の設定を指定するためにURLパラメータを使用することもできます。 - -例えば: - -```text -http://localhost:8123/?profile=web&max_rows_to_read=1000000000&query=SELECT+1 -``` - -```bash -$ echo 'SELECT number FROM system.numbers LIMIT 10' | curl 'http://localhost:8123/?' --data-binary @- -0 -1 -2 -3 -4 -5 -6 -7 -8 -9 -``` - -詳細については、次を参照してください: -- [設定](/operations/settings/settings) -- [SET](/sql-reference/statements/set) -## HTTPプロトコルでのClickHouseセッションの利用 {#using-clickhouse-sessions-in-the-http-protocol} - -HTTPプロトコルでClickHouseセッションを使用することもできます。そのためには、リクエストに`session_id` `GET`パラメータを追加する必要があります。セッションIDには任意の文字列を使用できます。 - -デフォルトでは、セッションは60秒の非アクティブ状態で終了します。このタイムアウト(秒単位)を変更するには、サーバー設定で`default_session_timeout`の設定を変更するか、リクエストに`session_timeout` `GET`パラメータを追加します。 - -セッションの状態を確認するには、`session_check=1`パラメータを使用します。1つのセッション内で同時に実行できるクエリは1つだけです。 - -クエリの進捗に関する情報は、`X-ClickHouse-Progress`レスポンスヘッダーで受け取ることができます。これを行うには、[`send_progress_in_http_headers`](../operations/settings/settings.md#send_progress_in_http_headers)を有効にします。 - -以下は、ヘッダーのシーケンスの例です: - -```text -X-ClickHouse-Progress: {"read_rows":"2752512","read_bytes":"240570816","total_rows_to_read":"8880128","elapsed_ns":"662334"} -X-ClickHouse-Progress: {"read_rows":"5439488","read_bytes":"482285394","total_rows_to_read":"8880128","elapsed_ns":"992334"} -X-ClickHouse-Progress: {"read_rows":"8783786","read_bytes":"819092887","total_rows_to_read":"8880128","elapsed_ns":"1232334"} -``` - -可能なヘッダーフィールドは次の通りです: - -| ヘッダーフィールド | 説明 | -|---------------------------|---------------------------| -| `read_rows` | 読まれた行の数。 | -| `read_bytes` | 読まれたデータのサイズ(バイト)。 | -| `total_rows_to_read` | 読み取る必要のある行の合計数。| -| `written_rows` | 書き込まれた行の数。 | -| `written_bytes` | 書き込まれたデータのサイズ(バイト)。 | - -HTTP接続が失われても、リクエストは自動的に停止しません。パースとデータフォーマットはサーバー側で行われ、ネットワークを利用することが非効率的な場合があります。 - -以下のオプションパラメータがあります: - -| パラメータ | 説明 | -|---------------------------|---------------------------------------------------| -| `query_id`(オプション) | クエリIDとして渡すことができます(任意の文字列)。[`replace_running_query`](/operations/settings/settings#replace_running_query)| -| `quota_key`(オプション)| クオータキーとして渡すことができます(任意の文字列)。["クオータ"](/operations/quotas) | - -HTTPインターフェースを介して、クエリのための外部データ(外部一時テーブル)を渡すことができます。詳細は、["クエリ処理のための外部データ"](/engines/table-engines/special/external-data)を参照してください。 -## レスポンスバッファリング {#response-buffering} - -レスポンスバッファリングはサーバー側で有効にできます。次のURLパラメータが提供されています: -- `buffer_size` -- `wait_end_of_query` - -次の設定が使用できます: -- [`http_response_buffer_size`](/operations/settings/settings#http_response_buffer_size) -- [`http_wait_end_of_query`](/operations/settings/settings#http_wait_end_of_query) - 
-`buffer_size`は、サーバーメモリにバッファとして保存する結果のバイト数を決定します。結果ボディがこの閾値を超える場合、バッファはHTTPチャネルに書き込まれ、残りのデータがHTTPチャネルに直接送信されます。 - -全体のレスポンスがバッファリングされるようにするには、`wait_end_of_query=1`を設定します。この場合、メモリに保存されないデータは、一時的なサーバーファイルにバッファリングされます。 - -例えば: - -```bash -curl -sS 'http://localhost:8123/?max_result_bytes=4000000&buffer_size=3000000&wait_end_of_query=1' -d 'SELECT toUInt8(number) FROM system.numbers LIMIT 9000000 FORMAT RowBinary' -``` - -:::tip -バッファリングを使用して、レスポンスコードとHTTPヘッダーがクライアントに送信された後にクエリ処理エラーが発生した状況を回避します。この場合、エラーメッセージはレスポンスボディの最後に書き込まれ、クライアント側ではパースの段階でのみエラーを検出できます。 -::: -## クエリパラメータでの役割の設定 {#setting-role-with-query-parameters} - -この機能はClickHouse 24.4で追加されました。 - -特定のシナリオでは、ステートメント自体を実行する前に付与された役割を設定する必要があります。 -ただし、`SET ROLE`とステートメントを同時に送信することはできません。複数のステートメントは許可されていないためです: - -```bash -curl -sS "http://localhost:8123" --data-binary "SET ROLE my_role;SELECT * FROM my_table;" -``` - -上記のコマンドはエラーになります: - -```sql -Code: 62. DB::Exception: Syntax error (Multi-statements are not allowed) -``` - -この制限を克服するために、`role`クエリパラメータを使用します: - -```bash -curl -sS "http://localhost:8123?role=my_role" --data-binary "SELECT * FROM my_table;" -``` - -これは、ステートメントの前に`SET ROLE my_role`を実行するのと同じです。 - -また、複数の`role`クエリパラメータを指定することも可能です: - -```bash -curl -sS "http://localhost:8123?role=my_role&role=my_other_role" --data-binary "SELECT * FROM my_table;" -``` - -この場合、`?role=my_role&role=my_other_role`は、ステートメントの前に`SET ROLE my_role, my_other_role`を実行するのと同様に機能します。 -## HTTPレスポンスコードの注意事項 {#http_response_codes_caveats} - -HTTPプロトコルの制限により、HTTP 200レスポンスコードはクエリが成功した保証にはなりません。 - -以下に例を示します: - -```bash -curl -v -Ss "http://localhost:8123/?max_block_size=1&query=select+sleepEachRow(0.001),throwIf(number=2)from+numbers(5)" -* Trying 127.0.0.1:8123... -... -< HTTP/1.1 200 OK -... -Code: 395. DB::Exception: Value passed to 'throwIf' function is non-zero: while executing 'FUNCTION throwIf(equals(number, 2) :: 1) -> throwIf(equals(number, 2)) -``` - -この動作の理由はHTTPプロトコルの性質です。HTTPヘッダーが最初にHTTPコード200と共に送信され、次にHTTPボディが送信され、その後エラーがプレーンテキストとしてボディに注入されます。 - -この動作は、フォーマットが`Native`、`TSV`、`JSON`などであっても独立しており、エラーメッセージは常にレスポンスストリームの中間にあります。 - -この問題を緩和するために、`wait_end_of_query=1`を有効にします([レスポンスバッファリング](#response-buffering))。この場合、HTTPヘッダーの送信は、クエリが解決されるまで遅延されます。ただし、これは完全に問題を解決するわけではなく、結果はまだ[`http_response_buffer_size`](/operations/settings/settings#http_response_buffer_size)内に収めなければならず、[`send_progress_in_http_headers`](/operations/settings/settings#send_progress_in_http_headers)などの他の設定がヘッダーの遅延に影響を与える可能性があります。 - -:::tip -すべてのエラーをキャッチする唯一の方法は、必要なフォーマットを使用する前にHTTPボディを解析することです。 -::: -## パラメータ付きクエリ {#cli-queries-with-parameters} - -パラメータのあるクエリを作成し、対応するHTTPリクエストパラメータからそれらの値を渡すことができます。詳細については、[CLI用のパラメータ付きクエリ](../interfaces/cli.md#cli-queries-with-parameters)を参照してください。 -### 例 {#example-3} - -```bash -$ curl -sS "
    ?param_id=2¶m_phrase=test" -d "SELECT * FROM table WHERE int_column = {id:UInt8} and string_column = {phrase:String}" -``` -### URLパラメータ内のタブ {#tabs-in-url-parameters} - -クエリパラメータは「エスケープ」形式から解析されます。これは、nullを明示的に解析できるという利点があります。つまり、タブ文字は`\\t`(または`\`とタブ)としてエンコードする必要があります。例えば、次のように`abc`と`123`の間に実際のタブが含まれていて、入力文字列が2つの値に分割されます: - -```bash -curl -sS "http://localhost:8123" -d "SELECT splitByChar('\t', 'abc 123')" -``` - -```response -['abc','123'] -``` - -ただし、URLパラメータで実際のタブを`%09`を使ってエンコードしようとすると、正しく解析されません: - -```bash -curl -sS "http://localhost:8123?param_arg1=abc%09123" -d "SELECT splitByChar('\t', {arg1:String})" -Code: 457. DB::Exception: Value abc 123 cannot be parsed as String for query parameter 'arg1' because it isn't parsed completely: only 3 of 7 bytes was parsed: abc. (BAD_QUERY_PARAMETER) (version 23.4.1.869 (official build)) -``` - -URLパラメータを使用している場合、`\t`を`%5C%09`のようにエンコードする必要があります。例えば: - -```bash -curl -sS "http://localhost:8123?param_arg1=abc%5C%09123" -d "SELECT splitByChar('\t', {arg1:String})" -``` - -```response -['abc','123'] -``` -## 予め定義されたHTTPインターフェース {#predefined_http_interface} - -ClickHouseは特定のクエリをHTTPインターフェースを介してサポートしています。例えば、テーブルにデータを書き込むには次のようにします: - -```bash -$ echo '(4),(5),(6)' | curl 'http://localhost:8123/?query=INSERT%20INTO%20t%20VALUES' --data-binary @- -``` - -ClickHouseは、[Prometheusエクスポータ](https://github.com/ClickHouse/clickhouse_exporter)などのサードパーティツールとの統合を容易にするための予め定義されたHTTPインターフェースもサポートしています。例を見てみましょう。 - -まず、サーバー設定ファイルにこのセクションを追加します。 - -`http_handlers`は複数の`rule`を含むように設定されます。ClickHouseは受信したHTTPリクエストを`rule`内の予め定義されたタイプにマッチさせ、最初にマッチしたルールがハンドラーを実行します。次に、ClickHouseはマッチが成功した場合に対応する予め定義されたクエリを実行します。 - -```yaml title="config.xml" - - - /predefined_query - POST,GET - - predefined_query_handler - SELECT * FROM system.metrics LIMIT 5 FORMAT Template SETTINGS format_template_resultset = 'prometheus_template_output_format_resultset', format_template_row = 'prometheus_template_output_format_row', format_template_rows_between_delimiter = '\n' - - - ... - ... - -``` - -これで、Prometheusフォーマットのデータを取得するためにURLに直接リクエストできます: - -```bash -$ curl -v 'http://localhost:8123/predefined_query' -* Trying ::1... 
-* Connected to localhost (::1) port 8123 (#0) -> GET /predefined_query HTTP/1.1 -> Host: localhost:8123 -> User-Agent: curl/7.47.0 -> Accept: */* -> -< HTTP/1.1 200 OK -< Date: Tue, 28 Apr 2020 08:52:56 GMT -< Connection: Keep-Alive -< Content-Type: text/plain; charset=UTF-8 -< X-ClickHouse-Server-Display-Name: i-mloy5trc -< Transfer-Encoding: chunked -< X-ClickHouse-Query-Id: 96fe0052-01e6-43ce-b12a-6b7370de6e8a -< X-ClickHouse-Format: Template -< X-ClickHouse-Timezone: Asia/Shanghai -< Keep-Alive: timeout=10 -< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0","elapsed_ns":"662334"} -< - -# HELP "Query" "実行中のクエリの数" - -# TYPE "Query" counter -"Query" 1 - - -# HELP "Merge" "実行中のバックグラウンドマージの数" - -# TYPE "Merge" counter -"Merge" 0 - - -# HELP "PartMutation" "ミューテーションの数 (ALTER DELETE/UPDATE)" - -# TYPE "PartMutation" counter -"PartMutation" 0 - - -# HELP "ReplicatedFetch" "レプリカからフェッチされているデータパーツの数" - -# TYPE "ReplicatedFetch" counter -"ReplicatedFetch" 0 - - -# HELP "ReplicatedSend" "レプリカに送信されているデータパーツの数" - -# TYPE "ReplicatedSend" counter -"ReplicatedSend" 0 - -* Connection #0 to host localhost left intact - -* Connection #0 to host localhost left intact -``` - -`http_handlers`の構成オプションは、次のように機能します。 - -`rule`は次のパラメータを設定できます: -- `method` -- `headers` -- `url` -- `handler` - -これらは以下で説明されます: - - - `method`はHTTPリクエストのメソッド部分と一致します。`method`はHTTPプロトコル内の[`method`](https://developer.mozilla.org/en-US/docs/Web/HTTP/Methods)の定義に完全に準拠しています。オプションの構成です。構成ファイルに定義されていない場合、HTTPリクエストのメソッド部分と一致しません。 - - - `url`はHTTPリクエストのURL部分と一致します。 [RE2](https://github.com/google/re2)の正規表現と互換性があります。オプションの構成です。構成ファイルに定義されていない場合、HTTPリクエストのURL部分とは一致しません。 - - - `headers`はHTTPリクエストのヘッダー部分と一致します。RE2の正規表現と互換性があります。オプションの構成です。構成ファイルに定義されていない場合、HTTPリクエストのヘッダー部分とは一致しません。 - - - `handler`は主な処理部分を含みます。現在、`handler`は`type`、`status`、`content_type`、`http_response_headers`、`response_content`、`query`、`query_param_name`を設定できます。`type`は現在、[`predefined_query_handler`](#predefined_query_handler)、[`dynamic_query_handler`](#dynamic_query_handler)、[`static`](#static)の3つのタイプをサポートしています。 - - - `query` — `predefined_query_handler`タイプで使用し、ハンドラー呼び出し時にクエリを実行します。 - - `query_param_name` — `dynamic_query_handler`タイプで使用し、HTTPリクエストパラメータ内の`query_param_name`値に対応する値を抽出して実行します。 - - `status` — `static`タイプで使用し、レスポンスのステータスコードです。 - - `content_type` — いずれのタイプでも使用可能で、レスポンスの[content-type](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Type)です。 - - `http_response_headers` — いずれのタイプでも使用可能で、レスポンスヘッダーのマップです。コンテンツタイプを設定するためにも使用できます。 - - `response_content` — `static`タイプで使用し、ファイルまたは構成からクライアントに送信されるレスポンスコンテンツです。 - -さまざまな`type`の設定方法については、次で説明します。 -### predefined_query_handler {#predefined_query_handler} - -`predefined_query_handler`は`Settings`と`query_params`の値を設定することをサポートしています。設定は、`predefined_query_handler`タイプの`query`として指定できます。 - -`query`の値は`predefined_query_handler`の予め定義されたクエリであり、HTTPリクエストが一致したときにClickHouseが実行し、クエリの結果が返されます。これは必須の構成です。 - -以下の例では、[`max_threads`](../operations/settings/settings.md#max_threads)および[`max_final_threads`](/operations/settings/settings#max_final_threads)設定の値を定義し、その後、これらの設定が成功裏に設定されたかどうかを確認するためにシステムテーブルをクエリしています。 - -:::note -`query`、`play`、`ping`のようなデフォルトの`handlers`を維持するためには、``ルールを追加してください。 -::: - -例えば: - -```yaml - - - [^/]+)]]> - GET - - TEST_HEADER_VALUE - [^/]+)]]> - - - predefined_query_handler - - SELECT name, value FROM system.settings - WHERE name IN ({name_1:String}, {name_2:String}) - - - - - -``` - -```bash -curl -H 'XXX:TEST_HEADER_VALUE' -H 
'PARAMS_XXX:max_final_threads' 'http://localhost:8123/query_param_with_url/max_threads?max_threads=1&max_final_threads=2' -max_final_threads 2 -max_threads 1 -``` - -:::note -1つの`predefined_query_handler`では、1つの`query`のみがサポートされています。 -::: -### dynamic_query_handler {#dynamic_query_handler} - -`dynamic_query_handler`では、クエリがHTTPリクエストのパラメータとして記述されます。`predefined_query_handler`ではクエリが設定ファイルに記述されるのとは異なります。`query_param_name`は`dynamic_query_handler`に設定できます。 - -ClickHouseは、HTTPリクエストのURL内の`query_param_name`値に対応する値を抽出して実行します。`query_param_name`のデフォルト値は`/query`です。これはオプションの構成です。構成ファイルに定義がない場合、そのパラメータは渡されません。 - -この機能を試すために、次の例では`max_threads`と`max_final_threads`の値を定義し、設定が成功裏に設定されたかどうかを確認します。 - -例: - -```yaml - - - - TEST_HEADER_VALUE_DYNAMIC - - dynamic_query_handler - query_param - - - - -``` - -```bash -curl -H 'XXX:TEST_HEADER_VALUE_DYNAMIC' 'http://localhost:8123/own?max_threads=1&max_final_threads=2¶m_name_1=max_threads¶m_name_2=max_final_threads&query_param=SELECT%20name,value%20FROM%20system.settings%20where%20name%20=%20%7Bname_1:String%7D%20OR%20name%20=%20%7Bname_2:String%7D' -max_threads 1 -max_final_threads 2 -``` -### static {#static} - -`static` は [`content_type`](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Type)、[status](https://developer.mozilla.org/en-US/docs/Web/HTTP/Status) および `response_content` を返すことができます。`response_content` は指定されたコンテンツを返します。 - -例えば、メッセージ "Say Hi!" を返すには: - -```yaml - - - GET - xxx - /hi - - static - 402 - text/html; charset=UTF-8 - - en - 43 - - #highlight-next-line - Say Hi! - - - - -``` - -`http_response_headers` は `content_type` の代わりにコンテンツタイプを設定するために使用できます。 - -```yaml - - - GET - xxx - /hi - - static - 402 - #begin-highlight - - text/html; charset=UTF-8 - en - 43 - - #end-highlight - Say Hi! - - - - -``` - -```bash -curl -vv -H 'XXX:xxx' 'http://localhost:8123/hi' -* Trying ::1... -* Connected to localhost (::1) port 8123 (#0) -> GET /hi HTTP/1.1 -> Host: localhost:8123 -> User-Agent: curl/7.47.0 -> Accept: */* -> XXX:xxx -> -< HTTP/1.1 402 Payment Required -< Date: Wed, 29 Apr 2020 03:51:26 GMT -< Connection: Keep-Alive -< Content-Type: text/html; charset=UTF-8 -< Transfer-Encoding: chunked -< Keep-Alive: timeout=10 -< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0","elapsed_ns":"662334"} -< -* Connection #0 to host localhost left intact -Say Hi!% -``` - -クライアントに送信される設定からコンテンツを見つけます。 - -```yaml -
    ]]>
    - - - - GET - xxx - /get_config_static_handler - - static - config://get_config_static_handler - - - -``` - -```bash -$ curl -v -H 'XXX:xxx' 'http://localhost:8123/get_config_static_handler' -* Trying ::1... -* Connected to localhost (::1) port 8123 (#0) -> GET /get_config_static_handler HTTP/1.1 -> Host: localhost:8123 -> User-Agent: curl/7.47.0 -> Accept: */* -> XXX:xxx -> -< HTTP/1.1 200 OK -< Date: Wed, 29 Apr 2020 04:01:24 GMT -< Connection: Keep-Alive -< Content-Type: text/plain; charset=UTF-8 -< Transfer-Encoding: chunked -< Keep-Alive: timeout=10 -< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0","elapsed_ns":"662334"} -< -* Connection #0 to host localhost left intact -
    % -``` - -クライアントに送信されるファイルからコンテンツを見つけるには: - -```yaml - - - GET - xxx - /get_absolute_path_static_handler - - static - text/html; charset=UTF-8 - - 737060cd8c284d8af7ad3082f209582d - - file:///absolute_path_file.html - - - - GET - xxx - /get_relative_path_static_handler - - static - text/html; charset=UTF-8 - - 737060cd8c284d8af7ad3082f209582d - - file://./relative_path_file.html - - - -``` - -```bash -$ user_files_path='/var/lib/clickhouse/user_files' -$ sudo echo "Relative Path File" > $user_files_path/relative_path_file.html -$ sudo echo "Absolute Path File" > $user_files_path/absolute_path_file.html -$ curl -vv -H 'XXX:xxx' 'http://localhost:8123/get_absolute_path_static_handler' -* Trying ::1... -* Connected to localhost (::1) port 8123 (#0) -> GET /get_absolute_path_static_handler HTTP/1.1 -> Host: localhost:8123 -> User-Agent: curl/7.47.0 -> Accept: */* -> XXX:xxx -> -< HTTP/1.1 200 OK -< Date: Wed, 29 Apr 2020 04:18:16 GMT -< Connection: Keep-Alive -< Content-Type: text/html; charset=UTF-8 -< Transfer-Encoding: chunked -< Keep-Alive: timeout=10 -< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0","elapsed_ns":"662334"} -< -Absolute Path File -* Connection #0 to host localhost left intact -$ curl -vv -H 'XXX:xxx' 'http://localhost:8123/get_relative_path_static_handler' -* Trying ::1... -* Connected to localhost (::1) port 8123 (#0) -> GET /get_relative_path_static_handler HTTP/1.1 -> Host: localhost:8123 -> User-Agent: curl/7.47.0 -> Accept: */* -> XXX:xxx -> -< HTTP/1.1 200 OK -< Date: Wed, 29 Apr 2020 04:18:31 GMT -< Connection: Keep-Alive -< Content-Type: text/html; charset=UTF-8 -< Transfer-Encoding: chunked -< Keep-Alive: timeout=10 -< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0","elapsed_ns":"662334"} -< -Relative Path File -* Connection #0 to host localhost left intact -``` -## Valid JSON/XML response on exception during HTTP streaming {#valid-output-on-exception-http-streaming} - -クエリの実行中にHTTPを介して例外が発生することがあります。この場合、データの一部がすでに送信されている場合があります。通常、例外はプレーンテキストでクライアントに送信されます。 -特定のデータ形式を使用してデータを出力していた場合、出力が指定されたデータ形式にとって無効になる可能性があります。 -これを防ぐために、設定 [`http_write_exception_in_output_format`](/operations/settings/settings#http_write_exception_in_output_format) を使用できます(デフォルトで有効)。これにより、ClickHouseは指定された形式で例外を書き込むことができます(現在XMLおよびJSON形式でサポートされています)。 - -例: - -```bash -$ curl 'http://localhost:8123/?query=SELECT+number,+throwIf(number>3)+from+system.numbers+format+JSON+settings+max_block_size=1&http_write_exception_in_output_format=1' -{ - "meta": - [ - { - "name": "number", - "type": "UInt64" - }, - { - "name": "throwIf(greater(number, 2))", - "type": "UInt8" - } - ], - - "data": - [ - { - "number": "0", - "throwIf(greater(number, 2))": 0 - }, - { - "number": "1", - "throwIf(greater(number, 2))": 0 - }, - { - "number": "2", - "throwIf(greater(number, 2))": 0 - } - ], - - "rows": 3, - - "exception": "Code: 395. DB::Exception: Value passed to 'throwIf' function is non-zero: while executing 'FUNCTION throwIf(greater(number, 2) :: 2) -> throwIf(greater(number, 2)) UInt8 : 1'. (FUNCTION_THROW_IF_VALUE_IS_NON_ZERO) (version 23.8.1.1)" -} -``` - -```bash -$ curl 'http://localhost:8123/?query=SELECT+number,+throwIf(number>2)+from+system.numbers+format+XML+settings+max_block_size=1&http_write_exception_in_output_format=1' - - - - - - number - UInt64 - - - throwIf(greater(number, 2)) - UInt8 - - - - - - 0 - 0 - - - 1 - 0 - - - 2 - 0 - - - 3 - Code: 395. 
DB::Exception: Value passed to 'throwIf' function is non-zero: while executing 'FUNCTION throwIf(greater(number, 2) :: 2) -> throwIf(greater(number, 2)) UInt8 : 1'. (FUNCTION_THROW_IF_VALUE_IS_NON_ZERO) (version 23.8.1.1) - -``` diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/http.md.hash b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/http.md.hash deleted file mode 100644 index 44ecffab322..00000000000 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/http.md.hash +++ /dev/null @@ -1 +0,0 @@ -a506cb0feedad5b9 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/images/mysql0.png b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/images/mysql0.png deleted file mode 100644 index f497bc5bab79b384c2fe2838b5cc67fac6ba3422..0000000000000000000000000000000000000000 GIT binary patch
zaV-^1j7G&y<+HB)ptlr8YDI6r@cdkVYqMsib~$Emg?mDiXF>~Ejx{~8HO$kip;M)v zR{=qd3aHm~9XOj1Z6;Eo(jCcXkX_VMj6h4s%2C7w_W7~(hqPx_QL5USYu9?pu;z?( zxWceazp1xcOqRVq-S50-HeHb9ybJ%qK3g3(Qe)*`c#z)_Gh$OCKdw*3ygb*hRg$;A zF8IJ{N>J-w*MSPT@-h{r&UYtM&By7%xBR&AaJpC1PY&E)73@0amSS93ZfK5=d#ZPS zp}nRo`z@wbG)w54WuyjwRK*f0H`WF}@4M__Zm#EqMDiB4#a@k4_;>Of9-J){(=qbP zIc>^Dw7PaVeJcpw(67&v(oo8I49OBa%a!3!f+*AB90JJ>y(7y*jO8Qy4HV7d5us^Z zhZS*tZs1vRt?;p6_5JmbFOaM~w&887*per3kHY$b^t$u4oe)nL#x;o7DOCwV2q$o? za0>RauOR7gpOn?5VMgo(#dP=(jN0ihuy4$pWsDa#9*)+jQkKLS_9kg<)-68;Zi*7y z1wADVTLR;?$-84uz1n6=Q?{h^Qdtjkw=$e2r-rdgkn&DbyRQY#S1$5%Tj*W9>~LdR zWW#B@6X3A1qb(CJzD`$`;QdWjp!rhlnea(0z!@J+%Swf72xyVYldw*?K`hECQO(w> z2G45oJ-XsJO)ObEMs>WxbiSR=D(eg#6uH$3&YvDWEhphJnBbm4W|S&5$yOaiN7QiE zA1WGY9v>~i5!X8bZ#lvA8w|6yHO(UtYp|Quf(HNF#nJ~cufF?5Xy4nm8lnR5Tc1Rx z0rcl12qn^NItz{Ag3pb^=O1mo-{jxl{v95<_sz`saJ_~r_UwFT{n&qKww}^mL-Ps1 zq1qVXHKYA@gv1D5J1~G%A$;MXeiP;KlQy-_k8L9NvF@N zMR^I3x$1@`L<2wqp2V#3onkZg*&i%8L-oLLZeH`dRLoV zLGaSu4I45*LzO+Z>By02VmIrq@7-|zX!j#5Uhklm79aMNqlSW}WNrO$ zx(}saC>(X7wMIGsid1kM@jmH-`>d9J=sV>moVXfZXWhear8RXZ-tT1~R& z;~@97yd zKnXSEs*dUF%L-Sd4}Sd~ps|=i7~NiyanX@3HL)0a-Kh49Io~wlbr#2Q)k2DMmqpH< z7%3z#q{bf3r?s4Nohv$La(dMYkO`4P)e{$nv9AX(wIc4&=xY%?W@YoPAt5|mzk{#P z1}b-ZNYdJ5VXsKPZZ>4fI_={4$jG6W`l=IpVb?wU zH~<}Iw((*!uf%-p0pNDM$x##d0vM^5#}xa+%al(b%K6H}vA2?>I!|PDbzBoigh-nwv+d;9|b{Xe?l|MMRKpLuTr<2wqz=MO-s1&@n+{RV<_wxY%O+K_eP zfW#d+la(c)pG&d7HQ4#q#yxU#ePh2K`rH-tk;EBqt$ojn7@fFgwm&ZSpn=td;jTeX z!dKHq{gZpW3=%aqC?Q5U^wu2t#&T1H-I9+{6f^2N9IdiD=0|iXkre}L<||mEWe~w6 z+3Eb>5QN89Pk3E+s>+G*@%3~04R1}}Ek82;xUk$max$y6?U}k1eD1l(`lkr{05KufG1#V#g7~=T$76 zq56v+X%nZf535fqR^{WHV)bdAL6@%cdy$cnee#G6ty9sU0|g&0&TAI!yT`E1VNBIT ztk-e*A)0FZg&gaQ1$(Oar#;G%2@tdBQfBL=<^$-HzzpeXc$OThahls~^OpQ|47}E$ ziKX?KB9o^^4aKnt>tuP*Agd9Hln3psZjHN+6(1<-rD#7MP&SY08Xm`+J*m&^lymq3 zZf+Nw1;fHZ?lW6=ox4~Y)#v~{z8t!GWJ)7bNT0JDQ9&ta?xPa<0ti^Q(Wo%gGJY+u z9jDw(W#)@ApY$D72=u2 z-1R-&CWub(qu}txI>}REzPan`=2m&MYbaBH90V6XD``NK8FWhYZu?eDHB3=fwI@MP z!M+$AyNjKyU*AWDyg|p7>DT+3VOmadhe)J5l06i;$CaT!;JqP?K^?9@*VbC6y|96h^($tL*0<36WWboNraq zBq->$xc%?4CTU4WevCA|qH;%{J#%vgk_PFSwYkokg>YH1xyN+#!r6Sny$!SYYusAI z{#_#We09b`fm2Buw{WDOsKfT6tR6sDOJ8$z1Ux*M=g2q-lH}Nj@PH&JX&A#ztE_c19BnnqkuNz6p%zAKL zv|Lf&Rrc?3UB3bCyl>SIUjUoO;xWO&7daZ~Ls35hMitOLtJ5j%RNdCuC8?1OHtA1w zQ7kni&qLyp^GP8`EEW%n^<_hxysMgV)&7gJIJk5_ zkK?;_lg9K}4xBIRWT|rt+i&IcxI7QVvYt-hu}&Y1iQy_IrDAb2zv6qjl`RrzsIx?a zJ}B^qkt#5$u?!yJb$oc}6Q_N3y2K<6Q!k=|8*n5c?ZQ6X9l-f;7C=u%%$%>5R#8adn@Dc8V$M}vy-|62i^oR_y;6Cf(fykErnJA6 zYHfFf{n*q^g+{rhKcnf-^#lj0mAp>JG&+r|=ljRx>E)M{7c*0HSF4_#p--$Yjd+}+ zop*Q)o3+F?B?vuQGbJke6b#2;+AxjP_nKnry_HJ^GAifG{z)jh7~Id~I$rsmRH$vj zJ^~k6{DCXjtXTp*-vEbwd~mDLqTGnOrZ-tAZ_qL}2O2X}2BCWK{+`2ihHz8L)d~^c zQ~f><3inZ+Ip!R}j;QWcV%eJq30UjM)=<(!T|FeX_^UpEyf2l(j-Rcz5`ZQC$fD#y z-@FGBQ)3ePtuEIYA%eHV2t@ju#W_#4``e{H`(6v(mDef1*;R|hY^NG6+|56oAEZ*O z*%HQ=NBP9fs!oG9Gs#TTzC}xRMyX1Ai=0!+ zTUSbZ(qC@5`sL1LFP&kO)AX6{>{8J#y6h)gL;-mi_5GNt9%WB?)A14)eZ}}Ozkd@F z|G9~NiI5?N-r@5XQbqAa%OdP#_6DoG-+-R^ojQ*49WPh3zKdpnjltpI(jl;#= zSo-kBuw*|Be-DJDWo>D`(IcCSbA5KJ0wICKA?Gg^f!}zWQ5JZ4&QQ_R8+6Q5DoF1SR#5xF2~h(% zV(nzMDLM8zN)wLfSPqS6HD6)T6kZ8E2o4+~S}t^GF^)Om;TV;*hwbhv>O5a4w{JNo ziG67~)ONP%3gU5Djnz~Q&f*WcGQ4%*D_hDX&UcKt8Lt?vq6AKjB2(CK-%IoDG-?RWD?cn{m^ zV0vQll#i|F_xc{*53h|B>X@B*sQI|TJXpB7{mBGY&PD1Gn{2esJjUA^wA<1BxvsmD zuj%UI#LG6Ta>BB`=n!m|l+_xO7?$!u(2ZJbrF5=jZ_H!hRt^l0^IQ2$Ld1@1^W>(F~+@q^)vbuoxmea9XLMjdF-l5 z+?#!u4oh`(<mpe_Pmm{&iH_^-D7r?5!wM{UVU%nep~#Z>c?p%m!$OV;rUse z-lAsG$u+^`y>a)QALvONZ&WSE)((V{W99{8fef{|C z9ZrvXnbE)rYP}_3!S;)?@9=XMLwu56Kwy09M&28bAndp)*xCeIh#RYPHL<;{*L~uB 
znUX&knsQ}3kq^f7Q^Pf;>yFYjkKQYmLLop}et)R$XtBh4dj0N&t}yxT!fkJY&}bH| zNBFJ5T)o9y03RiYw2PovfIA75-iq8jIBqai06`ASu+EyaWp~coLYqhS&rVKNo-fEm zO4%l-es%BleMnmT$0pz(Hue2Tv}5#7;d%fH7puH)tG$t_^9rb%hY#hcD(LBbamS~b z{a}VDuwVvaIuCW>2kSo;=w4kq4>#Wu>HsFSb)?1jes@;9?SzFJ#TuP|xaVk}iI|m< zTh%`Et2=>}P#4a8D#pCi0^0zl8e+zj#)yJzO6qwb)zweD&5qM4{SOPIBUkY7D$}da9T4 zkoe65U{PZGF(vZ$jQa+ybhOcp0PmI~auVJJzKs(5JyF!l0vKXevsG34Br1D2gDFR}@ z{Hg_TQuMt$de}6S+3mx%>tkV~!{f?X1+3pQv8fv;9tH2mke0}lubt28wSBvxn(mO$ zPQGNH_{?_UMy1DuBhH6oGSmkjUe+JW!(<{xRqeCYyxU?1KDq@CRqx99bM+}W%$RN}nVyJi~ zQL<@L?cf_W>CU9a)dC3tykE7u{K8G`8q^h4!)EQ|my`^p#op z15|#jmj=&SzyAefNCp^ynQdrJyuGOeYp;^PnOWxCm|ae82nQ`NB6d6vD}ltpp4YvS zuImxUl)ObV#FoHTIG-~6#=%j6{i?0J#?R<`rZUemMi@7^JpC#bU!joPFi>u$$`}Lm zHOQql4Y#5-`KJnAw(@V)$<)|Csm*1C7HEzs3uc=nOUG?8Drf5^8oAdMh6%^<@yqc~ z=(A`i=qL!veJA@(u+T(6&WY`t6 zv_9Ui%rmr|f*49${(q!>bwHHq`u36vDu{>(N(eHPfV4C!-QA^>ba#t_ba!_%NH+*b z!_ZyQ-95zjuzNPnIeWgdXMg_%xlMGiXK@9H zam4JZ*T&8WDb-puhx^8ZTl^IWN*kVQro7S3k@{M!zX9~lg*Ozga!n7!mXZgV4T1*~ zSrT*I4M0}_pcnz`Mp9jLCY<5xgM{BDMZo0XKyrrX z%En=%J*fl*{S-dPy+v8DPzp!0BicH+Z5KaOn8e;e)LB<+gGDLB6sNDK+u73jB_tqY z)*XM@r4jcbrg7!F%h%*XO3Az|SrbbaXEh1ykrS@#NoXUcD$HL^U1m4F1LvYUgc*la z8wKPL|$b-A1J9_HB)=7qHm93dY&YE#_Y5T``{r zdzNtZ6HyxM+e7RAKQhgC%zjKvZZO-Ar5jkSS-JrnVu4rPMAfLsP{Sq5gmSNDBW|X( z0&;gtjEJ9xY>GRK4z#n@hg*<8rX9E;eM|7R>fdNdq>{^i@2W*|Sfy*$P_~d@sSDo_ zTY7I-rd_a~ALhDkGx-B1r7q@CHEO_@Xk9jX1=bsB!BR3H0o->} z9v)sDdgG13dc#+dpCQS8#^-g{VKz^BXyo&g1E5e0#-U~VfXuH0+Hi#ruGx&B3@9rB z#hO-j8khV-dhHwZaTA)F61BvHhP)C!OE)>$g5(Z#*ZT?fFMGXLlZas88EP=0ZahoeCGAn6Sk7|=(&ycWq8|+9kI33T*R20&c+Cb3= zIEQQEoqHJ60eS=qwnh0I5A9`8oI((JeIQPa_NcoxN2wjFYnLEX7M@Z7Ld}|Sbh}4WcbJzvXc={Ll(S^x%D?OMiI(L%KzR<&nDmcnjjp4`@Wfu$mrwN@ zI0}7l($}o)!)wD)e8ik2{Xl2q(Y4cO#o3O6Cyb6IQx+9;O2)UynHSHNRzfbF%qp*r zj%q$gYch-8D>2Y*Xj}tP7ZgsRrKVO905~UmL(o~*4Ics-oh}%vj!2ct%->_RwVqK$ zYSV{Zvz=iA2vWfjcYe~lIV8Hmd}RS>IV}e2M1N6<4FeOYor|$Jx zjkdqA-@sdnQY!WShH0r&{<>u1YH0z;L<^`+uF)ms+)r*pzd(#Dp-<2+WuFtJt9zj_ zjBq3x5BMX?O#ur^$kiph>vS(g^Lep1X!bD!Jxs-PUW6S@rjaWkuS??V| zR2$NJ5T6eyETVwZ~+PF3~HGIqr_n6N+ zddYQv5SsTq(TsB1ach}?&->AZvD@Ic=i|8vziDF0QCN6$U?=N_lPbJ?=PcyHMXh1% zdS4V!A+BhYU0tOieDSW~qNTg%3Q4`Rd6{&N;638g?>)2M777lCPUj~XESk(LwXR)_ z@}tU(X@DZw*JVrscC8qtJ6UT%%tluhfpT@{p9ikTqja`6@%Eu;rjJ-ty6+f{L~f3| z{GiKvhps>5*b&N~dbu%_b?v9;kb<_a8w>!y&7f>w$X3`wbXXfc(HY5Kv+T1UEpQ9T zdB2KDY@$@5@)3z}M2qvP&#G^uY{xw+-{@JrH5=YVXHD^kerM$>=bR7xnWC{3MIZf+ zx077iTM`3SuZ*h|s;AiAx)M71RH42X4}O*AyeCeoT=l-;>(nzgE8WOPH0W^~O(!rP zz%CjN<5qvt&xXn};A;wPC>Eae&elRZP2HWt7g zwfFbuG9+-_#D!med#=gfqRJ5!*$Po((QcLKh^I^or%n>h9c?#J*Y3q2WL$8aq-qOH z2o}>z0#J|+x`kNb)@c9zNvP^#N87Th6G;0By=@A7TOBId{r5jA+;Q$h7T~!^_ozDV zX?PM#6+5U`7S4a?q%)M?sb{06ijbD>JqqsD+lKRf1~7HxG2K+C$`fT9t=>bxVYg`p zAYL)^vureaecK6xWP0W>FEO5{`$5(&MIXy*yh!wW%yd6m*XY2@n-z#z)KJ7Y=EI5SJI)w!BUZc!kBa(KOyVc*UaS! 
z8u^K9c8;b`wuAU0LmqdD%4g6Z6G(CNE`ma*BQuZI>gomM3e~0!UEvAp86IfwjM_Ir zerV?!OkAf4!yu9f_keL5%t)fDLOafp5Y@Lg9$BKd-7E5%sm%nEr^-nLo0qgE7N3W5 zWnmXuO;5)w4Lox&RMfye%-UvBw7pa-iX80_H>rc$0lV!SdqL>bS~>0V5mg#avGelT zsJNDym$6cXfJhV#9j=B_8}Nr$9c51Wfd*FgrNoUT61*$@)RLsp&-SY`fdo6Cgb5v?PT`fFv zQ6j?-L-eh(?-Swh=Re^m`_~0E;yM%w5rNrf&YFk9JI(#qG1%Gid69Ps&V9~E{CS8& zgM+o|E1A_HV$oWz()MMHuL@|BhY6rn%lP{0b7IY2_r&F!pA`ZTyK!xxCs}O%1~?FqA>A7ca+6)a9^2H&2wFM%NCD0hj5xx$!FH)SdO%#zXSt)yDgF6*GOytQiMj4<|yJ2gA;G@cs|79=fQF9^74|w?0;Smo#Z^=Q9RTQ{BKDFD6u=a`YU= zm!iw2hvAqqMI0S(?#agu(zP4OC*?uwI;zK0_%7jxen{|+Z=SbEw%Dn*4!^=pTcRX1 z@eMz`Yqi_5sSxt?i>+?DFOeqK4KOei)H=yEhBnj%!Ez;f>Vpl?SliMzdXotqqj1M* z1EJ&nku9|pz5=nc)(uY?YY4tO!NnWR70yz#0xt)U1d7z;nVNroP?oCzpl*rcqrP7K z+x0G9IWHWC=yRp$v!IXVh0Xk>(JmTyqGDq3(x@Q)TCvU?-f3yS#B8|K=zYF6rKd6r42nkPFv z%ZCJ}%PUDN9C1uKnI4rRil>M{$6lrWgWIH2E~UMsYS;ZrJX=tLefuDX<%v~?9GG*8+!H-vou?c(bUBH zFY@SU!q76&!ASA;QK}b8`sMI<@!c13Wh$P*aV(!3yl&l7o6mim>++$!AA=0?{Ar}7 z>#?Tb@z+hKdqOk)7O|V-TY0T`*kP}875qvPB?&kpY(GDUdND&y$GMSruD_Es0S7SR z&!<`cVQlsj z&-5@7(j;v(k6=B)-sch#B-8WpA0vTw9lB9vfPGno@+_f3!}jz1W-UZrwnA(dZ1V#xGewAw5Eh?bW9F9R?8Vsng zV9vtrC%QistlM8-s=w_O_jCkhnBDmR9fJQ;wgG3hf&?|Y=`zbUf(lVUWSF7drmEW+ zPT6|}pi1HTi4LvG+zE^!{#7QE)Z)5rak@YPrnir&Rotti5en ze(jYXKL6|jP*D!en$8n5cGQ6v=%b5jfRBhm)J6plyB3bq<$tmnXkdjHPrgXPaFIOc z9B)SUA0P}Y3VJsibZJw1_i~k>e=bb|U!;h12TFHFurPz^AXrxpLD`Q0pW*WmXuE7x$OBeFI9+O90G}2jdCdX}FXb;x->} z?<87iZ7;cW;)tk2tRQD?_^wvrNcioU+O%XIhe3qY(K$$!LpM6D;Gj?PA$tv=c$T1) zlRZ|gtWzy5*dzX^4_084Gz4a`RHO~6I}WrVg_|csZydHFmJ0h)>YxgY@yyy$YBozc z*zL;t(DH3&PKRb@*(EUB`c9~*g>;W*#=Pet`Q1mAH0Jdyq*l7^HF-Kagl?6>p0)sp zgJulLA;`+Z4*L8d!CP=tTK#BHGA@1cdifLb^dT|6l3IylgLR#L=HWUwYsbPBTgQo6 z;KV94=jyC60dh0EE!4E*Jya3v8d@sE+i%V563^hhcKUdnHCl#>U{>h}kTHuhkDO}* zSue@bT*q{%XsRa-#`Y6AGQIOLXWV*XJuRKG8g%DRO(0W`&I!}Coxp^7LuJ&6L}ieu z0sb}yf9UdR@}9x=d0)!gfs>rXk&QVY?5nS>ZBe#IF6I8D`|Wc-dpr z&}f4n?EIrDOXoYfo4k@!P2jtw&Ww?jPX@A=*Lo&qqj35(!A||m_ZgR@x2tLGwrexd z-~0EfBsrD7*?Wu~i+OSJ&V?zA)G=57Qn^k04z3DXjv&lLIe*%u)bICo0f{TVX zo5Q^ttYFE*s9dA2I`KNjv_dop4ydSN>7GYF&5dz9aRu|TCo2w*3rtUk6*c4NOjl7z z6dKLJ0=2~CO+^!-SY8%+b~cdl^}t=}eb zyA-@k>vg->69nYO{r8YEt-FC^0pQeHVQ+15pm?8k7TW6UM88>wN{mT(==P1txY>Fl zcw|Aa`kJOU zka{;KIx)3{kV|UFT2PJ1c~0n0Z_d3@fl)(bAstw8&F2*aTe9$=Wot00!XKcPrOkSJ zdJ?VE8!P>&dltJ#*5;f_7NRgOy2KROwwPQ+qrMF7TxLRNlr37O2jKz! 
zI!1=qcu$#15K$ay37RmWh~+hi&^EllVB@_7E2PykB2YymI-j+P6Yr&W8Ok+i0VV zTQE>eCMDG&p(p-ZEoMu5E!&laeVJrewFlh@Q^d-A0c~lluI0D>p{LX%wHWJ8D(3e& zi=d20j+r}T(E7a)=7*OlpERa-FrO;4J4e1y?sn2LwW9Sduo@sb7VUW00avxWTyqkY zkUe6nvA7(STNgJV)8+Dxj(&Wwa96O&^|;^g`*_Jpkyk3vse&`gq|-VktRAIbyQQoo zNBGL_Cl<7IfVJqO=7;#47#AMCi&Nu(v+Ms8_I?h4!R#LR%!W!Fdiz`3cTzG6^A6mg zK&>H8Q}ap*P0(%z40!?Pzr`<@ceWxfDg;e-6+ z6|l>t>PIvxDjP>ZW}vy+A(dL*Py8NK=jjpznqvomQ5OvjVpww+FT=hJl1I^=SX>d% zz$|!YfFcCdN_90jU5+!Ok06HzzyI#*<9ha%`sMTI&y^x@dCnGP$LpuBHx`fC)?I;= zZ2j4QnrjTJSt2KVv&8ab+D;UZZx(2;N70QGs*~bBx;kA5(jCm~)7&bDS07IpErD`8 z#21HJDCN!P0y<+bEtx#zP~<+HL)zFYokX2A;=j>C76mY?9wl^@(J!tc9~N5g0clL_ zMIdj1HbXYAI|&G3S(a6(49A~xJC9wcoH@p@+vL_Bjl^M{@V`aA|EdCcLSqT)Y-Jh{ z-~aR0^2I31A=jqZEo@j;TjSLmH<@>z57{*@8P3N~KU-gZysUSrFN#n|V4sBCBQZQm zIgDreDMYQ0BzB>%@AFJ5mqGI3tQ$zYop0=8f3k|v1$xKDximL^Muw3_BmB6HWI#Jh zH@+i5m8qAsN;};KK0@P@h(kL#v9mU&*<478R_hpG(0o3mo@T3w?-kF=dJYw~y*{o( z^_?uIdq>w<(LAUyJ+wyg0a7(;Ws*n;)#vwp1g4sV;gZ~VBtTpj4L`!{tBCxpo9n7f zQ+Yw638LcqN`!h%Ewuw``5jgwaAGT6wvNtcQaw29GW2{G8KYdUUL_>GCb8+?;LCfO zX*;lxK0;I0ro;2!hV`K6N@@fgRq{+&VJ{KEqH+}A%d6ULx}*M$54wC0pD7p+rb?h< zJpEAL7j>n{w1@w8x0-cNh?8L#LLzjh?)iP<&qHQj=%#zk&!wqOnM zj*T189qTrzF7$j@plwtL$d$jimYPgTY*#K|R3HRO@?4gxIp~^gJUqzfQCGRuZM!q` zcB0IPj&;i1rFvt-qs(Yj;dRi#5V^;#s(7d;^^r|mz>}=yRoaSamhKFmnp@k(hbMu@ z13`eMwQDE`v*zAu_crrm03!S>o2*`widCxl>PYDq0?pUxyBzt%M^!BU@EMDIy!zV4 zBenUR?olLRM26|bgO0Il)-RWHBe2+Fs-+TCvrJuLUhcgs$-G5q&xb-@pTeM3XsfKf z#eTFH$k8@ta(z+KQ7HTRzsG67I5PF1Pxygs%O1w(sn{?FKK;?b*Y9XlN-3D|>FMdI zingNb5&;FTaDI4j3z1Dkic zgmyD*oYRgPPFu`T$$SZT79caH8vLQ*l#ij8z_`NPKaRvm1;#EFo)2t_vM?UoV0^fD z@1DF>x3JxL-O0?#gYaw~ERf=}#=P(w$<^8Ex@EZO`FtOP(s0ddB}*%&b)xdn|%8=*ocxsR_J)} z3rm9#UE@yCA*A#lXk_00`;|^Q>M_?A9B1aQoeU=O^Xpbu5+@8t{oax}&zeSz_w7A% zPMn!xxG4O(-y{m{irx#;IZa4g`f32{x}?(TZ) z9!h!mu)mS!p}<31ePLa#1j7p$+|tkpd>+njrSXmmxZK3Lc;G-gYaMnv;>_yF-g;v& z;~j7Iicy_&=Dkpjylmui=>-$1d|9&R>7nYd4H1cv`2Gn($JKWh81~L<8Mqmm4XzCD zTc#Ow+O;A~%<~C8DFR+jhsz^YhS<(ha&mGY?!cXcs8W-2*RMHU5t?|@DuKtVlo@8! z2m{&@hM(@1`sg-%N#>_fTVWHqTXo)k#T9o0+X92^`>4CA_t zetLUWdxwOWSPGxTn2JWVA_7o`PHs6JUy#cZ)k^jz_?1j%{}F=zA0sudDO!58kKDtA zv{2PS2FV`}Pd7^>GxU1!seamWDckEg zEd%|Mv7=47ct(~;>L_+e+uojz==gk~cxrRp4D_HU)bHTBk`lRHe09mErdG%gR@87% z&J=U#E8Xb!1pTQhK+f5!8*h_bg8Q=`N{T}w%NAPIW^q?Km`5((sB7#4!rbWUUV9}V zrr~rkX$C9Xs4jMa+X88Vm8DptLeSJ4&2Cev(Y(bplBWpccKz`gR|mAhj=vUm$FWKV zRLSY;)DDzl;xj4f^~SF;v`%}JR~=8hwjBi$e>&ejqJTF#!?!BtS61{V54R>^CJKU4 zVXywRPxrHzA;v^5Hwln&BajS<9(&GK!~?-B4Caxgy}YdC6KD|?6D$7YE!YM13u>Q@ zlURa%KRBc6@7G@%*Czyv({Y_kChS55qKDqq@}WvN?)D7xm759(KG$0piX%|f1Q_*# z<)5F1e>SafCIn22^b=|WczxsL_So0Asi~=UQ<)AjD1~L1e0E?n76m+RXqHA2^=pK`e*OVx?LyO ztnPEVoQDkn(Jrt+` zN2H_V!#@rA`?6XS3)hP)OZ%HDB^s2@ZhpNbJ&8&78<9P!M1TG#|B8sBpzwG|5WSjG zC^6E~ZR*hr8A}s9v3@Jy|2U*jTpuzN)TCm~!gHA|otAmHF$zOYVm2O=$|q*6+dkf! 
z6b17lxt|u)Q<1v!Esr;q(a_N7yQEJMOhIN9QCtxB}@ z?*-h%sYv)}V%>ocwg2@X{HX)@Pq_xg2LwUR)GSi%_pj-^HgMD3djn6_vhFO7sG5Z+ zYP!fTt$WN5|`$K2lEBF zlPe%et(iXKtf9XZ0RmIkJ=UpYM9c=sD1!dML0Oe@V@gm_gn_iBV~J{JM&tPF!TF{@ zmakQVm4JBGGg^5wGoq-4O+vLzhw+3Yi@)Mp{&U0#Rz+mjP8Bi?mpAQE*`bh zdO&|PoHr^2vQ)7;Q0cFg?=FT7-T+fFFTn&G(kHF|B44E}SIu!PK>D(O?aKM;?7G9{MEvFW za(rw_zSrl^x@@iZyE9G)`G)meK}0+nD0h#UM&XwyvpQg=6{7(^Tw}CtfSMUTs{QWg z7756){h&~2IHjxr7!A!ajDkR#4zi|9IwmH$M2BT%JT}WuXuAiy=E!hTL_ztm zl#`VR)QR%q7J!5E!--yvFDQQ3?6nViQ!(`0^*1^wro9robNy*KELT?uH}M4SO!a>| zZBow2a$4_y^vp!fcC*`W08@TMP_(qR7PGVjR6=PO1#ZpiW&!Y=0FYh{Nl2+;hp@A^ z>d*{@0dGJYrPWl53!ZFvso`*^sjb8|*mWaiFWLi+C+-R`)=LAbbt<0gi!inkybD6N z3;G!P1^$Js73F&8&jfQ|iysKH=^4AabQl6Bl!C+tQg35B8kNHW_3MS8S#jP>jM97n zPgZlgytluW6=?kFm{{Hbs1bD1u9**Pfqg5h?Otj1TZG_tDe4LsLmIBm{XlPWR<|C6 z%XXmpPDd)>&$I>L1EugIcF^uO47_#sF$GZR$e85q=h&`nzDg99kcb2^)({pV%|AH> zTwi95L;`ziiQ(1JsHUkmERjnP#W^t)q(@yq>R`q8ZovQXhL0gzBi1~8ru{w^{s0%3 z{0WP(&|vJ@k7}C@1|q4(Iw0Xv(5d!n0Qt^(V>6Jy(aaQgB2gFMAP{49IhTEK|Gt(& zd}`{0LDY2dByM%uhT4E|YDJ0K+S;MhwSiIt*%;OxbsvBKZ~)Ed1k?t^&0f=b%YEZG z1%Q*{I@hQ{pI~{?O2Mf7kqi)etXfG7v7O5>9UUh~*x3QCoSqXPA4=0>0K}HVZlu~l zGo`?Q?+NJJgtZE?8b*QqvVR+9kO5+-ne1Z4EUg%BZ8p+aqO)b}YBD3iSSF+B zI&syrw_fc)`d98zEd|IsENa#T31imFr>pjWnpYIq!(wO6$RlF2RDwvvM;CIXdF7Af zGb#f+m(Ep$ZWmtz8DgNkv@jsfq1&m0J*Au>g0pvW5{1cqDur_EidG+%$gB+}c$6TW z$t*-t-vJU*)>ez@KY($D0(q65eEC^VOr8rV7SkyT5NAWc(TQ#3t)tCb&G%L4e-sAc zA_mzJ%ZcRd0ly!B1-mNeFCWMt_+@}IQD`f*RPO=@WQ~*~Ys%a_yNyWuSMUR}7caWU zx)b2#XcJS#OGLsVA}q6>6XR6C2i$w@^9U98e>(IJ5fCCUVV%+0gXA*4C5?cWUyk>O z_1a_wbUZUCCx603vT0 zrfnOo!1rd{v9xAY(tnq)*-&3YTT=ik7+bEIh&$>BVE0{~cSffo2GIt!<*RmE?p(`> z*z#*LGqe70UkpTLWI{PFcAJZBHX=*a+1N*7(7+Xn+h7XBI9PkN0zVSxeb92w?RwZR z2moUED|%rFGWy-|EL+0ZQ%ad)csF@Z*JGFDlN@M@Ctiu_A)9uqzce1xim}eqTJ>LB^&;&#OPpod5DcrZ7jq+}joUiil0HhJD9b`~|TjigjyKPaJEPyrQ~p zF)6$L@E|TX(7*05JpnPI1JnxPYd0p8zKll!bgoK(h6yo;=zAdI2h#m?4n=)ju+Cpw zd%m8xcD|qYERT7Ew`q6RJj`PkbOL&m061Q}J5FgX*Idc@9+T5*HU2-45)l z+glYYoxuDP*En$}&w4q`x^AGQV@2;EqNxX*Kz1UPk)SsL9@+URI+mv2p&Jyh*$j^% z9pI7bCYcrRu(2rv)N0}OFg0#OkJRLiv2R!DhnE_S_V(6)P}jUgN2|2!Qw!g&QNaG? zw0##MW@F>mNV-gNzLc;0wBv_$WJK2X{jH0afTx%4;_;6%-M-+V?i#klXtck4Q~x`|X;^mPSrAIKref75RE>wyBx#Dt1knoJ7QDG@yq8bzh(L_Q3tbwgXHw`EQ ziOH~~m)zX1uRx{chyJGIIJM+>I7e16HB~}1M+O1XD5*a&=;2f`>so`Ci8Qm_;P&wX z#F7yMl=ID6Ju@??v97gsg%(#(@9k_)2Ajx6psT*zJ}}M9U~~r!?S`o~dNTr0e^<}l z4FEj>oP^K&Ek4-mGZ75ma&SY*=u66!TiNr{K~8dJ2dP-@-K;)*CUqYXEcR1>>6#eL zm-@B`FHLr9y_y;6(ZG%frQGe7y>73uI*IOKHPTSHxU93hi-jBY+|7(#ciCZYDbjM7 zS>B419Q!6G-A0aOzUWc6sxMm`g;n$MJ>1;zb)I~Q`#&-idytU#fnZdkw?k5~kEmuY z;!7IKSVH!kj7)DtJ^LJVKVAC2*&DE?BE7R_X)V! 
zjifpks_RTo2vxmnXK>J6Zg6-IUFoWxUdsc+l48@+l3yFHu`D0C+Mc@B8O)^EJ2(o} z*Jn~x4wrWn6e&lEpC*?uBxW{5)q|@U&DGcS?+^HIE$^0d7y>H3suLG|bx-}jW7y_6 z#4r1YNe9zXL(%95#UJvn0POOexm>je|EL1@Rf!q)<6FLiUF}?iD*W*5lm1*Aha9;M z&88Fnt{6r%nd(x>LQ}KJ^1=Pf)VF%QJgyv&*~=;ZTm>5)+OEwvc?$gPp_)%)dJx-$A z!>ZE1WL>@aLtB_Q3sG3OLl2#Y=jZ_=rihdQ9ix$&+KgUyV4y^J2R5sSj7*1X0;kKw zqbk>3hN`XQ2nTc4m|t(e|766zneGcNBkSr5>~BK;NlhfO5wx`#+-3+jg8$ZY0v>{N z?u#I25`D-VU;Q7o0n8uSJDxu}vvUf!!nXN4M}Rl^)16g+AHw;q_UKiiB&8<)G49{y zdb}7YGBPsHArN7O!dD^9F#}13E5ipXHU`s~wfX7<2DFT8^#S{5O%n4PdE7A4TVxP+ zX`u4zZanjl?ACFckd%R$!B|mgsfngAt(okz>F&vjgh(j&tl0z{<91k=O1LDe-bN$^ z;GagytTQ;UWSGmPT5ILwYw1vhZZT{fJ&6o?{Wdr2$--1{cr*!&t45yl;lYk*U~J)s zvSaVx<;Jfk4y>Me$_@IQ%dSv8l&phFRiPtfSaq=tuq!##X{tfV-UzFDakciP_IzIg zkhS^(&H(Kwt%mgUhYlwR(|V^dB|fG|*tjlEeT1@k3X~NdK5jHz?}B2^KWMLX)fA9J ze7c<`E!0;KMpUdPncwEsC^=m8_t+Ar9XL1!S!ql^Yu+g&#pIr9ucB7J?2fU= za~L&T*q+K+vDfaEs9GqTU^x3GFu!E;V|<3^+#JfuZBuekYP-9305c7NOCBKJxOdL+#TX~m2JwP;Y8 z5eY!aV-FO%@Ef=|ocp^Qke|+7a#`-{)Y?0&nCvR~ir}6eTJ&(u4IGf*)IG(|0p(4ehIu+sLt9@~{ z^riFRxSF+8rqvn0?xRxQ#ErRck8@PY!iRMdZOk*|tF7;p<>GNWIb;YyH6p~X=f4Yr zUSC{xOUSbb=dELV9DnxZMCRh9B#IiEmwl@2cK1RCv#>!|zfBxoV`V4&;R_U9D&Htb zRP}7OCBSmgJlF7;d{eY z-HvCPKHOo%n7KbmqyJ{N|FPfy@Bg3!NuC`uW`M9$#oWul{PkvoW4n3VK6@3Q!CiDO zKAkILxE+*Ti)p?04vf%QT=hp9$@j7LYLv}-kl)Pdf`N7v1@<7sS_3qU3K`#Bu%ErN zU`0dkbZxRAh-P(GGR)*l_-Y>9$)Z$U8WeNTm!gEn)r2*ag#@2*va6V4OATB*I6Mr# zLdxxQ#_`Yb#J-k~dGdtmh4zkek`W4vN&t0~VEbH34j3bqEQV?Z#+oKb?}e&(jP%`2 zs;lE#Dyxc$yQ=YqvMArT2~Hl4JmyOfA3zt$*GLPmWzQ;GO%|cf`!O4rsya6Xki)wv?tp6{bE9 z>M-+aFRyS0G*Ft(PwjKHBt!0PkPdUH=pstlD74ojHBISdmG-Xut#0a5p9k{2*bI>U zotXQ{j94ebiOABo3|xskW*>ofM4n^n!*2t>cm_Z@g+1*H@nu&uVDixGw z`hj;TmdKD$d2Ie-?m z)v{Z8t7rXFNc@94{4*c$+wZoKtyB51aH56&5Ym4_utUV;8Bxk_Y=H6HiTdtGfOmU3 zKzy=%cjGcsIEhE^BEz7j$Kt#16~p0qtZfp;Oo_-w7qyc7nYivfYg)I9?b~mRr=BaJ zYAQBcU+zuP_bWdrDBgtK&ZC;H_8vQ$${#Yj{{pz(@XtK99WP0>m2`RFp&Zv-+M|OC z=f_YI^_ptUl`XpwmTy7l@nj88!%FnA2NM&wPsjRSDc7*y%$sD?C+w_wDljCBhpTJx zvKU|dt!@D^Bf@2(F6LgZv5@N_85x;RRFtBtny=9>w22D>Pc2pP|Bxdb|y zhbJcw3Gz>x?Ywn{J8$<}Mbd6*f5f!8pGwQ+B+r6}I_r4J6GkAdFI=1dnY7)s11i`Z z-N1Zkuc8w?7U zU`&sEwc^J~j>7M&*n|pUM?c2GA_vt=hPO}yf_zICN7(mRY20fv78Yq&b!V~qvvc=+ zWt?}=tvCAlvZGkRG5rol8(F9%0s@)mdQ9B&r))JwS8yjjo?_YKt$+}{9$9Eag4N@{ z!0h)2m+`ZA=M~15!oxm_3%5CM`AE6a``xF3$HNCD4JQ9HD8TY9?=@{2vAg?QVlpy* ziQMF`iuzY^WE~UCA$r}#4S`)%Vhn4_*lP|)7bwyYF=Jv;Hy+6K7AL8wD;p%qrCPJF zA;r;4)W=7-yNIqNkWpV9Ht6B4aAve-fgN3W0maS~)_-F>Q5uiN4ps?^Z&H`uR|*%k@MY-*S+lkp9vpOW+B3YQU>-a&tC}N-XZZ&;tMLpfMBy~FnG+3WfF_h=HOy!JmI z4hk7yMbTI40>HzBd^**M;O~1YksuPR4Bc#J>F`}52MqsR_&?fovP|< znUYp|MWkQ30J?KzSwtqwml)M7tM=<{XLV^hJN#p4HW#|70FhLcwPN-h-A%G#V???X zrHRp{Ey)rx8~K!3yE@O1O*+$U9G3VBVV$nvGdW`)nDsb|* zL}pwhxvKJYm2*_dg{TzvWn=oe#9G_-v`2Iu)-G?!Fg~q*QfoA-TvPSNL6N+sIY*}a z%{td(#==B3j#mr3GFgMqpEGS#CA7)@1#**r?nm6`P1ZqsH3M_u-V$0K7v9HT-W+FK z7w^)TUkE#2{!v>GIrv4hGvS}#B=v|_m1Qu9-&ZAb=drF zaJ0-RJ6s0A(?R;Z{Ky;X7=+6QVZmt_)9VVrY)DwEp@SvONQf@^{CEgb61 z^N9chs))CjTanSL=})JToJjuG6hjNSx54VwbxCjfQIIct#>|*{Y`2fWyH~%Qv~War zS(4X2pZ7R#Ca5|lQSCU^F zr0X`3N+m3^HZVLEfFrX;nALL=r?m~+aliuIV4#iCoY2LFU=>%aMQUbKgOJ8KeH2_u<(4+-Y5 zaBO6(iv&R(nxc$*=a(#i_s;69__&92C|#q2w@~Q2peMEm z=xmP9d%4T0*yHBwv`0Hm={M5FyOPB3rw!|KjTT}*XD3aeoh;`lMrSd7@FL=k3?t%t zy@pHfDmixi2dZ^2wv5PMN8yR0ON#ArZgX2JX&xz*pGdS?2`sUluG(K(fiInq2`0ob zN@DtlH!h+NQ<%CM9{_#Z#F+Ty^gOrYid8jb(MnO$kg- zI?`!UksoSG;KuwqdJVPzU!C~B=jnfP*Ju%`bcMd@C0Hm2unP$Zy$mYddin?1N2$li zhC_G38i*M!=9QUjX4l#@tC&DwA-P$7xoqImz_4edSCpgbn(8WLv~;>%XFeET;?Cx6 z2eZiW?QP_9ROy(#Q@DKCAx0?7Pq!74Kf1Y)?!8=%!XY#t1{b*VR7bqOUt#d^_g) ziqJ~Jw&4QUE1vBj`dZrYWb$hR@m8nZqAzWih<#}4Y^3A8+PGOMi?4a))fKKP0HU 
z63Df6b=_-#h$HNb9nwk7^U?t12E&flSHC z_nAihxgVK~2#wdZTbYC9sY%G#*`=+eQe4w%3MRv&%JN7DS+Y1>ogHNzoY(b~zg~QD zNGG`ec$LPi)};y=CFlB6hhG1;Fle1kl^TzI44SE01&hpKv-(YYK!^27TFrsM+y4Yz z_*Y;5`}>j?Z`*&-?-+l<@%t`ByQqidk~7)MK2Z!J-!SnflFREE)-T6F<3y%TfD#Sk zxM#SEPLBaI!ulddKQ8Z5kMQ}*<54lfQ%$7)@vDJHE?3AA8D>9D63Cu2Eo^$xsDzh5 zGhiT-RGY{VLv7Zm11L6i`Ip)fBFZ*AxUQqhjzN`*BBMeiPRJL zL|jzfU2mH~+bXW@R0by3edSwLL4z25uRS$gCWY0roF@uj1a*b)g`DshQ&&u_x1UBI ze^MzE>DU}q?E+`YCra^0CnU+X;Ex9LSB+5W6|zi=cB8v1mF|0AV=((Q&-ao4hkZ7p z+>y(kYfpTHjcxU9WqBFR&1@qwZjfFP>!JOJeaKmZWOGkv95WsD)fJvTE2sTS1q{M3 zA-TgM!o#@;c-t%xi1gL?Q{8h-$Bc}-wbh1R+IOr3W#0Qf<$O#ER*kXEa9~PlddcB* zJBra!#!#AP&y=l4THIwcWU4IPSKYfx$pm!8r|K_DTUyCPd+-qeiz4OTp*7>nEg#cB zkYVcp|0RU+@$D&0aNEQ@^J(Qn`XtNz#TErg$6dH5s8vY2F6%kb5aseC8?j`$Yd4vS}GrIzqUPY zd;B-D$9;`Ptf1_fh`f^M`|;5yb85fI5>s3uvLnqc3?7kdN&Owi18mJV=>|QObtmOd z+&eDW+}0N9)z#Jf))uvfh6XP4E%HBz&<4lwg2~}?NUNxtTD+q~yYYvwYYR5EU#lXF zk$-s3q{dp5Qyb$BoC{`$M$EuO*>(YCdPq{8x%|G&kX&%DGG*`=pvC=WkI6e0h1 z+dSr9B=h{#+(q43d(3G^tI(UsB^zHsUE51LQW4ej+qe5HkepuEOUtzT;pY-Q=A&7x|K8Z2@$#?$!0hwdKz6C>O2_uKIZzJ+)$K6qZ1O*>*V$XO{? zFKMv#d35|t0*3PY$At4*xFq!S{2n%bfA;OvV16w1emeU8ei@|NB?>`~7X> zF&4IyD-Ze}7^9RWhzH>wYem6&Cc~PjB0!c=@_z=mjS8;e3e>e@sJ(){ji4(N3daIw3Z>rX^vP#H*caLU~)~dC_}* z9Rc;yU&;unY?M<1g={lSzyC3R^ms#>%m{J(f-d%4`+Z6k`ArzGJRma{}Ib6x)Dcqs4il z$>FzEpOBkePxZtuV?*&MMI0oWebyDp9~<9o0RN?3z?=LM%|844>a6m)9WLnd{_5QS z;wxef#B+k^R7Nn`8E5-U4~`V?JDlA^8X8yKjo77_d{>_p^bHxmtLpHA2sY_BBPVo3 z0~CSjvX0*P=<*=#Up@}q&%HN1+f85kfBbN8$;H%jLgB08tu31$=`p|^C~>zNXV-I} z6yDPj>Gpn-!APF@CCc9Z%(pyd0|$6R{a8CJw-Xd+%otCaVEpaw?Z=coRvm66g&0c>Z97X+aHYRB!_=a<0T-#^8kXCCc64G&wAq!#knon{_C z_v_~W^&$NEef*Z#_f#&MOJPY>xRjI>3@lXoJKV& zgn59Qal? zV}N2%4q2M^&6$tSzxMur|G7C3S@1H3=K1$ma{f$0?bj$-d56P$ZCjJ8Uitwj2<=WS z74o_F8-)RRT)ldA-hNqz`SZjVYQo~s&0IhdfBf57tDri^t-AOPq5N++{5jCdL9&?1 zGQ=gVZV}1JNn3l8W@~nLK_HzhwU4)-uP;fX1`_2Q=Z@@!?k(48OoDP;d8M>b$uXPK zxqZ!mV_f(JbXyaOrYITf6!v3JMp9qXOs0?fzyHJjdWVlFf5-1MKjRd2IimM|kyK3d z1%`-Y&WBwcLluCaI-ok(6ay&V$u=>oP+eX5CmM#xFf-7X!X~Gq{8v|VUFhJ z<}^V>G4iI~)8ygpmE-H)v8Y5|xdvSpS~3nMt`_K*-r}DfhFe~c-Hr)`7Ngcgww}-y z-yKunn$=zqo%YR!AIC^miP4XB4c7w016&*zU%!u z5D+Z}6ijMDv%F_JlTweKzWlgGsZ(DonG{W|@#~u8UH}=(qGRnOJJk-@+TAwevp-IM zi+g%{uFOxt3B^RvW68(%wc91E?xSQ zb*9lhBX)DFk~h*3<)vg>{S+B|6g9ScAxfEY-8Im83mgIr3~HAvEi#o{mFSS)^8SGV zkt9fHU12(IQGiNZ{k<<2-i3j?RqXZI+DC1zuMYqJR|Tj$qzT(J)j^$3jrc1_-%1b2 z3M;s>)k`ygN=p=u^plaUa%`2^E;4!f;wA0L&_#THoD=`NRK zGH+c%jn)qy^q6dj|YQ+m1O4xNK~pB8U-GOq@0X{Rp7+W=I?rRc)mPr zxX_quI8mfQ#(9}{^t^`Gpj~RS|7?Gs5$6d|0nyYsYEc0xW+bd$!!UVSvr4UnTBTXo zxO604yUvl?-Kriq^G!OKL|bb z_-fVx8FX}HfZt$q*zU*m!E_PwX%M%00Yw3WB zGqM3p%nV&eC^nt$mI7F0qMFOXS4Muv$r;C&d1BbKcL9&lZXjUX`RosDnSszNvl4*} z=sL#8+~o%bD=i_W-PQwWJa0zHC>6N2>&zyK=H4;@wNr20$@^;PQBG6Oa0io(Vw17- z$RxUX?dn^n+{LTYDI2Y9H6~$vmn9tFRS^Af>xWbYKJ#&P{O%f18Tu=!qYY!t{`#`b zXA6xk;E~q-36)R1jkTc9r#C0@dZY!LiC{Z9I%}?HbAV!g_;IKlHV6 zdb#}LN>3RIyaKETV;=;(2LP~~Q&&n#_T)LZOIB{LD5hLEW8{$xe|>g{?bfwV?6#gz z#iBiL-7_CAsM8vce2ht*!{4`-0RZqOLskI7`*-DKvFynt*f-!!*j??~#T?K>*zrEk z*UKQ@lq<*LaOLuM)eqI!F-%rLGkH=$@o3BQ^BZb&?Ot`0)ufS3-&BpwHu1eMj7Jmc zowmyfD^P}SEFUbx%BSlux1`E5+nMe6%dJLLZd5W_@lQFgZ*Ej9mbs`QhA}+6nO{$Z z*J(JX7mqiGL3-+YU`&@2>HJt3nf#ce?lb2%8yon2lY5MG&$L%+YYjPBz8$cl>Rn5e=!U^mMunD^A? 
z?)IusIws^*S{Bj<2iTcEJ-hp34}i5$I1s3ej+{g%yqba}7II&#Kn_o*p~G+EA4uXM z=LEcseY5%S0w_3lEFJN{iLDY#tpq=@(|?5N;<`I!v$F=2!EFWG-F$bSqdExOly9%0 z+1zI{r6u}pR?)Y$ocsHn$@{D_wvDybNv!6ZL+Pt)(|e4;xaM^-oHo1UO{?ASXPLKG zaY~%V>95!;KNo?#!>-rcge|`Kwex#?(97?=&YS9!KV5^Y2XWpqm|E&R^bV&8iXJy^ zAm(#v5KppSA>wfU9a?<6(j|UzaOLws`zBo$Mi^oERlWf*K=RiiOSzsF67#PH$GYxY**_xbu zGK@MlZi6amz2_#3=-{&7)RD3Ijb>^}i&MT3SaF;l0IAAOHBWCq|t3R9LUMH`aD>Y`QDS+pXro9;DwJnfNyex9Qt=i@(y0nQBad}3w z4Kh%bRtCkCN*T8-=Upl~x$~fshezH>8GdIybBF7SVh=vvTn2td<>Pb9excaKy(6l^ zky1iA#z}CkUMDKEUO>AIqMH;2s(n`~$bwQZX})IdB&ejBly41Hnc7`B6AG7M*zg4l_aN8_%YlbA}COIgR-(HYm$CpU}2F1Gn(kjlWj20Q%MI8ZjGIivJ-#H z$f!bz?p~Mu(8t3g=oVC{3OGm<-9RezvxJd89M~mIt{bU>HrwUvEYNo|d-yx@%gb** z?YOzAFwGEK_i($t$OIyqSXx_NUS6Q!e1?LC#&%U!LYBnsFoarZ%{!j6P3x(utADb^ znzv>>fNiqD-2^moiD+6|pbRe#9?#b>1;k3=uC>tXUSD{9( zs80+W+3FcLc41H+Jp20B4CnvEk^ZlX|25)kiTf!-(AnGQQ)S>k?vw$cke#r}wHnv5 z>j@zmmjL&BUdCshZCYPB9#$>j0A^;qI4s}>#Ct5k6BB9|GU;wkz+grgsnaSfwVk z?jIH&o&cUSXU$KQU*_{>Dzm)6YKot}Y6YXhC77F8h4*p;QtYW>^QbT0th$ZcdOriz zQL|_Pa^LgJBqg&2qL{qrJFyT}U+-fb8hC|+PNk5k2ybKKa(C-O{lhl9_P86jJ1ehh z{!76IrH;!*b_Bf^`3-4o**W65@lbrd>B`2z&`)F7F3fc~Xu^~NP| z!jj(IR4BCv*1Q9dk2wHja1zxBqTU|V8gRE#sn4$ zN~4pIe5<_~j@-D#()zDp$g?@FPHdx_LC?YZCfx9#t-1g|DkdgobE6mT_1%SWDJTG> zZqFL1tX3oIv^;4~Sf|RWnz=7ecNy&FdALDytZJ-i9sd$`Dn4oV=rdkomf&Y_Iwe_~%lx+6X_+TDRj$zZ*w$=|*uo?9NO^4_ot@i~s z8Xs-MF3^=P3i)CN`qm!J!GScIUj;B9>dU!*JCgLu94y@^-g0tW+5|{i4f_);a(f|y6P3Eieu>ZWy z|5o)8o`IF$-Wd~fRnn#}nn<=^((``8>0-BHe#bFWHc`_P1Na-z-<`Q7=j?nnHJbjF zZ*{(}$8rj+twMpp-_&|~KDdwHo7J6sz+AxsK{Pfj9$ykUZKA-PADeR7M-8Fi)h(eg zo3E$a-K?_8D=fEK{q}S{2l#2u+FhSBDUDy1ON!z=%D=t890dG1b%!5I_Rxb ziHd$J_q{y9HH`Bpd0#$KR~;`mKaK*~W7Pt(Ql>Ki+Sm@|sF*t07i!l5o3XDwz>;d} zi-U>;FE-s0ByFy9Mg3!Q?v1nZkCltCuodLXxq6%2QPS$&qVDF{UkK69qCk>Vm&eUj zU%c1QsYb`#T)~|o+vkzituPkj{>}-70)Z5|_Zf4zPP+K2$92EkX5zVPQI-D2+WK>W zBfpS+C?_We?06vHHi3kwvA4CIK=}DNlv4BeL_}Tb04-vY%c%p`4_TzN ze65;x-gR(^PM+L*k1RBePd&7=_01V6(CWBgY^g>a(5!7#Vn~rJm(JrA7h*-PLIBk2; zC$ey@rhnIq$9=<;&W;Ce3kex)C-O-0mzS3rZw(bRs*lkI(X`^&%;^hjnKkkV`v7XQ zCZ(${xrAeLB7g8428Sj+B2eaXi54NNRf7Y$S%t~qO7n0#7hVWk-8}kXh1t{NeU;hS z8=X|%adCd$Jv`7GA(@hru^TXe)tUK(x9BQ7T&Z+wi@%jb40Y1w@@k0s_SSXHc&z&{ zd%f;pZ8~qy*;$*#glCArIpfYOt5ofMaAV^Tuw0-iE@m=Rpgo;`hFBwT zL-Y+;L66n?yN%{VV&ErT-o1ZpYov+y&&|e+v0&pW$_x%sZDZpdA+&!pP37-;{+oyn z4A}v+{}>L{16wD7YIoYMh=ccBw;ps)z)Nka1IY9(aU-$@&`?o-b#T19JR|tw`K(4N zoLmN!IGNx26E-xhLzV*B=1B24t>5&eq(mmxK+AazJLMhL6RXVbkj^1;(xrtbK;k`d z1rV&v$BNvP;6i6L--rR88aS*bX*^>&8`2>Va_KVE)d9ZfsgwI!*+>C&dkmYCrO`O) zXPj=gS4;GrM}uMn0~X|X_Vy>E<2k*4`76K~FC6H_Ta0$V&A0+=1|onAd%o;lj&fHz zUmzg1&OUB=L*52=9G?m7w?Q_r&N}<*D2nWZ4+W1ZECBTmhbu4nCICh{X3NETcJCk zDydL+&glvY)}mMs$DUInGW@t(%eoWICb*p2cp$80{Xq&(Jm;<;EBS7!!naPjC!o(7rT_6}2>J8Ejp$(Y{Zuv|mdQ$L}dhq!O^qQ|x4zYuJ9i)QLl zCNeR88B|9$9&n}mQtIN|f!88})U|0jz{A#JRzY2EG8*;u8Lql-$%jDGh1A*5Cy|Yf z{N;S({9HV{X%crz9`^aWG|qQ7#5DypI?e|^E;AM8d{q}~8oLI$%+R$S#i|mLEJZis zbC9fJVzWB&JBc!&yV>Un887eL_aPi~b}(LM=kr8r z_C4mRyIUmT`Q$PYV~Mivug|iZtW-6$xid-ICG-W$ZYc#1_Iq7_bvG^K?hi z0JBAVSwvi3mK&z%-ml57mnqjJ=VDhwDJsor#)Z5?=8dDa@~L~*hj+Y@5DG;W)2(0W z4RnKY85_OdPjKIa$*$L(;QGA%^4{D=D)4=DXSq;e^zAz>r3V83TliT{DKP(fw!iae zB;)ww-+t+JwTxZKWxboc&lufxF~DP zb}TO-(htP1c+8H+JpD2mw?_kfT@X3$Pg|VvbJaGu01J)8z{DhDH64qHqSKJz=jX3J z%KOUDa4q=0qJHM}SdNO6k+fzee(p%-l4#sQ1!$J1&7JQP#bEW&XOGL)y7Jcn1d>DR z$z=piHmkK>oJhKox(EiHrX*L`T;1X2c7aC}@YnR*umNiPmhVnm*_lQQjUFI$i}&Qy zf+avz*bMY!m|ZU43<@Nvc4eoq%Bcy_#6-N#aOq7iy;7u zaRjYT)aIKfSkA;}Gb~evzQ; zbYU7P7aZf!_Ei74KfA3a_-)dearPqkZq>r*E9HY-wolYCEU&#Ei`Gg?MpGFi7w}0O zEO^{7p9QMl`?l1~=3G}DvmKQ>Z139TBoCKls3T!)%Q259TilL7f;$sQM5WH~>!jmt 
z=0Hlx>TbnaXM`^So6MyX7ehcjQgw_|STwf2+220O$Fc6WuxW~w#3rqbu&H1XdAC5k+J-{Is6N=hrFl1EasEB)wMo;%-RqWC8MH} zsK2pInX#LLa3LZhV%BYJv=%bZ z*I#KuqR2EI==%!nl$1DacS!GPkqIXQ)n8F}aJg*D?wtgk&v)-laVr?+E zOa$rCeQO`RmH=|equQFj;|%0Jjd%v^#*#jy-Fml_ZI1|~BPm;GPO%MbL7H1yjMjRw zjh0#^)|?ZK6gARMh926QH}C-ElFo-W^|ly{WrumnbX@e@+W)16Mk+m^@L8PDd=($MnLF#TW@vRt3Jq=sp_zvKQ7Ei zt@@Nn=F(WK0Y^G|My(>2vXlW|JILurJNWb`)h_>rr%{T+DP{6f1JCw~X*^(fsad~`T-JPLl zoiv5D&P>sZum8CqJINkuudibm#(lC+Onl@G466TD|NjGw@X)8&zuQSfvVS2$(0&*4 z8-r6EhWBvu2eNG(p~$CHLVb}J>B!Wco|5PyFVNAOPABDsUkHmlM2yHd#&?9#r93)c zOJt9qHLa4O?;>+pcL<7)O-wwTuB(}3gw@6l&-$_da7S(oea^ssL#I=(dDmg?_2{Jb z8-~oP7g7UZVcqFBTZuOgYj|eQgSihwu1}{m!&gsH6$oy_FZ`@*pwFJG&}saVp8EBL zj>Xs&=E$D4*@5q*Tz|ggW_6GXWi#KI;4)BFPzTnG<8NVg6{XHceQ)}n`z%C# zY)%pKT{#&S>+wS`jHB&vQpDk;mLfd(Ere!bN#q@M`hbZD3r?492~7Amg*u@&$1nMA z@ixa{IIFZ(U9QzZUZWs9x7;0aCM@}tCYqtUZ9}NKOL}oqsbrJ1+-B_oxuui-dQ; z-zEFv7A26-?H0}iKALJ152-`!k9UN4AWvY^ESt-}Aj@b72sSmjtw(-HUBCg4f*|?8 zvxOqbeu(o+XGl{NC3@_+8_G)T{)9nYnfPGZh3ua{F{A{6eL&82F2R_B&*0bJev7~V zMx-G_2+W(0Sjx?252^J{#~rbC(yhl8b9r|9x3);Jcj{@?4=b!Uho~k-GfZ6W@jPau zZb@L!7Qz1|+;-wg+WnFY2dGbOVho89aEdH3=s4w`9+#}s42WpQdDw<|xZ2^V(rx!N!e=m&LRu-Ujw`@Sq|A#Qab zS?6?TGJhL6?M64+hmu}wvATMyrhu<{DKL7e#sRRj$i3Ka5J26&|&=z&q@KWXmRs<&s=O1hiuYVFd3BYCu zdkBBo;{ktV;|d_|)f(7`_W`*zb`3f}F@@H_AszAn<^GHRJxK_ihV5>uud9IqUE=Z3 z6z0>shSRpq!NJVY`FbH3%ysjNc3e_Bi^T$=RE%FBC6`Tg$MUoDlks`+mapU61`vvR z3Si57tQHjeuV1fA%>C$k#b7)8Lw_lcf*$@KKk6Sp@bCZfzbBOk@ZIdL1v=%6^0!QM zL0&GX58-O;V5n-}Kf>N3ID7b=@2o#!skc~cCRITSC1}o&!G$lN$bJg{iIM51blh#B z@nQMIsXGsi%JTDOE$+H^C2RCVdU}(q%s&29jy#R8AC9}Hw!RP$+QgENOJppe5C`WYLt!-1Ug!$x?XJWIKx>v~MP zK6CW(DPRf4v`%oZ8`VrFiN4 zzg7$BbDUTyJKTVe1BqPmSiX(C6rK<6OFKP%O)X#<80z=lUQ(1qzEIo6|9fw?uM6-g zx<_JaiDUTOl6K}BJI$t zXZ-OZMf`zaE4I#SYT)aKW^yL_-ChNBs9^(~h1zWd+Do21W`{#VPT7j-q&oG^lHOHv zL3mtoIa&?NK^a3eXW@!J+iea(xzaUJY~I}+d6!|5Ji(5VCIVMj_ZHNoWCtM+n&K$Apb(9dj0lLMGPZ?j4{A&UGw|y_Q;Gspve?4Td zAU))XLs(5!9Zn|&hDz6kKs}39R)ELrtj1}#N6BUTS*{3J*|KOD?O4*Vc$3^8nMedX zRCY6#zAvbi)lkrv?wqX;mPCN9e8n}$t~i`DPR^*5KL})NmjuR$tUR=izPiADj>}87 z6p;N62gQ;nw(6*XrCn$h)UAA#NfPc;cq7ZeapcwBb_N%k7^puSd%zq!p7!hv^rw_| zv!m^BHd`YSp9Y&Z`AzW*Nl8%}=@^KK`!9BOCO1NMNVp=e5iE8=Xniis=Lt4_CrA06IN@H@sC?9r9JXg*8Mw;6VkE>TfTlaM zKR+dUXB1sLM&C^umWisBwdrhid5Os=HHYIH#*PNyGb-@vZLRkBNgH4UJc84 zp1loaWOCrYm?kVlM;7jwbohesl2)hQlYatXy%~)!AunM})RFqtaV@yr)KZ0&9KgUN zjo-sl09*V+Pn60xEIO08K^WW2Z%TD9N7>R zvr{e9rek9A$L2+>;T2oQ5Kkk!wWXk8$y@8wAnmycgy~-ycj2>rIpQfpX%s_ z0<+ZLd$dl`pTP%ZiDuXnrBy>d#KAHtpLy|(gqV25IW3Ac2vA7WEzLvQ0+xG#^{&>y znQRjGX1;w~mt42GHXs^NNlUNMBu1@M0*_tm-a?=?=5sqmVo73R9A_mONoJDf3tz5N z&Q?@R6_&`|x_1-@7fAja64H<;WQ!z%SMLIjc79i)?jZiBt)!2l>k?}#{XKy6*Q=yI zikTE)!nclvd#J7c&wp!0GFDkpkw;X=e7FkZkV-Rr+>>*KrjV+a_yMUkDE#jyw-lU3 zeNh0$-MN7W_sy|V*m+yu4XvI&VdG9SuM>9&r(y}MCI;WN#WyUx%V{$Zs1q?f1@S~& z0TzO4VF*UIWmOz-9MHE8^pptSKz;6>AtEaIjbNI382R zvRitRFhpPgI3)5X7vJr<7x#&>xtKPgzwS7;`1B`#K3*xC?J?+6wE4)VqzjS{f*K}E zJHE)LKZV95^3XEs&V6_=?6?PKk>VF`9z9U}y4DY{I#j}q~kLe&62((W3htODvfq_B# zr{U?FxyDY;P;?A&BRDG7feyyL+D4nsE)H{>FEk^=3C6ycnEH4By6FGMi&}^ui)5NX zs+}OWARK9kC<;HL-N_bNS5$>3){u`8YIsldKtQY2yE9>}q5V7ztSGIU0fiXgiql2O zNk;ydRF3%18ylRL;%0CbQcepV_^K@Cbtf5sC&y0H=VuIO(QrP0#~Z!WCS^|_!cUL& zLoV*vW5&&G96^%&X1_>};ZtTiC>SM`l_LNnY$NSm_bb7#`~{j-o>S4_qZ#Q>*Eb5M za3jQ|QH^L1jNj1iP2!Q_=(`R1#-l%+B1s6sysAwt4l12F-l|tm{#Xngs_QU#IL>3- zFCBZ+a_m0v#0T;io1Pkt=LJP`fl_A5)9G?0|58-4YE!lu`_dUZw*>K}vTA^6 zMHfOW(H68j!^mtp91)AL9i-XVI?+_MJ!|62M*2N%m6*2!=-Ize!OM3w5xeAa$6TOr4kv-ID zG-eObsLkXY0Mz*ha6nm9TT75mKC8yHmJ!H6sz zH&Voay=+%KuGG>XtUjLJo(cRA(jC{U_sTUI#Rx_ej&ib)-ux#UawAObxt`jh78tQnH 
zxPv;YN)bi`P%dp8?sLQj6W;soZ&l6jpA|BO5TWwg1&5WeWY^H@aqjG_){1ZJ@XC0y zG1&)k{DG!B1>RF(9Ui+ja%9N7ON&93eCG-`*NXzUtN*>)`d2?d`kehCJNXBU)tJsd zzGE}|0fIF)T45MXm88_z4A~Nbt)!vd$syTEHRZd)-vWcfkpmGNxwdh=FQVEwoez$+ z{dr1zBv}OEZl8C?vSb>{EiG}nP3htM^TtK?e&Bt0?$W${eQ|_=hlkg>NsGILaBs`( zWWO~+<**iOUlH{jW}9P+7SJ_aZVDUo1yd%8VEkDl5Ni`;knD1rt?kYGX~+;5od+*Z zln$UqfYhU*FHP*Pab(;HyWfdt<=wDvNo4U3V`(~fN72zZ9IZuy1ymE*6Ct(Zq<^)V z&olBaf2tge-RSQNACS&Fs-ch=e3&%z5W3n;m0^BcBsdA?ASSY44x;h}CJ|DcJ#YPc z4ps94_z=^a@6DDN1%Qdb>{el>F(S86%;nBWOm$t|asvN0%We!O#AHBwuADXq(Ti_y zSTSAlzIa~&F2a|*JhnSoo2lb=PV@4eb9_4EjmjSx%nxhp-eoJQNJdbi%iqCtc)E8*0#B*wj|LO;X2|<^RFIFQ2SW)WTk-3JeyR&sd{n;{PJ9?{pQoCL5 z%aPPNxvtpM8%O(E@nL!m7!-Z6JVME0_>5%ZFr&5qOk*wYOU0Ff@;*-8{A4xpbHJqmQbIK0Tq1PM}}<6dZ|Bdff3 z*DN>yvf!}a|1oeRc%UoYtFB(8>zZ1J!3lstSF=m*b+BwB{R#XHSfXgis&>DEwxq+$m&B%|o-2qHss9m~N zAz-@D@Qo3_x90%oKD}5zNk=--p8U|bz}$Y?r)2|wXzk_D>9%`WraOljKq(_}r_?9Y z6FebKE*1j-Jk|!+85#ILojIC2Yb+)I3vV*V=r zq$AB~{Wt_q0mAgPi=<1(oe4b-?4O$Gqw~n$FsbNcBe%17%05j?fLe$}0Dm%~M=Ieb zgD~y$o&=C3c7S=<8$$FT8msi>e#QJ+_rbC?&cXVU_14H_{!1*!d7F2pg-@gb=ShQ3 zwBcPnn5~dW`{@XO9Z-&@KjYMdIGt?Kc|GDWF88R?oBn0jbzib^Lq5%3(mR~Zo=GeE z;}vwapq)iH9Jy8;M$v2SsXgU19Sb(GyoQWfIw733kOXs6gYmk;lj(0$XxSnMnINy` zkF4tZnrK;#5QX+*BJYh8;7gjm&b&G7891CU2xdRTkQ~WVR|3q5LhN~LD#ek$NQ_=` zxum?Qt9FG^z~`W@n6b(w86ZuvUy^8g@-Uz|o~@zfb;(?DrQ`YjyEN^(OT{dCBD7sI z48I42k|!@T3%jmhauJ0Mw~|^tC?_gHCu;@vOMIob%$1hsuUflbosEgpnH4h+2^gN{;e4`vQAx27=jXK2jCh+* z(Wl+{fD0c7CmYbIq|Wxx?0KLdR&%SOAucXlY>S!h{hQGSgTcXiwhZ^*nQF!Qp7R>s zFUiz^Gq%@Ap^i@G?j8Sg|8X$_naFV+0y4mdE+vqSJ^<-VGJw^-_j0=IlTJ^g2mEUX zaS%mba=dd9LP8L<1_tmmp}xY3uJc;!7|-$Ntd$_2CQH(fLH#kE@TegA=IX4x^&@+O zSTIsLK!}1|^;m+Mypka^QUy&m{XT(TbCk2QlIW(ofbQ<39p(InQ4fTJ9ClFuYqB z5cgz+g7&272_*$9GRF$Teg$!6u@@2hw4s*7A{PTVjIKQS$>kXX`o+=uE8UYntyX9B zs!|ikaKs7a;nbuN6o|3H43G>TW5APj9{!^Y=#qHQAZ4#rZ7snd++vR9T7&VPHa@{c zf3}H8zQQa)gV==2`CQ(Dy6-{xn=ex%U%&;-Ig|UK`-L{Z!?mBIE+&J6a=4vHx{N@G zTjST~Zk}{Q&j2|EtS8#V_R6H%F@DZYlpxfJ=Tlp8i0$DDm03Wl%)?5aw~A(=_IiU( zAjG1VsA0aC{{n!iyv2avGQWrIvwhtg zkcY)(YcThr859x`TzemL*m&GyD0zH=^rN%x%+{tC0;|8mXPtK(SRC_x#RQM$o@{`Q%Sj-{ ztJ82$%I~qSQF(A219HeH^hI+u+$7t& zI{^pRA6GE0TU4k)xJH`rfuf@LmksYbFK&BXut3;#79`E!)>abtb_?oRa${+4?1Jgf zc(R5~-`&xGNY!6KL>P~c%==66qPPc}})e(ZhcI!J7V)!!>}9N~_V% zo__U4X*5C$sNPuwx#IY<^gBkW)^po-CL7WYF040l@u&Im zPrg?CT95>k)Q0S~yj{5W!q;usI{vva&rtb5MhU;C8Qy7qGZ=VG(j1aud(H!Qhx%cV+Z2i1TbXWEfQwoo+_NUj|0I~bTxAN}w`U;qT1wi`Nn53fV6`DK94ncl~>DdymOA>nJ zXOe{qbksQ@Z!_?02z0d}6n?ThQLOdl>p?NbIM3&k=eYFuK~hAOPJ30@v;w|V$i6(E zDfI=dZtL%xUwje&j9n1?Oi6K!s0hfa;j0IDOCwEQI?6{!I?nxl2d{$zc`zQwU(}84e z#X|N>xqW5Pae0oz@i%A;`#dW9er8g30Foz-nXR0iGh+=^Qm_A9?0h&#>Ov`fqtg&OKiv~oyRpF%#mE$~ zHoq1ZV`>QFTK=os{vTcV^Eia1V?W6lzvr2X-vf?x?ssbEOMrmbtJevm8hw8VjUWwy zLnsn@1J{c8+?nxDID`_?XpZSxx_{+^t@#4n>?G)v1+9>R#b7?H$`1}Q1WJ1{fW>7; z%h{$c=Z1$~r<%|TuV%j^A?<^x4U|NRPv)Lfa!>)-Q2o&}M_H5;?c>}N0B0&M*czi{ z-DPJw_H2#zYn|;Hakn`g`%M?myFM)l05aNeFC;WcaH)>u{<!J>Hnrgm;s5JZK}r=M&?y-3bBJq9PdWo^X7__Wmi$&Swim<|wb` z&oWV>P%OdZ&+^Vj7kk#nKKf>Bu5Cif>h@Z_oXJPYI)K}$5vG23>oqIvf65H%^bm?< ze9GmHz!Gnt(`7Qrh+h;onz5Z1PYY6oEBlFFG9e;Bz!DL8bn4b@5YJ14HY5O*i*X_q zA1#S{PWFH)809Jgg5IhHV!+tcFY$;u*f*xI2a`E%Bm+i5+!pw2vl=Z^rQlK#kRj*` zjaYcQGaGP%onW3J7;rr2>QsBe<;^?iWNsWiCCGq-zn9#Tn3;q|z$=YXFm<0Up4akx zMhgUwO_gInbE4>AWkE!qRRn7Rry&Uquu8P&3(gLJQx)vVTtj(~YLzYG{-c-mg*+}P zV0z4&4139Gp>Ygl$QUNETMC+Ud&4Bg-K$t$zpSg;tcqgWEH;=wWhZDh)&p{B#bSLj zwsq6b#BgW4Zm$VsxLz_OiEtxcqb|H}Zu)(VWly~x7Y(TL{*Vqj3T3$goKudP8D}2W zUT)dnA~|vC*)=raBIH}Y(~f$!35F|(N6W8R3!qMW4om(NfSCiM8!W{sq`;~?`(-wp zI{gMFPI|&I@CxCN0{jE2@tUg+Rrvq|i? 
z@u**>UNo@xtXw#Dz*(|yP6`$4w_%WJmX3!`*aO}1%%xW~@3ln%7@s*SNiWGkcL0>B@}qIe635x-j2I0 zvabNRv19R02$3aR3t-ZA0R@vZ`VB9bNUXkQZVL59zFGFcx^9Uso zka?*-4QJ&4eAx{C>~vimyT;}?%r%_ zH_Exx_H*x3K=&B*6!F7xJ8#;o{)ky7Lipj$!)D=L8E(YSbH9t1dAe7ff>!$C-wmiN zdHXEctDxj$JW7lnCy0o_-Xo--eckS3CqRh_RrTx z)e+||$*kLAWJS1H(>@kf*XJnRGCV_r#-#N(r?@>gxJP!CJU~k^qEO&`@i?fgqLuG1 zFC<#~RZb!ilba{99pcq0AS|XS;p|^QNs=kT+;l>;blI zF6wP1z6UY{oUxQu(Dk<}?_tT#hsCx}+PGgiC%IM^#)-|F;+MpPtD_A|3 zHrF?UWgH0MSh+7KKrEf+rU0KDgT>CuFQfRv*X1@IaPvW4@nRHBHV#al%xY)yh{y@D zb&(!Fz4oTbz90Ig836?$)cvB45n4^Cnz)jaI+9TJp|83+$@${R`;!yrvmCySw``bj=eB2f#iQg zuIpObYp(W4G<40kUa#em=~E#3&Bruu<$B0IQTd1jAKG^o73A-4$>oi>l#N;BKEcYr zbGnt=f;V~3Ys0u>jUGy0p3Pe*iTRysi(E{oYyHfiy>?HHblY)8qnh92T%;`HdnW8B z_`p-O{6N8%jr|*+y+{XOb0)GV=PqM;>R>2=zS{jx7Uf0HmQTO!Vn((_`*+xkxBs}- zyW!WTmu5Pjg9JV1-f7dwQXuUFB=v=W(u_?i!jGFa&Rf11e9|ClF;Ir-Cg_TrNKap} z;9k%N5*V-YuHfmjHj=d>{DPztIY(t;)cQoSb~e}{pT4=PKQ(=QHKe@m^?P#4M4=P~ z`e7ghbfWQTwAy9-k7+u7Pn(&h9GQ$}!&G9BPI?i zG4KjBYPzZG9floG>Rf(XSv@P44(gjL=~? z)9vR=yU1#r<%Z-Uq6e*Rr`#hb#d81|T9c~#_Va$qmVTCLKMThx{!ecnPB-+!aCJzEnk z6ybNdu%%XOk5ZT7olPLffxLZfgVXVdq(E$H;5;bE(-|qeTGMw-V-rgSk=&5QjgZ3%*OzAOEN-%(5PC^ zKj<7k7yNSjrNF;RwEnW+;6LB1k_Zuu=#NDn{riShyawUzXEk#h{3q7Mx-9Z%e7T-a zF;JdB?LIT>Yq_Btc7?U&Q_lQQq^YdFN+AG$*3kqW?eP}?PH(?V;B%(?*wK8(?Qp2J zDzgG`rcmHyGfoQcIA3Y`Q{w`72z2gDLL)R5v6&6ppZ097fu7QWCVt{6?NqzIr4DMa z+QRmy(6UUC=7O7-AW_#8YxphPGhA(RsqDJth~>3Lj{(j@>7|QxBr3aPdw~~+T8X;S z>Q_#Rg%7d5FR#)yBDVVp>==Q$$|pHyS-4MO4_v1YS7|<;xb(~MsaZl|S&uZNV${k$ zqU?IwuXY4XNke83 z3bX_5q<#JuX>S2lb-uO@1A>4kY(PPzK|rJpx=SQQq*D+{K82lkf>7+MY8pSTWxu2Fk*FZeREp>fR^E4@GTVh!Ne32UuYmigIj zIgHbz2q^YN`N<|>Wy0#!#vLQ02S@8By#7I-)6G=RUIY#3X}66Rgo+$lcohpinIL;Q z*Q!Ys7S}qHv>fjwSW)+kki!~%st%HGC4vysUOnOS4n%t;S$cta60Bq7-^~E2gyZkw-rdTg*ZN1dvgP*tcRHeDjuIxDCsAk zM=7yw$gSX?*i3IWj(%Fa7`Nk)3`RnoEVnmMz9bSLmz`$8+`jg{%5u0M0y^~1N2%Yc zh4Kt)eDTHnb1FA9h8fU&5S7>)--2IsuLT~jyH3(IWY#h%QT6zWu}oLtNdB7dNRD0^ zn#T0S>Ybkow0rRlyJY12HIO~B7mtnWu;o^&daP|@KY#uzil>TzY_(E`u!Oe%$5++O z>_nR|x3iNVL*&JJ2U5C{++3T*(bC|b-RlU3aeF0!96FkNkE3(XDie8aM|X4SJzCg` z`6*rq?Y7v8Uz4Q}bg4GCo~+&nT643vmTrh{QQs);AK2M~O?l~pjY^V{+c7^in zn_x+n^cmj*N52^aBVE;l54Cf-9nKrJSG;2&F*_y2>6c)KCnEm!FLIsa$1pw2qQnwP zBT=AGYqr#8o4~&}|yqu{-oO*2wZ!)WIrm<;dtVew{JLgw&2|QafMGr_~?jfhZptic#5)-D6Ip@?Vah_!1`}x4khxC_O)OxdqCuwL1~D84PP`{PdqtyXfGWRGYe zkHe^(I_p+x>JQk+XN(4UbR==qyc3!wh)i=dTZ^MTXO{F>nVvhy+X!@e zd4F@w_g3GZQ4&*V$a`&2RiG-)6BZ_)abZEfB73GkSd$|&n5x`)x)R=E^ymJ1snr5WcpcKFYuZKo@&M2B>Le93_|x5{~%M2!ldmLBjW^2 zK|;>$<_8J$bqu5a(@mjqp9?IJ_up}Emmds9v#1~2X%*efmx8;w&9!KCic|IvM*rVX zs_>UMIxjf6B9}J$?({x$ivEXX1q*Ey7bS|Qu*oxS5!kJPDVGcYMW4LRD($A~arcRD zxrC-mb?}VM=v2Uni0@f{W}IIyfk_!eI3!*}R}@-9_u7}O^nOu~ zs1rW2IFsZ-`Jfao6eVg&o_?=p?ozL&)52-UE?eUVlR#~cE4!{2@uGpm7BS)bG&y?1 zAO*+eyAmkvfkR&`WAuZcPZ_-y(;4YAM|C$S>DFJsbUUK&j6DWu zu|J0Lxw_MI@8#fq(_NNrzq5vmIZA(VFA=8w{_40t?dlE1l_75D|coui|j_^s6bjY~$+?ANPUEbwj zX{kS)GsATp!%;8aWxhLq7$!0Aj1;#$K@Y?DA0YZg{u7RI}Kd|t8k#NNt#0#a~`WeBC~7R{g4JHB-X5JkY?$GhH^$E-UC_ zlYD#(*LA5;d;-7YLoZ~Q$5EZ6Zp(vsuXv<$dp@xvpDWQnUzg#cUlG;FsY1?(l=?@L z9n$}JYWXS}RC}MItnIkky);V7?Pjm7zUPGU1cmvgwpk2)W8O4mb97H0nR<4)eDWnZ z#q5c~;0x)!lB)T1$xhbQ&A3)g_XZo$eDB^_LixpAw{5p50cCi|`5HLeO z@yKF?c|L4d>!Ydud+QwXeP?9l$!u2f6ovR|({a%L@_>>}mhQQ$hUlvRWEBpgt$Ur8 z4iEWaRgGNn{ht_w8XUToQ=WARAzERoOmTz>|_g!6pKADt~diR?EDPy>4 zmXWDfAuIfALITQvv)p%2g=i(w*n>K&rrb`uKLykF=Kso02n{VF;^D58N-44`>J9|n z#7M;6TbnCelJ}C8jVeDjCQAi*)qaxv+5OGAgS`o?PV2om5(G$7y_6`ih$22ex4g8R z`9%d88O`!m>zU-&N0t2%idp^#s~x6_@4hq$6pp+Q&9xlqm)*GCXSZ$%7F6@qTQfgXJEVlTxVZR3kykC= zbbV1FWmZn{PhA5?gd1Elb*NDDd!Emg%8$1=bLiLt3j!{Yv!6S^V_z3e<%P_tDEi8( 
zQ_^Xf`Qrv;u4|t{#P{fypfRlU52L`|biKL8)q!l&5^P&+jmGK&EC^Qj2;s#HnN2|IF4c`#Nx9WvqL90__#)C^1|bbideZZYLeE z*|C8yn*+4;^KP3fe?`Xr(Z(cK<9lbIDv-<)M=9$%^V~ zX5&yairCMd`!hRpmLDu1O8H;y0KPt^p$hQ@64o0w7rF3;<%P$AL35q~!@|7dVep#B zhO0-J!{T++z?N~zZM86Bt3!n4&5G~2`42ou3Zq@z(CGc~5mmOvbygrn6mAiIQO`-D zrJ&18N~!{8 zhpO2Y2lCE4M=(mJSTtZ->ix2;Hv2nOWppd2bnaTQ;M0!;7{5Jz1O~ z9TB*;`=(mR2jLv%!d$=NAcZ!Sq?ouCfnpZ%q;dNB$>8LG@8<;y1eDNBcqFWdK(b=X zU?QAhg~2lCFr9K2WiLP~fU?`3dL>5a!7Z28oL++WVlm{pC{`0~*BXv1+-|1sU1x%7Lh znq)t$seHLRU=3cxyT_J?pk|5q4 zp8x2JbuM1QbAQaCro|t|peVjkiBRNRbF`cgQ{TY;*$0U=@m{d)WEgS0ZV3@iw5#wadhit%_A4x^(PhIjg*)UlF#D-nlojTUGx`T3o; z*zLGyZ9Ml@2oK9hkfn_N!^iR47WT%!{*)a4y+CoodGb)VFEr>`ddmv)Ki@OgCDB4J z!bIT)e2U#xdW{|oAGV43L#2Ab=Pv`;-pep*4S;h#+Fud(U>YEwWkz{_awPapq?05$ z4~R3fqFHvn|KPA_mvfhjcdX=m;7H}c(|7db_^+}-0vd|C+wVDn>ay@bXGC0JJ%CF) z7_LWO0FI*tW4~OJMHjdiDDX7%3@nd-6^}K?UF5}O2hk&F4kDj(RSf;4YO)P`O)VnE z?@%&wd4IVAgkN@Jj(4%9*vswa+vzOa|rbC4++U;nhK&<2Vp4F^0(M;BDV8kTRX?ED*3HMK-7$=fgbt0i0*S2Wyb zqDA3O;XSG=g+ogiIt17k-S6WiBb*(@3ND?*59N}ovfY9z;(TQLE+SETt7}O*`Yvg@Z6&by5;(qk0U1>=KyamPI6$TPh6j~ys zE|A=$!`@qs_y$-9_I&&sEsxH29u%_YtH#B0;Tgu@F?#$}`FD4eH%45L9_^PzXjny) zghaGT-x+R-v^N}6(9xDqi3^6b(R&`u@-VP|S31dNBMsb92n-2%WZ!X~anYqI?OG&b z1qWdCb~7eXu!C&JcAnJu9)Q~phYWzJ{oWM9Hh zD}T)=d+pelF#DN!l!N_)v#lc!hvv7BB#cu-m&8rVs>!DG1n*ITZQ>*23Se=PSI}Op z%&?&tu$^m-qv9dRQ+ycVelW@-F0%%_lOy1ryvJ;%Xm4G_5E5aYO?LTK^^P=Qz<#mZ z70)GeX}?s2*}3o6dWWvSG!J&&dD?SHlyoiXtX=VxcfNlJdUw02gI0&dzw`L`AYUsg z&`2WSb$>kF@#!+uVaBPjTSs(=?O*FO)!q5$t%w3isL)%uxOTobQvI`_QuHBi_5DxE zsV|#Uo8@ds++}Y=3?(wDMX)rY8G2A2Q3rI=u&Wo?X9O`d2_$5%peO4O zEMtgGmDsKi1~0mBt`ihD$3>)I0;4IDax<5g8)PoB8_u*+Bm3!jt0P5U*Jr$XP58!V ziG!mR1`FxI1|!ShP3ZpavKrrLZu8rmk70nf-6WIK&#?D#BpT_x zOUEZ_IBsAsM#yrG{CKYV{lKg>|Gv~JiL#oO+C)>HY7 zf~0~3fAU#8G;$w$DzS zzx00Tc`$F}xo#P{&bwJv|E7wh8O2WeqEotcvuU^7?9@{pk67Xv3PwoJXcGKq_@2h6 zK%9o6DCpup;uynZ7R=S%5alKq9n5w z7x+TyH@7?NN4Lt2s}Ig$$nf4DmH^hL*#{)bdBfFL&LFL23u&0L z8!*sEyf*SWDbx^nF+@6FD>Q@jQhydhS5mgKdQ^~oK1KHkLsy5o;Gl^bZ)T##&+LQV z-BlbT%!<9DRXP>g5 z1vLmgH161EqQW^tvDvBIR%~r6lxUy7EZGGGiWct$s%-S&HLv4M?17c{X<%T>@vJN5 zlPXg}47jhntQ6t48hu~ILHm4nhwsAN$zw%W`AvjF=ImDz!C)@r31qAFBkr%91a0Xg2b{)yEZ~ zk(cyVX&L0vi)h~$ey=kwt8I)J$~P|~dgA_(`^Mnvk9{Veh6$&65_^1qriGqJC4xs* ziVX5aB)`rM?MLS;U0aIyk&c3DlTSh?^h7tVx5~_=X?g?_^4iaZyu&+7(O))O~ zo6HDwv=}M60T_}CsU&atV_|aw#J#GWZbag4x12=Z?T)aw@hrD&*?=nXG$^Ngd0hb0 z*ru?loXno2ETc4W0GOSIN{m;}W+KD-7_Wrg-<;Y+HkBj;>rH#8$TB`tw@ey|9Gqxy z^oxdzoMm%_6u0Q{g$e%2!@2g=%gYYyr{2)6@vL%Rz?KQ=NJgxb3MW}^9GerMB-KAazPi#w%!m8Bh!-ZpLV5GHNLj~R!N`0Fs*`^6?X_WH^*o^ z1)kjU@g;I@nYdfV1ui|&WR>wN9rqFQfPdH8>Q9U^$h}=O(a@1Ac|1kd8qL97QBzoe zZBagbsOis1sJI=AO+bD6&B=gZ&OWKaqwzkC2IfP%Brkc(DO-g}Lo3~|P+_+DsMN_1 z^Z~{V->Q18PcM{ao<5ytmQNHE!!I1?DCV)8MO>#Aau-cg^w2*q%p;%dbXZ{D9@c7I zb)b{dcYGQuprz3s^hvI;rAUkzutIEckZ7{iI4gi8kd0}kZ^8H zXy2jJP@GlgE_#!`%cNAIKg)Zj$8;2<@9viAmd)%g5;Lv&2I6L4Ky2)+Bh@*sN--e( z;JzMx#G$OLj5;|q0(Tcu30Uq$-=F#!*O4qNvROZ^7hL0L+xdRJf{F)wVkLsdS}#(5 z$L;L&WG3D^O@JghAg5`SoH&YF?k(+>w8j@#Pk!}z(P>?E68imAfwKMD z_|2F3=M%2#QmWBhcDQ0cyj*QKc0Y2w*kqB{-_=Gv54*&DN;Utf0?QHXVmlmSTeMY} zDv#ZaBqC&em#tl#l*-K6nq1}WS%bwO&Gu-A2lF8kta_s9Cwr)LbyilzW*jDKv1;ap zK1^_U^`47Z*jYrDjisA5X=_7=Ns1E`DbSiMqV)2TTSJ#TY}k(q@kNKTyw#H58PQ_u zfBeu{I=Y=fROZjpl|28Vw&o?;d>fP7!M=z8Hw}vrQvV^FM?ubOqpU4&+4qJkRMHij ze-KI(hNB@PNjbikp{)&4F{J8|r+7(Jqfh!yO<8vBj0=?=FCBSTCVBC6r4PKUdS6fY zc&SVvRG<>OE!Ok+@%IrI&x3<|H7up)*3j!{E>qr#bUV&O@}Rde(IzydorkvR3!e30 zu3p(2S|7eS`wpR;Zpy4zHWXG~IE{%En>^z#V?=^5;A7%Yig26q4wKiz^>fTQ4U+%a9Q}|C38@QXA zAST;s(PpzzC%8Vbefw#vsYjx2ho1(1ZLg?!?))Jf`-}h=PEc_HqY(!8)bll7P 
z8K;eS;he=yFG*amh){MFD6}4b@0X{Twu>vCQ_O>?8k6ZUXQZO&rbJ2q3_@l^3T}33{3%UJsHDcaiO7U zn12QRKMy!_Fei%89@An_P_8K2{-~qVV-=M2#!KR`9S2hl8E|LaO_B4c^_mA2fH>4e z9&FWq!RLp?cwwAqpltXVyTky7K=J|)uBYWx>Rid=1z8z^hZHs6wXXsv#=ql3O-GlzkJP{p8~1FjoMJH2E+rcU)Rk z=4xMfeN8;I-o7#DHvO7QuSqveUb9eCC_Q@%@T|!7s^rNp&+k!T#@kRTWW%`Ua^TV(O2j@+iM`1O4z}laXzXTWG!o*6QyVah=qK<#a`0l; z>hqF4$3qC#-iHZxilp^CBO|p(jY(UxB-bBa0Id9n@+pl`vE0P`O{3>fvP{l!ZzhaA zZl-0_i180w5I9uaTe6sO}t4BNza=Xxfk=^2edNCo%Yu%s%KNZT4HVG0ILD4 z6ZgfekmBPG;iE^4@>kbabECsv^LBFR(+=970JL}Luu>F!l;nBHoYO(SCv3#N!qOVI z9ciacv9iD32Ey6C{9pM?amTvFW=*f%o3 zqb|-scs{NOETU@Y!u#hA_H5(%00;ItFUT<{6|}dlVvi~nPh>g&E1=Vl^Z9H%^2|Rd zSLw-rj^O_&E`4_HPk5aj^!UX{uL|p6t_S=v2y(E&^&L{)x@45Fd?PkLsBzJtwg;(xM>we zC!#c8GFPKZT&p(V?e@W9$S!+`sz;;xqVj?q$0b4>bRjI(f*1Ky;7uK!G3lXq6fD(@ zXcsrVz(nANw)}=cPD>NvD{ktx&`HbEA!+lv>erM#+hDoPWcg40LxDBE5K6kAJ?Tni zefpUiGUaOfc9MDr%Od+N?s3h{)xF>W2S+KaORkExg z6U0!O1b&n`h1KoZ_>-uc|$MNWd!@Fae*qVkX?98cbxn z(MWDxfL7x%m|$)EwE9P$(dPEl(fazFG#UnXE8z;dJ^0BxIWOfWQ(vfBAGDy=n2c6` z+2W0UBG_&+Im%-$l~?8yXweGTfoH%9JIe z^`Rl+wC8W%_QSbLsA`*(h+``F;=i5G{y0zG$jgStesNrWJ1k(kI$TKG%jfjxuhSqf zur6HgZcuTXRIKsEauhiplj0BO`o{WuS#+J$2dGU8Uy1G7WV81aDhLT}6(yTOv8+F& zeG!OVe>zUMbmBm1zcyMro9v~=tSm+1u>ekw4}ITJs^vYd(~H2STKLd|Lg0l@f6*_; z=Ezh$-<_R}3Ahi&GZYOIxqHnlb=R}bDn6ksTVPfNYWy4uVRxA#%aLqSG5RwQ;XXXD zO{Ts`Wu-#NKMMIa497f-Hhw+9W2#N)oIh{_F}d(rc>y2ISQ#thX&A=|Gv=s2xe)i znk=s2m1{6gymAqL#}Lk>uw&PNh9f(htA6KT2w$MBkFIh~B$Fee1Npm+1B8m_(>s^-Rbl^xCUTuvU zAUkH##QpdQxsQ@nSe4GIVTau^p5hs+b}s_Pq*ejxmBPM^k;GJA$?Kf@bx)~(3Gi|k zRj1v(pP2uA>3pLK@O{|U{5Ck5?P~!a*epkdDePJ}Y64bBkBv{UaR)Ooz~rK!ocQN* z+v)Sk#uaK?NOXZCV7)Dytk!>P5~Lj9QqR^|BIA+OZ}K1BccKW5;Io_QG}?Ad^gT$J z&*Erh@(~5sD(V|~6EX9G0>9)n>HoRL4i{}EtUe>9x$$@^Q8w1)ZW~9G9G`Drw~Q}s*FBX;3fd{ba^C)`OTpOqv* zDM?TY_j7Hg6)??3er8`+GydHG&Ho~511!!z3sf;OZ1LOG&Am>Li%W1{dj zgJ!Hby-B*a%6oe_dspr6OG2c^QTf>@$5b0bo7f7Dc)?=EGJ?Wx4?<3S(9uZ7evZ#fC={lUiJnX~_h zwY3@BOJRt+g6f|m+{{sv_xejuhqt?rf0tg#EkS((e+c-#Bw088G5{9-0iP|8c3K6K%v)!xhFt zj64&idQ?_0@I_l40xyig5*SkHoPI{o2lReb)kX!bQOln`XfQv}w(M*%Zcos(sycCE zNXldeyQ#~8%z4^^dWak1oakMK8ERSA>FDSdR@0*|;9b7)e#l!1Hc^jk@sQGQFYAM# z&xM#K;L{{$>5bX7WNmdygrWLp2P%qaeOKdHol5tin{_yUYz}YJ;{1h$o@-KYY{+Fw zb1%lAVFs1-<>f6p=Tc}sz@-3+^(pvCshR*=N6U6VCPz<6LsQ@L;KlgImSzM0 zKW+hYw9`Mb1$n)bgwP;VBWY~A(9 zJnM=qN*t7N!=d2r#{$b9Evls$+O8Ev!gKKj&D*l<_cI~YG(2pmPl4YpHmE5%qW-2D5Ex4r{=t;a)18#Z(lHrf#={= zc8nKv4MEI436_5@t?MG_CG^7YoeV_)DJs9ar=VHx5}}={u~Oi;OhN|i-DCcJbuvza z_XU;%dXP24f=7C~s3u%`>l(MkXI(;N5zHWRZi{e$eEHt7z?h{sLtSYiGf*93TAsk# z3BC2Z=Onz~FD0}o0M^%-!LsyWRo?EW2Oq@0S&i{)7Mgt;rWGDOdj%e0|DxgDqVd_D z?^slpC^*jp36XWId4}ifBiI8%%<)J`F$0O(?kdIj&Axy0x*EKTjKNJ|Xztlv2wS6S#b;6c$PLCTo*b{=QSPSDp!oU-;gF8I&@S~)}wZ1eu(>O2uh0aPk%n%Mp=69 zinKb&%bTdbt^1u8>~m3W;wRJ5kGlgpsib@d^s;x}_p`)&TsLuNS{+8RonQ;HaNv8^ zP{-{F$YqfFI|w@#kTP!tKfpB}sN zxo&*~>n#!7S7f#j3|9CF$hgBl2{B|!hcp6#&i^f49NBqSFP}=hS1h!H3H0bnRT?66lzoH-=m(@f3}X6>!#LGlX}V z*u;JUT%G8WENh9jCr_}J<=r|yG>%bWGE?I@ry|6xX?yA$rCdD0iOfg`ZYR(nM_tYz315aEe z@W%@Jlw=^bb?Gd)+xt~_QFWq}4-2dzxb2|`Z&$p}RbbAfweyt2&vUArh(Y!Tl<@`C zU7n*<0S4I+Evt3$*_N$vy$3?lYP3X|+hWiF|6rGDf%i=_xglS9XSR?51;X;}O;uS4 zSejK%z!9U*4p=VqUBJHxoHzy+_3X=mg1Qo2DNe&5D1FasCj&B<_R+jd`gUzw5_ zrDwb4H$WB;X90+C}#~4p?8NHp?fw2DC>xu>e;L?wR(v6o?|e0fp=|W z#HvgI@7gMM4etE(wr@v0;PNX0WK92wWo<-?j-KaYD)`JsN0&}gBsO|}Ua}-hr+h(J zX+PhLL=GJT-kwT_G+$Jt?r5X5DN%b2BIZ`OXf94`{C*kOn8?C7H*{Bcc!%A! 
zlalA`$b}y!zJFfXjwq|wJEu_ny{7K?(W41{d4LHbv<6i+v&F!kGYM%lNl&k6&+ z+OXMl#7+be9|lQXntN_Ufw-EIxxDM^#xzQ_iatM;#69g_vPr_weGId+%m!M=`bwewkf^Sp}K(U|FI;rMEc@&garnO-QNPxBLnmPKBUCg#UtzW zaUrwR4=)a_No%aacK>7_V4_OaNt0db#ywF$7N?UxWCziW2A>SQk-CdG|+NPC?Ne3o&Xr2~CdgM+&UU@vOzGpu2asy}6I z%;N+H^nF}Ig)&aUsYf~z#3?B$J01bcp}=%d#JL|)BtY$(T1r|?g3$q3P25t!0`Xed zXS`P6su)ok{aGCUOS?d0gMlajgJ$KaqN{~uGTf_4IGf~>Hcs~{tIg-geoE268BP?H zq~0+=N_65JHM2y%tdNWee%aY)=idn5Yf}FG5sMyPuXgpE7>M3psgB$HqiFrxwuKqB zERB`Q6za;)APqnHb9?6!d(&`ynFQU{_gBF&Q@Y4ETIo64v>O6oV5f>{md?#W{&l7QTaA$lhSjYT7QHdpM0K>buJ5 zCjN&ZctjRKfVuU4Q87zDky7KMcgeqn5#mA)+c}jy&HCy#^v@JiEkK+x_%WsVZt}i;LuDm=tteJ zPA_|^wG62Cro-`V_V1uisg82YQ8n`YT0sRVUyhKCzNu*|c~dRZ^wVUZDJjDg+S(Ny zerVU35kh~sc>Ymj!Cp(+oTzkO>cxd{gIGHzk*uDWUVHA)I1}%Qf<~=lTC_QXFmrEn zKXYIB$G^apeLtQ3KV(@yV!`0qxoR!Jr0n)%{J*Z$|2RHGrRmX9B_lRMxGh3s{@kn{ zM1ik{zhbL_iGn0SXM(x2m7IgIXA%yV9+0XFa5`{GZElWYY|pl&>jU}}1u<~XO!x<+ z5)ilEq21$Gj}fQv3nvJ5k}fwU_5L6nSh=xv|33nCM@b2BZ@R9?$(& ziZb21cW?P?c#(iN+~AEW#99d&03zv<42)2L^L^I!Q63?zKpV2Zs*>OK*PAf3QB`)G zYL-QETn2rPzc&q{H1IhS_>;8!w<9rJ^f@D%N~HW{M#eoNBV|t@QS_$lvwky{D<4rZ zT%LCQvuC?cj}}2ZiVau#O^7|Pw?C2lyob8^!hHvqe@xclULjc8ggH5$9^#^SHMSUC zy%(dwIC>sc<6V<>ihw#jK!D}BJVFU~HRH~tXjt!{uf3?mkbI1=|3D2dN)$Lkvv(Vv zj#H^tQpt6WqD&M&+j|^CXN8F9K1%jBvZ$47dU%05soqUb>2%#Y#}?CXABGxO`{WoD zmu%aLt&!RKN&cVC99t&UaPyLj(VRtbZV?l`c`krNx|hYLF7r8=D1@|W4_0|qjhOVY zSJ!(iuO>++@`fH=q3ZzQmp1+zkW_T}>PPuZ6hm1(eBS{aQ>KHQ#_HoU2ulCF%syU3KW3DO zb=eo9oEh6($LuwoZ`XBj)%VTuz9}#HnD-QfYck2Wk4Z7j5zojX=rGlw)s2Q(=+IY6 zUshsZQyZlSBUe+nRC1vMd6=bjmreSPmE&A&)w<2{Hc5yEqG^A1WIiia|Ddw|W#!?7 z`s~;69Dd&#H^?ToTh1yrDI zPO>XkK80;l{{c+=w@>XqfBs9vZ62LUsw-eqMC$oE!fZi&LHFxJEam#__jG|+Vy_-b zW`2JE*`1DLMOGbwh4-FD^%>zJI03pr_zY)cMOj6QuAPr0(lx22T@i`wFKaF1`1`5j zFHLjTg1B#Ixzp?mH(YAQBTMZ6g z!X44(7!D3+T|!31t0LSB&+ls@@WWA-J0Pf2#``R(t$+j!RSzW@dUfrCB0PH$q5gT^&fyh5-ZeioK6OZcQOPhP4-}0^rlJe zU1j203>6jyOS{K;vE@Ger*wg+NnCwwVS|wcthfR| z$nYc*4?_Mzp8hz^|KrI0|N0>&9h#8gO|gfjTu<21mJyfCfE5}YxCEE@SjWj zzx~{XC@_)Sy&)9yrqs({IK3IpelWXHMQL26g6@S;^{LW4umDDTyF?ro-dxkbDYx8a`7_JqhSi-g$`zU zNpeP%)?ZTSEl5H(HI#n5vd={H0RY9b@H8Rx9fEspxR7Px`{@PW=*bt{<~PJ{VSdBm z&8co#E!iKS>iP3s{_Q6GUwu1KrAXt*>ODazf7((_9Q;y+#KTk)FUY%))$=k6~_OUg(OpsNO(QQqtwJPjwq+$@T9+ z`82Y3tOC+3KDPjiql7rk<V~YLY^y@aQkNg6RR%DR#uC;5C?T zjOb$g48CdR{jG6P?|JK=B3{#pCOA*O<~b}lhpq*euA^`>MED@nTMHFsSgm;J?aDBn5fAo?ZcS#_u@wQH_O{J0^HfNB#4FNO(E$hHdL8AbsB{d+tGW zl4DYpJ%&}pTpVSs!n~`&@Wda$_y1~V{m(~TE#gx^mv=$xOe*#q1}ysiY-#OcD-@UV zK(yuwq^9xQfAZwM?%sM;sz>wn7|f6}kg+`H1xZ*}=q;4ddtKekMt|0b4BRlY9|HJa z?fW}va}q|RNCnI8uL(KeO>o=I1WFb(e*L{holrzesm~5`QWnoRCQ{}Vhp2{>1{r}U z4X9lbF#<%V8J!_CY4O0pne5h%0<(H{`9;swyNk%2959loso0cEOBn-%7f(_f2l1>B zRtUrzW+ukbO18q-s=cXV2s30T5Rz^v_Gr4rQHHoA@sHg)NrtOE1`2#ub-z6cVhBX*vvwTX|j~g9uxk zP5J86Gq?_0zv_t}2Y5WDxI+6#2q0*Z*Wy>Ng?hXW%~eV?dL0tdYb0?9z06MD>}63y z9qE!+hxt$2-at=d_L#j9Q_+SQ+be`}K3Jt1mm6_C-g~c_2)$M+cc{XxI4jshzYc9a zGT*xAe9WJn!2jJBR+#L|5U>^Uy5!1ZEeaoS8(fG6I@S_yckE~g0S05_^lj^LK{k1R zJY_o)#=XTJZQOHn%XtE4qd-;b&YSj=4R@(f3=Lsrrk8eqFL^F2vEl0PH>}TPDKxrF zr~sBazI~EA7fM;1E?nVeZfp4H5d*VIQD&xA=`DlA`X{GAE$sfROn>qQt>B!qWME-} zFPq7c3uMYS1Sj6-Kix-q$)(B1W!|{d9`K7daa*1NXRJL*$Xu;(gUFDhgqg1TK0ft2 zT2~Aq&+~b#GJNH0{;sgFyH#m;{leRs(_+)U3bw}aT?96*}(O1=Na3YeOer^{}&tcJ2h@z)4VJXKL%b| zi?jQ{&ZUN9-L`pg(#Uq!W`UBkABbMhulx0YO-%Nf_@qUk5rMb_J+}jiv}T}Q;$uK; zyP-<-P!#jVAxu^IVcM)$?$Rak*}S{-`v0!z_;1AsjwDU=NBpQZ!QT$tym9(4hb~^L zvaCcqub~u1C5|`Lp0Q~nf%IAM6u!&DtYmgFK!BMkCh+nLo0UN>NMgsU>Bhvte!k-} zoY`2`nlPmP17ZE?i5d;pNOvNnszp=KMDR*m$LeU51SOQ!8rjI&pSj1!B z9q@OY1;ap$Nepq&$w%iuc>43*-T1>NyEnpqb^}RHrSf3Td^R-yC;hrvcI&&l_kvlw 